From e3c73d9881180a454b654cd89b2e5b7b07e31581 Mon Sep 17 00:00:00 2001 From: Simon Giesecke Date: Sun, 27 May 2018 11:10:39 +0200 Subject: [PATCH] Problem: inconsistent naming style for private data members, conflicts with naming of local variables and member functions Solution: apply and check _lower_case naming style for private data members --- .clang-tidy | 16 +- src/array.hpp | 40 +- src/atomic_counter.hpp | 57 +-- src/atomic_ptr.hpp | 84 ++-- src/blob.hpp | 92 ++-- src/client.cpp | 28 +- src/client.hpp | 4 +- src/clock.cpp | 18 +- src/clock.hpp | 4 +- src/condition_variable.hpp | 69 +-- src/ctx.cpp | 340 ++++++------- src/ctx.hpp | 58 +-- src/curve_client.cpp | 38 +- src/curve_client.hpp | 4 +- src/curve_server.cpp | 36 +- src/curve_server.hpp | 10 +- src/dbuffer.hpp | 54 +-- src/dealer.cpp | 28 +- src/dealer.hpp | 6 +- src/decoder.hpp | 70 +-- src/decoder_allocators.cpp | 68 +-- src/decoder_allocators.hpp | 36 +- src/dgram.cpp | 48 +- src/dgram.hpp | 8 +- src/dish.cpp | 80 ++-- src/dish.hpp | 14 +- src/dist.cpp | 106 ++-- src/dist.hpp | 10 +- src/encoder.hpp | 66 +-- src/epoll.cpp | 40 +- src/epoll.hpp | 8 +- src/fq.cpp | 74 +-- src/fq.hpp | 12 +- src/gather.cpp | 18 +- src/gather.hpp | 2 +- src/generic_mtrie.hpp | 10 +- src/generic_mtrie_impl.hpp | 350 +++++++------- src/io_object.cpp | 26 +- src/io_object.hpp | 2 +- src/io_thread.cpp | 34 +- src/io_thread.hpp | 6 +- src/ip_resolver.cpp | 60 +-- src/ip_resolver.hpp | 12 +- src/lb.cpp | 80 ++-- src/lb.hpp | 10 +- src/mailbox.cpp | 38 +- src/mailbox.hpp | 10 +- src/mailbox_safe.cpp | 40 +- src/mailbox_safe.hpp | 8 +- src/mechanism.cpp | 10 +- src/mechanism.hpp | 4 +- src/metadata.cpp | 10 +- src/metadata.hpp | 4 +- src/msg.cpp | 305 ++++++------ src/msg.hpp | 2 +- src/mutex.hpp | 66 +-- src/null_mechanism.cpp | 44 +- src/null_mechanism.hpp | 12 +- src/object.cpp | 38 +- src/object.hpp | 4 +- src/own.cpp | 74 +-- src/own.hpp | 12 +- src/pair.cpp | 38 +- src/pair.hpp | 6 +- src/pipe.cpp | 265 +++++----- 
src/pipe.hpp | 38 +- src/plain_client.cpp | 24 +- src/plain_client.hpp | 2 +- src/poller_base.cpp | 32 +- src/poller_base.hpp | 10 +- src/pull.cpp | 12 +- src/pull.hpp | 2 +- src/push.cpp | 10 +- src/push.hpp | 2 +- src/radio.cpp | 66 +-- src/radio.hpp | 12 +- src/raw_decoder.cpp | 22 +- src/raw_decoder.hpp | 6 +- src/reaper.cpp | 54 +-- src/reaper.hpp | 12 +- src/rep.cpp | 22 +- src/rep.hpp | 4 +- src/req.cpp | 86 ++-- src/req.hpp | 14 +- src/router.cpp | 260 +++++----- src/router.cpp~RF40cad05.TMP | 549 +++++++++++++++++++++ src/router.hpp | 38 +- src/scatter.cpp | 10 +- src/scatter.hpp | 2 +- src/select.cpp | 92 ++-- src/select.hpp | 10 +- src/server.cpp | 46 +- src/server.hpp | 8 +- src/session_base.cpp | 270 +++++------ src/session_base.cpp~RF4069b78.TMP | 711 +++++++++++++++++++++++++++ src/session_base.hpp | 22 +- src/signaler.cpp | 74 +-- src/signaler.hpp | 4 +- src/socket_base.cpp | 296 ++++++------ src/socket_base.hpp | 40 +- src/socket_poller.cpp | 244 +++++----- src/socket_poller.hpp | 24 +- src/socks.cpp | 94 ++-- src/socks.hpp | 20 +- src/socks_connecter.cpp | 215 ++++----- src/socks_connecter.hpp | 32 +- src/stream.cpp | 120 ++--- src/stream.hpp | 18 +- src/stream_engine.cpp | 746 ++++++++++++++--------------- src/stream_engine.hpp | 70 +-- src/tcp_address.cpp | 90 ++-- src/tcp_address.hpp | 6 +- src/tcp_connecter.cpp | 166 +++---- src/tcp_connecter.cpp.orig | 444 +++++++++++++++++ src/tcp_connecter.hpp | 20 +- src/tcp_listener.cpp | 95 ++-- src/tcp_listener.hpp | 10 +- src/thread.cpp | 109 ++--- src/thread.hpp | 28 +- src/timers.cpp | 78 +-- src/timers.hpp | 10 +- src/trie.cpp | 246 +++++----- src/trie.hpp | 10 +- src/udp_address.cpp | 58 +-- src/udp_address.hpp | 10 +- src/udp_engine.cpp | 174 +++---- src/udp_engine.hpp | 26 +- src/v1_decoder.cpp | 46 +- src/v1_decoder.hpp | 8 +- src/v1_encoder.cpp | 14 +- src/v1_encoder.hpp | 2 +- src/v2_decoder.cpp | 60 +-- src/v2_decoder.hpp | 12 +- src/v2_encoder.cpp | 10 +- src/v2_encoder.hpp | 2 +- 
src/xpub.cpp | 167 +++---- src/xpub.hpp | 28 +- src/xsub.cpp | 68 +-- src/xsub.hpp | 12 +- src/ypipe.hpp | 50 +- src/yqueue.hpp | 98 ++-- src/zap_client.cpp | 4 +- src/zap_client.hpp | 2 +- 143 files changed, 5783 insertions(+), 4051 deletions(-) create mode 100644 src/router.cpp~RF40cad05.TMP create mode 100644 src/session_base.cpp~RF4069b78.TMP create mode 100644 src/tcp_connecter.cpp.orig diff --git a/.clang-tidy b/.clang-tidy index 72f27f24..a5059308 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -270,9 +270,9 @@ CheckOptions: - key: readability-identifier-naming.LocalVariableSuffix value: '' # - key: readability-identifier-naming.MemberCase - # value: aNy_CasE + # value: lower_case # - key: readability-identifier-naming.MemberPrefix - # value: '' + # value: '_' # - key: readability-identifier-naming.MemberSuffix # value: '' # - key: readability-identifier-naming.MethodCase @@ -299,12 +299,12 @@ CheckOptions: # value: '' - key: readability-identifier-naming.ParameterSuffix value: '_' - # - key: readability-identifier-naming.PrivateMemberCase - # value: aNy_CasE - # - key: readability-identifier-naming.PrivateMemberPrefix - # value: '' - # - key: readability-identifier-naming.PrivateMemberSuffix - # value: '' + - key: readability-identifier-naming.PrivateMemberCase + value: lower_case + - key: readability-identifier-naming.PrivateMemberPrefix + value: '_' + - key: readability-identifier-naming.PrivateMemberSuffix + value: '' # - key: readability-identifier-naming.PrivateMethodCase # value: aNy_CasE # - key: readability-identifier-naming.PrivateMethodPrefix diff --git a/src/array.hpp b/src/array.hpp index de252c06..2f7589f5 100644 --- a/src/array.hpp +++ b/src/array.hpp @@ -51,18 +51,18 @@ namespace zmq template class array_item_t { public: - inline array_item_t () : array_index (-1) {} + inline array_item_t () : _array_index (-1) {} // The destructor doesn't have to be virtual. It is made virtual // just to keep ICC and code checking tools from complaining. 
inline virtual ~array_item_t () {} - inline void set_array_index (int index_) { array_index = index_; } + inline void set_array_index (int index_) { _array_index = index_; } - inline int get_array_index () { return array_index; } + inline int get_array_index () { return _array_index; } private: - int array_index; + int _array_index; array_item_t (const array_item_t &); const array_item_t &operator= (const array_item_t &); @@ -81,17 +81,17 @@ template class array_t inline ~array_t () {} - inline size_type size () { return items.size (); } + inline size_type size () { return _items.size (); } - inline bool empty () { return items.empty (); } + inline bool empty () { return _items.empty (); } - inline T *&operator[] (size_type index_) { return items[index_]; } + inline T *&operator[] (size_type index_) { return _items[index_]; } inline void push_back (T *item_) { if (item_) - ((item_t *) item_)->set_array_index ((int) items.size ()); - items.push_back (item_); + ((item_t *) item_)->set_array_index ((int) _items.size ()); + _items.push_back (item_); } inline void erase (T *item_) @@ -101,22 +101,22 @@ template class array_t inline void erase (size_type index_) { - if (items.back ()) - ((item_t *) items.back ())->set_array_index ((int) index_); - items[index_] = items.back (); - items.pop_back (); + if (_items.back ()) + ((item_t *) _items.back ())->set_array_index ((int) index_); + _items[index_] = _items.back (); + _items.pop_back (); } inline void swap (size_type index1_, size_type index2_) { - if (items[index1_]) - ((item_t *) items[index1_])->set_array_index ((int) index2_); - if (items[index2_]) - ((item_t *) items[index2_])->set_array_index ((int) index1_); - std::swap (items[index1_], items[index2_]); + if (_items[index1_]) + ((item_t *) _items[index1_])->set_array_index ((int) index2_); + if (_items[index2_]) + ((item_t *) _items[index2_])->set_array_index ((int) index1_); + std::swap (_items[index1_], _items[index2_]); } - inline void clear () { items.clear 
(); } + inline void clear () { _items.clear (); } inline size_type index (T *item_) { @@ -125,7 +125,7 @@ template class array_t private: typedef std::vector items_t; - items_t items; + items_t _items; array_t (const array_t &); const array_t &operator= (const array_t &); diff --git a/src/atomic_counter.hpp b/src/atomic_counter.hpp index 63daa25d..1b0cc58f 100644 --- a/src/atomic_counter.hpp +++ b/src/atomic_counter.hpp @@ -90,33 +90,33 @@ class atomic_counter_t public: typedef uint32_t integer_t; - inline atomic_counter_t (integer_t value_ = 0) : value (value_) {} + inline atomic_counter_t (integer_t value_ = 0) : _value (value_) {} inline ~atomic_counter_t () {} - // Set counter value (not thread-safe). - inline void set (integer_t value_) { value = value_; } + // Set counter _value (not thread-safe). + inline void set (integer_t value_) { _value = value_; } - // Atomic addition. Returns the old value. + // Atomic addition. Returns the old _value. inline integer_t add (integer_t increment_) { integer_t old_value; #if defined ZMQ_ATOMIC_COUNTER_WINDOWS - old_value = InterlockedExchangeAdd ((LONG *) &value, increment_); + old_value = InterlockedExchangeAdd ((LONG *) &_value, increment_); #elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC - old_value = __atomic_fetch_add (&value, increment_, __ATOMIC_ACQ_REL); + old_value = __atomic_fetch_add (&_value, increment_, __ATOMIC_ACQ_REL); #elif defined ZMQ_ATOMIC_COUNTER_CXX11 - old_value = value.fetch_add (increment_, std::memory_order_acq_rel); + old_value = _value.fetch_add (increment_, std::memory_order_acq_rel); #elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H - integer_t new_value = atomic_add_32_nv (&value, increment_); + integer_t new_value = atomic_add_32_nv (&_value, increment_); old_value = new_value - increment_; #elif defined ZMQ_ATOMIC_COUNTER_TILE - old_value = arch_atomic_add (&value, increment_); + old_value = arch_atomic_add (&_value, increment_); #elif defined ZMQ_ATOMIC_COUNTER_X86 __asm__ volatile("lock; xadd %0, %1 
\n\t" - : "=r"(old_value), "=m"(value) - : "0"(increment_), "m"(value) + : "=r"(old_value), "=m"(_value) + : "0"(increment_), "m"(_value) : "cc", "memory"); #elif defined ZMQ_ATOMIC_COUNTER_ARM integer_t flag, tmp; @@ -128,13 +128,13 @@ class atomic_counter_t " bne 1b\n\t" " dmb sy\n\t" : "=&r"(old_value), "=&r"(flag), "=&r"(tmp), - "+Qo"(value) - : "Ir"(increment_), "r"(&value) + "+Qo"(_value) + : "Ir"(increment_), "r"(&_value) : "cc"); #elif defined ZMQ_ATOMIC_COUNTER_MUTEX sync.lock (); - old_value = value; - value += increment_; + old_value = _value; + _value += increment_; sync.unlock (); #else #error atomic_counter is not implemented for this platform @@ -147,26 +147,27 @@ class atomic_counter_t { #if defined ZMQ_ATOMIC_COUNTER_WINDOWS LONG delta = -((LONG) decrement_); - integer_t old = InterlockedExchangeAdd ((LONG *) &value, delta); + integer_t old = InterlockedExchangeAdd ((LONG *) &_value, delta); return old - decrement_ != 0; #elif defined ZMQ_ATOMIC_COUNTER_INTRINSIC integer_t nv = - __atomic_sub_fetch (&value, decrement_, __ATOMIC_ACQ_REL); + __atomic_sub_fetch (&_value, decrement_, __ATOMIC_ACQ_REL); return nv != 0; #elif defined ZMQ_ATOMIC_COUNTER_CXX11 - integer_t old = value.fetch_sub (decrement_, std::memory_order_acq_rel); + integer_t old = + _value.fetch_sub (decrement_, std::memory_order_acq_rel); return old - decrement_ != 0; #elif defined ZMQ_ATOMIC_COUNTER_ATOMIC_H int32_t delta = -((int32_t) decrement_); - integer_t nv = atomic_add_32_nv (&value, delta); + integer_t nv = atomic_add_32_nv (&_value, delta); return nv != 0; #elif defined ZMQ_ATOMIC_COUNTER_TILE int32_t delta = -((int32_t) decrement_); - integer_t nv = arch_atomic_add (&value, delta); + integer_t nv = arch_atomic_add (&_value, delta); return nv != 0; #elif defined ZMQ_ATOMIC_COUNTER_X86 integer_t oldval = -decrement_; - volatile integer_t *val = &value; + volatile integer_t *val = &_value; __asm__ volatile("lock; xaddl %0,%1" : "=r"(oldval), "=m"(*val) : "0"(oldval), "m"(*val) 
@@ -182,14 +183,14 @@ class atomic_counter_t " bne 1b\n\t" " dmb sy\n\t" : "=&r"(old_value), "=&r"(flag), "=&r"(tmp), - "+Qo"(value) - : "Ir"(decrement_), "r"(&value) + "+Qo"(_value) + : "Ir"(decrement_), "r"(&_value) : "cc"); return old_value - decrement != 0; #elif defined ZMQ_ATOMIC_COUNTER_MUTEX sync.lock (); - value -= decrement_; - bool result = value ? true : false; + _value -= decrement_; + bool result = _value ? true : false; sync.unlock (); return result; #else @@ -197,13 +198,13 @@ class atomic_counter_t #endif } - inline integer_t get () const { return value; } + inline integer_t get () const { return _value; } private: #if defined ZMQ_ATOMIC_COUNTER_CXX11 - std::atomic value; + std::atomic _value; #else - volatile integer_t value; + volatile integer_t _value; #endif #if defined ZMQ_ATOMIC_COUNTER_MUTEX diff --git a/src/atomic_ptr.hpp b/src/atomic_ptr.hpp index 2df69fb2..5b85073c 100644 --- a/src/atomic_ptr.hpp +++ b/src/atomic_ptr.hpp @@ -67,27 +67,27 @@ namespace zmq { #if !defined ZMQ_ATOMIC_PTR_CXX11 -inline void *atomic_xchg_ptr (void **ptr, +inline void *atomic_xchg_ptr (void **ptr_, void *const val_ #if defined ZMQ_ATOMIC_PTR_MUTEX , - mutex_t &sync + mutex_t &_sync #endif ) { #if defined ZMQ_ATOMIC_PTR_WINDOWS - return InterlockedExchangePointer ((PVOID *) ptr, val_); + return InterlockedExchangePointer ((PVOID *) ptr_, val_); #elif defined ZMQ_ATOMIC_PTR_INTRINSIC - return __atomic_exchange_n (ptr, val_, __ATOMIC_ACQ_REL); + return __atomic_exchange_n (ptr_, val_, __ATOMIC_ACQ_REL); #elif defined ZMQ_ATOMIC_PTR_ATOMIC_H - return atomic_swap_ptr (ptr, val_); + return atomic_swap_ptr (ptr_, val_); #elif defined ZMQ_ATOMIC_PTR_TILE - return arch_atomic_exchange (ptr, val_); + return arch_atomic_exchange (ptr_, val_); #elif defined ZMQ_ATOMIC_PTR_X86 void *old; __asm__ volatile("lock; xchg %0, %2" - : "=r"(old), "=m"(*ptr) - : "m"(*ptr), "0"(val_)); + : "=r"(old), "=m"(*ptr_) + : "m"(*ptr_), "0"(val_)); return old; #elif defined ZMQ_ATOMIC_PTR_ARM 
void *old; @@ -98,15 +98,15 @@ inline void *atomic_xchg_ptr (void **ptr, " teq %0, #0\n\t" " bne 1b\n\t" " dmb sy\n\t" - : "=&r"(flag), "=&r"(old), "+Qo"(*ptr) - : "r"(ptr), "r"(val_) + : "=&r"(flag), "=&r"(old), "+Qo"(*ptr_) + : "r"(ptr_), "r"(val_) : "cc"); return old; #elif defined ZMQ_ATOMIC_PTR_MUTEX - sync.lock (); - void *old = *ptr; - *ptr = val_; - sync.unlock (); + _sync.lock (); + void *old = *ptr_; + *ptr_ = val_; + _sync.unlock (); return old; #else #error atomic_ptr is not implemented for this platform @@ -118,7 +118,7 @@ inline void *atomic_cas (void *volatile *ptr_, void *val_ #if defined ZMQ_ATOMIC_PTR_MUTEX , - mutex_t &sync + mutex_t &_sync #endif ) { @@ -158,11 +158,11 @@ inline void *atomic_cas (void *volatile *ptr_, : "cc"); return old; #elif defined ZMQ_ATOMIC_PTR_MUTEX - sync.lock (); + _sync.lock (); void *old = *ptr_; if (*ptr_ == cmp_) *ptr_ = val_; - sync.unlock (); + _sync.unlock (); return old; #else #error atomic_ptr is not implemented for this platform @@ -176,7 +176,7 @@ template class atomic_ptr_t { public: // Initialise atomic pointer - inline atomic_ptr_t () { ptr = NULL; } + inline atomic_ptr_t () { _ptr = NULL; } // Destroy atomic pointer inline ~atomic_ptr_t () {} @@ -184,19 +184,19 @@ template class atomic_ptr_t // Set value of atomic pointer in a non-threadsafe way // Use this function only when you are sure that at most one // thread is accessing the pointer at the moment. - inline void set (T *ptr_) { this->ptr = ptr_; } + inline void set (T *ptr_) { _ptr = ptr_; } // Perform atomic 'exchange pointers' operation. Pointer is set - // to the 'val' value. Old value is returned. + // to the 'val_' value. Old value is returned. 
inline T *xchg (T *val_) { #if defined ZMQ_ATOMIC_PTR_CXX11 - return ptr.exchange (val_, std::memory_order_acq_rel); + return _ptr.exchange (val_, std::memory_order_acq_rel); #else - return (T *) atomic_xchg_ptr ((void **) &ptr, val_ + return (T *) atomic_xchg_ptr ((void **) &_ptr, val_ #if defined ZMQ_ATOMIC_PTR_MUTEX , - sync + _sync #endif ); #endif @@ -204,18 +204,18 @@ template class atomic_ptr_t // Perform atomic 'compare and swap' operation on the pointer. // The pointer is compared to 'cmp' argument and if they are - // equal, its value is set to 'val'. Old value of the pointer + // equal, its value is set to 'val_'. Old value of the pointer // is returned. inline T *cas (T *cmp_, T *val_) { #if defined ZMQ_ATOMIC_PTR_CXX11 - ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel); + _ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel); return cmp_; #else - return (T *) atomic_cas ((void **) &ptr, cmp_, val_ + return (T *) atomic_cas ((void **) &_ptr, cmp_, val_ #if defined ZMQ_ATOMIC_PTR_MUTEX , - sync + _sync #endif ); #endif @@ -223,13 +223,13 @@ template class atomic_ptr_t private: #if defined ZMQ_ATOMIC_PTR_CXX11 - std::atomic ptr; + std::atomic _ptr; #else - volatile T *ptr; + volatile T *_ptr; #endif #if defined ZMQ_ATOMIC_PTR_MUTEX - mutex_t sync; + mutex_t _sync; #endif #if !defined ZMQ_ATOMIC_PTR_CXX11 @@ -240,19 +240,19 @@ template class atomic_ptr_t struct atomic_value_t { - atomic_value_t (const int value_) : value (value_) {} + atomic_value_t (const int value_) : _value (value_) {} - atomic_value_t (const atomic_value_t &src_) : value (src_.load ()) {} + atomic_value_t (const atomic_value_t &src_) : _value (src_.load ()) {} void store (const int value_) { #if defined ZMQ_ATOMIC_PTR_CXX11 - value.store (value_, std::memory_order_release); + _value.store (value_, std::memory_order_release); #else - atomic_xchg_ptr ((void **) &value, (void *) (ptrdiff_t) value_ + atomic_xchg_ptr ((void **) &_value, (void *) (ptrdiff_t) 
value_ #if defined ZMQ_ATOMIC_PTR_MUTEX , - sync + _sync #endif ); #endif @@ -261,15 +261,15 @@ struct atomic_value_t int load () const { #if defined ZMQ_ATOMIC_PTR_CXX11 - return value.load (std::memory_order_acquire); + return _value.load (std::memory_order_acquire); #else - return (int) (ptrdiff_t) atomic_cas ((void **) &value, 0, 0 + return (int) (ptrdiff_t) atomic_cas ((void **) &_value, 0, 0 #if defined ZMQ_ATOMIC_PTR_MUTEX , #if defined __SUNPRO_CC - const_cast (sync) + const_cast (_sync) #else - sync + _sync #endif #endif ); @@ -278,13 +278,13 @@ struct atomic_value_t private: #if defined ZMQ_ATOMIC_PTR_CXX11 - std::atomic value; + std::atomic _value; #else - volatile ptrdiff_t value; + volatile ptrdiff_t _value; #endif #if defined ZMQ_ATOMIC_PTR_MUTEX - mutable mutex_t sync; + mutable mutex_t _sync; #endif private: diff --git a/src/blob.hpp b/src/blob.hpp index 900da977..43217141 100644 --- a/src/blob.hpp +++ b/src/blob.hpp @@ -71,26 +71,26 @@ struct reference_tag_t struct blob_t { // Creates an empty blob_t. - blob_t () : data_ (0), size_ (0), owned_ (true) {} + blob_t () : _data (0), _size (0), _owned (true) {} // Creates a blob_t of a given size, with uninitialized content. explicit blob_t (const size_t size_) : - data_ (static_cast (malloc (size_))), - size_ (size_), - owned_ (true) + _data (static_cast (malloc (size_))), + _size (size_), + _owned (true) { - alloc_assert (data_); + alloc_assert (_data); } // Creates a blob_t of a given size, an initializes content by copying // from another buffer. 
blob_t (const unsigned char *const data_, const size_t size_) : - data_ (static_cast (malloc (size_))), - size_ (size_), - owned_ (true) + _data (static_cast (malloc (size_))), + _size (size_), + _owned (true) { - alloc_assert (this->data_); - memcpy (this->data_, data_, size_); + alloc_assert (_data); + memcpy (_data, data_, size_); } // Creates a blob_t for temporary use that only references a @@ -98,65 +98,65 @@ struct blob_t // Use with caution and ensure that the blob_t will not outlive // the referenced data. blob_t (unsigned char *const data_, const size_t size_, reference_tag_t) : - data_ (data_), - size_ (size_), - owned_ (false) + _data (data_), + _size (size_), + _owned (false) { } // Returns the size of the blob_t. - size_t size () const { return size_; } + size_t size () const { return _size; } // Returns a pointer to the data of the blob_t. - const unsigned char *data () const { return data_; } + const unsigned char *data () const { return _data; } // Returns a pointer to the data of the blob_t. - unsigned char *data () { return data_; } + unsigned char *data () { return _data; } // Defines an order relationship on blob_t. bool operator< (blob_t const &other_) const { int cmpres = - memcmp (data_, other_.data_, std::min (size_, other_.size_)); - return cmpres < 0 || (cmpres == 0 && size_ < other_.size_); + memcmp (_data, other_._data, std::min (_size, other_._size)); + return cmpres < 0 || (cmpres == 0 && _size < other_._size); } // Sets a blob_t to a deep copy of another blob_t. void set_deep_copy (blob_t const &other_) { clear (); - data_ = static_cast (malloc (other_.size_)); - alloc_assert (data_); - size_ = other_.size_; - owned_ = true; - memcpy (data_, other_.data_, size_); + _data = static_cast (malloc (other_._size)); + alloc_assert (_data); + _size = other_._size; + _owned = true; + memcpy (_data, other_._data, _size); } // Sets a blob_t to a copy of a given buffer. 
void set (const unsigned char *const data_, const size_t size_) { clear (); - this->data_ = static_cast (malloc (size_)); - alloc_assert (this->data_); - this->size_ = size_; - owned_ = true; - memcpy (this->data_, data_, size_); + _data = static_cast (malloc (size_)); + alloc_assert (_data); + _size = size_; + _owned = true; + memcpy (_data, data_, size_); } // Empties a blob_t. void clear () { - if (owned_) { - free (data_); + if (_owned) { + free (_data); } - data_ = 0; - size_ = 0; + _data = 0; + _size = 0; } ~blob_t () { - if (owned_) { - free (data_); + if (_owned) { + free (_data); } } @@ -165,25 +165,25 @@ struct blob_t blob_t &operator= (const blob_t &) = delete; blob_t (blob_t &&other_) : - data_ (other_.data_), - size_ (other_.size_), - owned_ (other_.owned_) + _data (other_._data), + _size (other_._size), + _owned (other_._owned) { - other_.owned_ = false; + other_._owned = false; } blob_t &operator= (blob_t &&other_) { if (this != &other_) { clear (); - data_ = other_.data_; - size_ = other_.size_; - owned_ = other_.owned_; - other_.owned_ = false; + _data = other_._data; + _size = other_._size; + _owned = other_._owned; + other_._owned = false; } return *this; } #else - blob_t (const blob_t &other) : owned_ (false) { set_deep_copy (other); } + blob_t (const blob_t &other) : _owned (false) { set_deep_copy (other); } blob_t &operator= (const blob_t &other) { if (this != &other) { @@ -195,9 +195,9 @@ struct blob_t #endif private: - unsigned char *data_; - size_t size_; - bool owned_; + unsigned char *_data; + size_t _size; + bool _owned; }; } diff --git a/src/client.cpp b/src/client.cpp index 7d49be36..9ec1a5f9 100644 --- a/src/client.cpp +++ b/src/client.cpp @@ -49,8 +49,8 @@ void zmq::client_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) zmq_assert (pipe_); - fq.attach (pipe_); - lb.attach (pipe_); + _fq.attach (pipe_); + _lb.attach (pipe_); } int zmq::client_t::xsend (msg_t *msg_) @@ -60,24 +60,24 @@ int zmq::client_t::xsend (msg_t *msg_) 
errno = EINVAL; return -1; } - return lb.sendpipe (msg_, NULL); + return _lb.sendpipe (msg_, NULL); } int zmq::client_t::xrecv (msg_t *msg_) { - int rc = fq.recvpipe (msg_, NULL); + int rc = _fq.recvpipe (msg_, NULL); // Drop any messages with more flag while (rc == 0 && msg_->flags () & msg_t::more) { // drop all frames of the current multi-frame message - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); while (rc == 0 && msg_->flags () & msg_t::more) - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); // get the new message if (rc == 0) - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); } return rc; @@ -85,31 +85,31 @@ int zmq::client_t::xrecv (msg_t *msg_) bool zmq::client_t::xhas_in () { - return fq.has_in (); + return _fq.has_in (); } bool zmq::client_t::xhas_out () { - return lb.has_out (); + return _lb.has_out (); } const zmq::blob_t &zmq::client_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } void zmq::client_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::client_t::xwrite_activated (pipe_t *pipe_) { - lb.activated (pipe_); + _lb.activated (pipe_); } void zmq::client_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); - lb.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); + _lb.pipe_terminated (pipe_); } diff --git a/src/client.hpp b/src/client.hpp index 9fb8aefc..162e36dd 100644 --- a/src/client.hpp +++ b/src/client.hpp @@ -62,8 +62,8 @@ class client_t : public socket_base_t private: // Messages are fair-queued from inbound pipes. And load-balanced to // the outbound pipes. 
- fq_t fq; - lb_t lb; + fq_t _fq; + lb_t _lb; client_t (const client_t &); const client_t &operator= (const client_t &); diff --git a/src/clock.cpp b/src/clock.cpp index 5e89149e..4cfcfc2a 100644 --- a/src/clock.cpp +++ b/src/clock.cpp @@ -127,11 +127,11 @@ const uint64_t usecs_per_sec = 1000000; const uint64_t nsecs_per_usec = 1000; zmq::clock_t::clock_t () : - last_tsc (rdtsc ()), + _last_tsc (rdtsc ()), #ifdef ZMQ_HAVE_WINDOWS - last_time (static_cast ((*my_get_tick_count64) ())) + _last_time (static_cast ((*my_get_tick_count64) ())) #else - last_time (now_us () / usecs_per_msec) + _last_time (now_us () / usecs_per_msec) #endif { } @@ -218,16 +218,16 @@ uint64_t zmq::clock_t::now_ms () // If TSC haven't jumped back (in case of migration to a different // CPU core) and if not too much time elapsed since last measurement, // we can return cached time value. - if (likely (tsc - last_tsc <= (clock_precision / 2) && tsc >= last_tsc)) - return last_time; + if (likely (tsc - _last_tsc <= (clock_precision / 2) && tsc >= _last_tsc)) + return _last_time; - last_tsc = tsc; + _last_tsc = tsc; #ifdef ZMQ_HAVE_WINDOWS - last_time = static_cast ((*my_get_tick_count64) ()); + _last_time = static_cast ((*my_get_tick_count64) ()); #else - last_time = now_us () / usecs_per_msec; + _last_time = now_us () / usecs_per_msec; #endif - return last_time; + return _last_time; } uint64_t zmq::clock_t::rdtsc () diff --git a/src/clock.hpp b/src/clock.hpp index 9d680984..df17fea3 100644 --- a/src/clock.hpp +++ b/src/clock.hpp @@ -67,10 +67,10 @@ class clock_t private: // TSC timestamp of when last time measurement was made. - uint64_t last_tsc; + uint64_t _last_tsc; // Physical time corresponding to the TSC above (in milliseconds). 
- uint64_t last_time; + uint64_t _last_time; clock_t (const clock_t &); const clock_t &operator= (const clock_t &); diff --git a/src/condition_variable.hpp b/src/condition_variable.hpp index d65378fb..e5c72243 100644 --- a/src/condition_variable.hpp +++ b/src/condition_variable.hpp @@ -93,13 +93,13 @@ namespace zmq class condition_variable_t { public: - inline condition_variable_t () { InitializeConditionVariable (&cv); } + inline condition_variable_t () { InitializeConditionVariable (&_cv); } inline ~condition_variable_t () {} inline int wait (mutex_t *mutex_, int timeout_) { - int rc = SleepConditionVariableCS (&cv, mutex_->get_cs (), timeout_); + int rc = SleepConditionVariableCS (&_cv, mutex_->get_cs (), timeout_); if (rc != 0) return 0; @@ -113,10 +113,10 @@ class condition_variable_t return -1; } - inline void broadcast () { WakeAllConditionVariable (&cv); } + inline void broadcast () { WakeAllConditionVariable (&_cv); } private: - CONDITION_VARIABLE cv; + CONDITION_VARIABLE _cv; // Disable copy construction and assignment. 
condition_variable_t (const condition_variable_t &); @@ -132,13 +132,13 @@ class condition_variable_t inline int wait (mutex_t *mutex_, int timeout_) { - std::unique_lock lck (mtx); // lock mtx - mutex_->unlock (); // unlock mutex_ + std::unique_lock lck (_mtx); // lock mtx + mutex_->unlock (); // unlock mutex_ int res = 0; if (timeout_ == -1) { - cv.wait ( + _cv.wait ( lck); // unlock mtx and wait cv.notify_all(), lock mtx after cv.notify_all() - } else if (cv.wait_for (lck, std::chrono::milliseconds (timeout_)) + } else if (_cv.wait_for (lck, std::chrono::milliseconds (timeout_)) == std::cv_status::timeout) { // time expired errno = EAGAIN; @@ -151,13 +151,13 @@ class condition_variable_t inline void broadcast () { - std::unique_lock lck (mtx); // lock mtx - cv.notify_all (); + std::unique_lock lck (_mtx); // lock mtx + _cv.notify_all (); } private: - std::condition_variable cv; - std::mutex mtx; + std::condition_variable _cv; + std::mutex _mtx; // Disable copy construction and assignment. 
condition_variable_t (const condition_variable_t &); @@ -182,9 +182,9 @@ class condition_variable_t inline ~condition_variable_t () { - scoped_lock_t l (m_listenersMutex); - for (size_t i = 0; i < m_listeners.size (); i++) { - semDelete (m_listeners[i]); + scoped_lock_t l (_listenersMutex); + for (size_t i = 0; i < _listeners.size (); i++) { + semDelete (_listeners[i]); } } @@ -198,8 +198,8 @@ class condition_variable_t SEM_ID sem = semBCreate (SEM_Q_PRIORITY, SEM_EMPTY); { - scoped_lock_t l (m_listenersMutex); - m_listeners.push_back (sem); + scoped_lock_t l (_listenersMutex); + _listeners.push_back (sem); } mutex_->unlock (); @@ -213,11 +213,11 @@ class condition_variable_t } { - scoped_lock_t l (m_listenersMutex); + scoped_lock_t l (_listenersMutex); // remove sem from listeners - for (size_t i = 0; i < m_listeners.size (); i++) { - if (m_listeners[i] == sem) { - m_listeners.erase (m_listeners.begin () + i); + for (size_t i = 0; i < _listeners.size (); i++) { + if (_listeners[i] == sem) { + _listeners.erase (_listeners.begin () + i); break; } } @@ -238,15 +238,15 @@ class condition_variable_t inline void broadcast () { - scoped_lock_t l (m_listenersMutex); - for (size_t i = 0; i < m_listeners.size (); i++) { - semGive (m_listeners[i]); + scoped_lock_t l (_listenersMutex); + for (size_t i = 0; i < _listeners.size (); i++) { + semGive (_listeners[i]); } } private: - mutex_t m_listenersMutex; - std::vector m_listeners; + mutex_t _listenersMutex; + std::vector _listeners; // Disable copy construction and assignment. 
condition_variable_t (const condition_variable_t &); @@ -276,13 +276,13 @@ class condition_variable_t #if !defined(ZMQ_HAVE_OSX) && !defined(ANDROID_LEGACY) pthread_condattr_setclock (&attr, CLOCK_MONOTONIC); #endif - int rc = pthread_cond_init (&cond, &attr); + int rc = pthread_cond_init (&_cond, &attr); posix_assert (rc); } inline ~condition_variable_t () { - int rc = pthread_cond_destroy (&cond); + int rc = pthread_cond_destroy (&_cond); posix_assert (rc); } @@ -309,15 +309,16 @@ class condition_variable_t } #ifdef ZMQ_HAVE_OSX rc = pthread_cond_timedwait_relative_np ( - &cond, mutex_->get_mutex (), &timeout); + &_cond, mutex_->get_mutex (), &timeout); #elif defined(ANDROID_LEGACY) rc = pthread_cond_timedwait_monotonic_np ( - &cond, mutex_->get_mutex (), &timeout); + &_cond, mutex_->get_mutex (), &timeout); #else - rc = pthread_cond_timedwait (&cond, mutex_->get_mutex (), &timeout); + rc = + pthread_cond_timedwait (&_cond, mutex_->get_mutex (), &timeout); #endif } else - rc = pthread_cond_wait (&cond, mutex_->get_mutex ()); + rc = pthread_cond_wait (&_cond, mutex_->get_mutex ()); if (rc == 0) return 0; @@ -333,12 +334,12 @@ class condition_variable_t inline void broadcast () { - int rc = pthread_cond_broadcast (&cond); + int rc = pthread_cond_broadcast (&_cond); posix_assert (rc); } private: - pthread_cond_t cond; + pthread_cond_t _cond; // Disable copy construction and assignment. 
condition_variable_t (const condition_variable_t &); diff --git a/src/ctx.cpp b/src/ctx.cpp index 8983e021..545c98a5 100644 --- a/src/ctx.cpp +++ b/src/ctx.cpp @@ -66,25 +66,25 @@ int clipped_maxsocket (int max_requested_) } zmq::ctx_t::ctx_t () : - tag (ZMQ_CTX_TAG_VALUE_GOOD), - starting (true), - terminating (false), - reaper (NULL), - slot_count (0), - slots (NULL), - max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)), - max_msgsz (INT_MAX), - io_thread_count (ZMQ_IO_THREADS_DFLT), - blocky (true), - ipv6 (false), - zero_copy (true) + _tag (ZMQ_CTX_TAG_VALUE_GOOD), + _starting (true), + _terminating (false), + _reaper (NULL), + _slot_count (0), + _slots (NULL), + _max_sockets (clipped_maxsocket (ZMQ_MAX_SOCKETS_DFLT)), + _max_msgsz (INT_MAX), + _io_thread_count (ZMQ_IO_THREADS_DFLT), + _blocky (true), + _ipv6 (false), + _zero_copy (true) { #ifdef HAVE_FORK - pid = getpid (); + _pid = getpid (); #endif #ifdef ZMQ_HAVE_VMCI - vmci_fd = -1; - vmci_family = -1; + _vmci_fd = -1; + _vmci_family = -1; #endif // Initialise crypto library, if needed. @@ -93,54 +93,54 @@ zmq::ctx_t::ctx_t () : bool zmq::ctx_t::check_tag () { - return tag == ZMQ_CTX_TAG_VALUE_GOOD; + return _tag == ZMQ_CTX_TAG_VALUE_GOOD; } zmq::ctx_t::~ctx_t () { - // Check that there are no remaining sockets. - zmq_assert (sockets.empty ()); + // Check that there are no remaining _sockets. + zmq_assert (_sockets.empty ()); // Ask I/O threads to terminate. If stop signal wasn't sent to I/O // thread subsequent invocation of destructor would hang-up. - for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) { - io_threads[i]->stop (); + for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) { + _io_threads[i]->stop (); } // Wait till I/O threads actually terminate. 
- for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) { - LIBZMQ_DELETE (io_threads[i]); + for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) { + LIBZMQ_DELETE (_io_threads[i]); } // Deallocate the reaper thread object. - LIBZMQ_DELETE (reaper); + LIBZMQ_DELETE (_reaper); // Deallocate the array of mailboxes. No special work is // needed as mailboxes themselves were deallocated with their // corresponding io_thread/socket objects. - free (slots); + free (_slots); // De-initialise crypto library, if needed. zmq::random_close (); // Remove the tag, so that the object is considered dead. - tag = ZMQ_CTX_TAG_VALUE_BAD; + _tag = ZMQ_CTX_TAG_VALUE_BAD; } bool zmq::ctx_t::valid () const { - return term_mailbox.valid (); + return _term_mailbox.valid (); } int zmq::ctx_t::terminate () { - slot_sync.lock (); + _slot_sync.lock (); - bool save_terminating = terminating; - terminating = false; + bool save_terminating = _terminating; + _terminating = false; // Connect up any pending inproc connections, otherwise we will hang - pending_connections_t copy = pending_connections; + pending_connections_t copy = _pending_connections; for (pending_connections_t::iterator p = copy.begin (); p != copy.end (); ++p) { zmq::socket_base_t *s = create_socket (ZMQ_PAIR); @@ -149,57 +149,57 @@ int zmq::ctx_t::terminate () s->bind (p->first.c_str ()); s->close (); } - terminating = save_terminating; + _terminating = save_terminating; - if (!starting) { + if (!_starting) { #ifdef HAVE_FORK - if (pid != getpid ()) { + if (_pid != getpid ()) { // we are a forked child process. Close all file descriptors // inherited from the parent. 
- for (sockets_t::size_type i = 0; i != sockets.size (); i++) - sockets[i]->get_mailbox ()->forked (); + for (sockets_t::size_type i = 0; i != _sockets.size (); i++) + _sockets[i]->get_mailbox ()->forked (); - term_mailbox.forked (); + _term_mailbox.forked (); } #endif // Check whether termination was already underway, but interrupted and now // restarted. - bool restarted = terminating; - terminating = true; + bool restarted = _terminating; + _terminating = true; // First attempt to terminate the context. if (!restarted) { // First send stop command to sockets so that any blocking calls // can be interrupted. If there are no sockets we can ask reaper // thread to stop. - for (sockets_t::size_type i = 0; i != sockets.size (); i++) - sockets[i]->stop (); - if (sockets.empty ()) - reaper->stop (); + for (sockets_t::size_type i = 0; i != _sockets.size (); i++) + _sockets[i]->stop (); + if (_sockets.empty ()) + _reaper->stop (); } - slot_sync.unlock (); + _slot_sync.unlock (); // Wait till reaper thread closes all the sockets. command_t cmd; - int rc = term_mailbox.recv (&cmd, -1); + int rc = _term_mailbox.recv (&cmd, -1); if (rc == -1 && errno == EINTR) return -1; errno_assert (rc == 0); zmq_assert (cmd.type == command_t::done); - slot_sync.lock (); - zmq_assert (sockets.empty ()); + _slot_sync.lock (); + zmq_assert (_sockets.empty ()); } - slot_sync.unlock (); + _slot_sync.unlock (); #ifdef ZMQ_HAVE_VMCI - vmci_sync.lock (); + _vmci_sync.lock (); - VMCISock_ReleaseAFValueFd (vmci_fd); - vmci_family = -1; - vmci_fd = -1; + VMCISock_ReleaseAFValueFd (_vmci_fd); + _vmci_family = -1; + _vmci_fd = -1; - vmci_sync.unlock (); + _vmci_sync.unlock (); #endif // Deallocate the resources. 
@@ -210,18 +210,18 @@ int zmq::ctx_t::terminate () int zmq::ctx_t::shutdown () { - scoped_lock_t locker (slot_sync); + scoped_lock_t locker (_slot_sync); - if (!starting && !terminating) { - terminating = true; + if (!_starting && !_terminating) { + _terminating = true; // Send stop command to sockets so that any blocking calls // can be interrupted. If there are no sockets we can ask reaper // thread to stop. - for (sockets_t::size_type i = 0; i != sockets.size (); i++) - sockets[i]->stop (); - if (sockets.empty ()) - reaper->stop (); + for (sockets_t::size_type i = 0; i != _sockets.size (); i++) + _sockets[i]->stop (); + if (_sockets.empty ()) + _reaper->stop (); } return 0; @@ -232,23 +232,23 @@ int zmq::ctx_t::set (int option_, int optval_) int rc = 0; if (option_ == ZMQ_MAX_SOCKETS && optval_ >= 1 && optval_ == clipped_maxsocket (optval_)) { - scoped_lock_t locker (opt_sync); - max_sockets = optval_; + scoped_lock_t locker (_opt_sync); + _max_sockets = optval_; } else if (option_ == ZMQ_IO_THREADS && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - io_thread_count = optval_; + scoped_lock_t locker (_opt_sync); + _io_thread_count = optval_; } else if (option_ == ZMQ_IPV6 && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - ipv6 = (optval_ != 0); + scoped_lock_t locker (_opt_sync); + _ipv6 = (optval_ != 0); } else if (option_ == ZMQ_BLOCKY && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - blocky = (optval_ != 0); + scoped_lock_t locker (_opt_sync); + _blocky = (optval_ != 0); } else if (option_ == ZMQ_MAX_MSGSZ && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - max_msgsz = optval_ < INT_MAX ? optval_ : INT_MAX; + scoped_lock_t locker (_opt_sync); + _max_msgsz = optval_ < INT_MAX ? 
optval_ : INT_MAX; } else if (option_ == ZMQ_ZERO_COPY_RECV && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - zero_copy = (optval_ != 0); + scoped_lock_t locker (_opt_sync); + _zero_copy = (optval_ != 0); } else { rc = thread_ctx_t::set (option_, optval_); } @@ -259,21 +259,21 @@ int zmq::ctx_t::get (int option_) { int rc = 0; if (option_ == ZMQ_MAX_SOCKETS) - rc = max_sockets; + rc = _max_sockets; else if (option_ == ZMQ_SOCKET_LIMIT) rc = clipped_maxsocket (65535); else if (option_ == ZMQ_IO_THREADS) - rc = io_thread_count; + rc = _io_thread_count; else if (option_ == ZMQ_IPV6) - rc = ipv6; + rc = _ipv6; else if (option_ == ZMQ_BLOCKY) - rc = blocky; + rc = _blocky; else if (option_ == ZMQ_MAX_MSGSZ) - rc = max_msgsz; + rc = _max_msgsz; else if (option_ == ZMQ_MSG_T_SIZE) rc = sizeof (zmq_msg_t); else if (option_ == ZMQ_ZERO_COPY_RECV) { - rc = zero_copy; + rc = _zero_copy; } else { errno = EINVAL; rc = -1; @@ -285,36 +285,36 @@ bool zmq::ctx_t::start () { // Initialise the array of mailboxes. Additional three slots are for // zmq_ctx_term thread and reaper thread. - opt_sync.lock (); - int mazmq = max_sockets; - int ios = io_thread_count; - opt_sync.unlock (); - slot_count = mazmq + ios + 2; - slots = - static_cast (malloc (sizeof (i_mailbox *) * slot_count)); - if (!slots) { + _opt_sync.lock (); + int mazmq = _max_sockets; + int ios = _io_thread_count; + _opt_sync.unlock (); + _slot_count = mazmq + ios + 2; + _slots = + static_cast (malloc (sizeof (i_mailbox *) * _slot_count)); + if (!_slots) { errno = ENOMEM; goto fail; } // Initialise the infrastructure for zmq_ctx_term thread. - slots[term_tid] = &term_mailbox; + _slots[term_tid] = &_term_mailbox; // Create the reaper thread. 
- reaper = new (std::nothrow) reaper_t (this, reaper_tid); - if (!reaper) { + _reaper = new (std::nothrow) reaper_t (this, reaper_tid); + if (!_reaper) { errno = ENOMEM; goto fail_cleanup_slots; } - if (!reaper->get_mailbox ()->valid ()) + if (!_reaper->get_mailbox ()->valid ()) goto fail_cleanup_reaper; - slots[reaper_tid] = reaper->get_mailbox (); - reaper->start (); + _slots[reaper_tid] = _reaper->get_mailbox (); + _reaper->start (); // Create I/O thread objects and launch them. - for (int32_t i = static_cast (slot_count) - 1; + for (int32_t i = static_cast (_slot_count) - 1; i >= static_cast (2); i--) { - slots[i] = NULL; + _slots[i] = NULL; } for (int i = 2; i != ios + 2; i++) { @@ -327,28 +327,28 @@ bool zmq::ctx_t::start () delete io_thread; goto fail_cleanup_reaper; } - io_threads.push_back (io_thread); - slots[i] = io_thread->get_mailbox (); + _io_threads.push_back (io_thread); + _slots[i] = io_thread->get_mailbox (); io_thread->start (); } // In the unused part of the slot array, create a list of empty slots. - for (int32_t i = static_cast (slot_count) - 1; + for (int32_t i = static_cast (_slot_count) - 1; i >= static_cast (ios) + 2; i--) { - empty_slots.push_back (i); + _empty_slots.push_back (i); } - starting = false; + _starting = false; return true; fail_cleanup_reaper: - reaper->stop (); - delete reaper; - reaper = NULL; + _reaper->stop (); + delete _reaper; + _reaper = NULL; fail_cleanup_slots: - free (slots); - slots = NULL; + free (_slots); + _slots = NULL; fail: return false; @@ -356,28 +356,28 @@ fail: zmq::socket_base_t *zmq::ctx_t::create_socket (int type_) { - scoped_lock_t locker (slot_sync); + scoped_lock_t locker (_slot_sync); - if (unlikely (starting)) { + if (unlikely (_starting)) { if (!start ()) return NULL; } // Once zmq_ctx_term() was called, we can't create new sockets. - if (terminating) { + if (_terminating) { errno = ETERM; return NULL; } // If max_sockets limit was reached, return error. 
- if (empty_slots.empty ()) { + if (_empty_slots.empty ()) { errno = EMFILE; return NULL; } // Choose a slot for the socket. - uint32_t slot = empty_slots.back (); - empty_slots.pop_back (); + uint32_t slot = _empty_slots.back (); + _empty_slots.pop_back (); // Generate new unique socket ID. int sid = (static_cast (max_socket_id.add (1))) + 1; @@ -385,41 +385,41 @@ zmq::socket_base_t *zmq::ctx_t::create_socket (int type_) // Create the socket and register its mailbox. socket_base_t *s = socket_base_t::create (type_, this, slot, sid); if (!s) { - empty_slots.push_back (slot); + _empty_slots.push_back (slot); return NULL; } - sockets.push_back (s); - slots[slot] = s->get_mailbox (); + _sockets.push_back (s); + _slots[slot] = s->get_mailbox (); return s; } void zmq::ctx_t::destroy_socket (class socket_base_t *socket_) { - scoped_lock_t locker (slot_sync); + scoped_lock_t locker (_slot_sync); // Free the associated thread slot. uint32_t tid = socket_->get_tid (); - empty_slots.push_back (tid); - slots[tid] = NULL; + _empty_slots.push_back (tid); + _slots[tid] = NULL; // Remove the socket from the list of sockets. - sockets.erase (socket_); + _sockets.erase (socket_); // If zmq_ctx_term() was already called and there are no more socket // we can ask reaper thread to terminate. 
- if (terminating && sockets.empty ()) - reaper->stop (); + if (_terminating && _sockets.empty ()) + _reaper->stop (); } zmq::object_t *zmq::ctx_t::get_reaper () { - return reaper; + return _reaper; } zmq::thread_ctx_t::thread_ctx_t () : - thread_priority (ZMQ_THREAD_PRIORITY_DFLT), - thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT) + _thread_priority (ZMQ_THREAD_PRIORITY_DFLT), + _thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT) { } @@ -429,13 +429,13 @@ void zmq::thread_ctx_t::start_thread (thread_t &thread_, { static unsigned int nthreads_started = 0; - thread_.setSchedulingParameters (thread_priority, thread_sched_policy, - thread_affinity_cpus); + thread_.setSchedulingParameters (_thread_priority, _thread_sched_policy, + _thread_affinity_cpus); thread_.start (tfn_, arg_); #ifndef ZMQ_HAVE_ANDROID std::ostringstream s; - if (!thread_name_prefix.empty ()) - s << thread_name_prefix << "/"; + if (!_thread_name_prefix.empty ()) + s << _thread_name_prefix << "/"; s << "ZMQbg/" << nthreads_started; thread_.setThreadName (s.str ().c_str ()); #endif @@ -446,16 +446,16 @@ int zmq::thread_ctx_t::set (int option_, int optval_) { int rc = 0; if (option_ == ZMQ_THREAD_SCHED_POLICY && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - thread_sched_policy = optval_; + scoped_lock_t locker (_opt_sync); + _thread_sched_policy = optval_; } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_ADD && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - thread_affinity_cpus.insert (optval_); + scoped_lock_t locker (_opt_sync); + _thread_affinity_cpus.insert (optval_); } else if (option_ == ZMQ_THREAD_AFFINITY_CPU_REMOVE && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - std::set::iterator it = thread_affinity_cpus.find (optval_); - if (it != thread_affinity_cpus.end ()) { - thread_affinity_cpus.erase (it); + scoped_lock_t locker (_opt_sync); + std::set::iterator it = _thread_affinity_cpus.find (optval_); + if (it != _thread_affinity_cpus.end ()) { + _thread_affinity_cpus.erase 
(it); } else { errno = EINVAL; rc = -1; @@ -463,11 +463,11 @@ int zmq::thread_ctx_t::set (int option_, int optval_) } else if (option_ == ZMQ_THREAD_NAME_PREFIX && optval_ >= 0) { std::ostringstream s; s << optval_; - scoped_lock_t locker (opt_sync); - thread_name_prefix = s.str (); + scoped_lock_t locker (_opt_sync); + _thread_name_prefix = s.str (); } else if (option_ == ZMQ_THREAD_PRIORITY && optval_ >= 0) { - scoped_lock_t locker (opt_sync); - thread_priority = optval_; + scoped_lock_t locker (_opt_sync); + _thread_priority = optval_; } else { errno = EINVAL; rc = -1; @@ -477,23 +477,23 @@ int zmq::thread_ctx_t::set (int option_, int optval_) void zmq::ctx_t::send_command (uint32_t tid_, const command_t &command_) { - slots[tid_]->send (command_); + _slots[tid_]->send (command_); } zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_) { - if (io_threads.empty ()) + if (_io_threads.empty ()) return NULL; // Find the I/O thread with minimum load. int min_load = -1; io_thread_t *selected_io_thread = NULL; - for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) { + for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) { if (!affinity_ || (affinity_ & (uint64_t (1) << i))) { - int load = io_threads[i]->get_load (); + int load = _io_threads[i]->get_load (); if (selected_io_thread == NULL || load < min_load) { min_load = load; - selected_io_thread = io_threads[i]; + selected_io_thread = _io_threads[i]; } } } @@ -503,10 +503,10 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_) int zmq::ctx_t::register_endpoint (const char *addr_, const endpoint_t &endpoint_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); const bool inserted = - endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_) + _endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), endpoint_) .second; if (!inserted) { errno = EADDRINUSE; @@ -518,30 +518,30 @@ int zmq::ctx_t::register_endpoint (const 
char *addr_, int zmq::ctx_t::unregister_endpoint (const std::string &addr_, socket_base_t *socket_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); - const endpoints_t::iterator it = endpoints.find (addr_); - if (it == endpoints.end () || it->second.socket != socket_) { + const endpoints_t::iterator it = _endpoints.find (addr_); + if (it == _endpoints.end () || it->second.socket != socket_) { errno = ENOENT; return -1; } // Remove endpoint. - endpoints.erase (it); + _endpoints.erase (it); return 0; } void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); - endpoints_t::iterator it = endpoints.begin (); - while (it != endpoints.end ()) { + endpoints_t::iterator it = _endpoints.begin (); + while (it != _endpoints.end ()) { if (it->second.socket == socket_) { endpoints_t::iterator to_erase = it; ++it; - endpoints.erase (to_erase); + _endpoints.erase (to_erase); continue; } ++it; @@ -550,10 +550,10 @@ void zmq::ctx_t::unregister_endpoints (socket_base_t *socket_) zmq::endpoint_t zmq::ctx_t::find_endpoint (const char *addr_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); - endpoints_t::iterator it = endpoints.find (addr_); - if (it == endpoints.end ()) { + endpoints_t::iterator it = _endpoints.find (addr_); + if (it == _endpoints.end ()) { errno = ECONNREFUSED; endpoint_t empty = {NULL, options_t ()}; return empty; @@ -573,17 +573,17 @@ void zmq::ctx_t::pend_connection (const std::string &addr_, const endpoint_t &endpoint_, pipe_t **pipes_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); const pending_connection_t pending_connection = {endpoint_, pipes_[0], pipes_[1]}; - endpoints_t::iterator it = endpoints.find (addr_); - if (it == endpoints.end ()) { + endpoints_t::iterator it = _endpoints.find (addr_); + if (it == _endpoints.end ()) { // Still no bind. 
endpoint_.socket->inc_seqnum (); - pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_, - pending_connection); + _pending_connections.ZMQ_MAP_INSERT_OR_EMPLACE (addr_, + pending_connection); } else { // Bind has happened in the mean time, connect directly connect_inproc_sockets (it->second.socket, it->second.options, @@ -594,16 +594,16 @@ void zmq::ctx_t::pend_connection (const std::string &addr_, void zmq::ctx_t::connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_) { - scoped_lock_t locker (endpoints_sync); + scoped_lock_t locker (_endpoints_sync); std::pair - pending = pending_connections.equal_range (addr_); + pending = _pending_connections.equal_range (addr_); for (pending_connections_t::iterator p = pending.first; p != pending.second; ++p) - connect_inproc_sockets (bind_socket_, endpoints[addr_].options, + connect_inproc_sockets (bind_socket_, _endpoints[addr_].options, p->second, bind_side); - pending_connections.erase (pending.first, pending.second); + _pending_connections.erase (pending.first, pending.second); } void zmq::ctx_t::connect_inproc_sockets ( @@ -682,20 +682,20 @@ void zmq::ctx_t::connect_inproc_sockets ( int zmq::ctx_t::get_vmci_socket_family () { - zmq::scoped_lock_t locker (vmci_sync); + zmq::scoped_lock_t locker (_vmci_sync); - if (vmci_fd == -1) { - vmci_family = VMCISock_GetAFValueFd (&vmci_fd); + if (_vmci_fd == -1) { + _vmci_family = VMCISock_GetAFValueFd (&_vmci_fd); - if (vmci_fd != -1) { + if (_vmci_fd != -1) { #ifdef FD_CLOEXEC - int rc = fcntl (vmci_fd, F_SETFD, FD_CLOEXEC); + int rc = fcntl (_vmci_fd, F_SETFD, FD_CLOEXEC); errno_assert (rc != -1); #endif } } - return vmci_family; + return _vmci_family; } #endif diff --git a/src/ctx.hpp b/src/ctx.hpp index 22e090b9..a43dc5df 100644 --- a/src/ctx.hpp +++ b/src/ctx.hpp @@ -73,14 +73,14 @@ class thread_ctx_t protected: // Synchronisation of access to context options. - mutex_t opt_sync; + mutex_t _opt_sync; private: // Thread parameters. 
- int thread_priority; - int thread_sched_policy; - std::set thread_affinity_cpus; - std::string thread_name_prefix; + int _thread_priority; + int _thread_sched_policy; + std::set _thread_affinity_cpus; + std::string _thread_name_prefix; }; // Context object encapsulates all the global state associated with @@ -165,84 +165,84 @@ class ctx_t : public thread_ctx_t }; // Used to check whether the object is a context. - uint32_t tag; + uint32_t _tag; // Sockets belonging to this context. We need the list so that // we can notify the sockets when zmq_ctx_term() is called. // The sockets will return ETERM then. typedef array_t sockets_t; - sockets_t sockets; + sockets_t _sockets; // List of unused thread slots. typedef std::vector empty_slots_t; - empty_slots_t empty_slots; + empty_slots_t _empty_slots; // If true, zmq_init has been called but no socket has been created // yet. Launching of I/O threads is delayed. - bool starting; + bool _starting; // If true, zmq_ctx_term was already called. - bool terminating; + bool _terminating; // Synchronisation of accesses to global slot-related data: // sockets, empty_slots, terminating. It also synchronises // access to zombie sockets as such (as opposed to slots) and provides // a memory barrier to ensure that all CPU cores see the same data. - mutex_t slot_sync; + mutex_t _slot_sync; // The reaper thread. - zmq::reaper_t *reaper; + zmq::reaper_t *_reaper; // I/O threads. typedef std::vector io_threads_t; - io_threads_t io_threads; + io_threads_t _io_threads; // Array of pointers to mailboxes for both application and I/O threads. - uint32_t slot_count; - i_mailbox **slots; + uint32_t _slot_count; + i_mailbox **_slots; // Mailbox for zmq_ctx_term thread. - mailbox_t term_mailbox; + mailbox_t _term_mailbox; // List of inproc endpoints within this context. 
typedef std::map endpoints_t; - endpoints_t endpoints; + endpoints_t _endpoints; // List of inproc connection endpoints pending a bind typedef std::multimap pending_connections_t; - pending_connections_t pending_connections; + pending_connections_t _pending_connections; // Synchronisation of access to the list of inproc endpoints. - mutex_t endpoints_sync; + mutex_t _endpoints_sync; // Maximum socket ID. static atomic_counter_t max_socket_id; // Maximum number of sockets that can be opened at the same time. - int max_sockets; + int _max_sockets; // Maximum allowed message size - int max_msgsz; + int _max_msgsz; // Number of I/O threads to launch. - int io_thread_count; + int _io_thread_count; // Does context wait (possibly forever) on termination? - bool blocky; + bool _blocky; // Is IPv6 enabled on this context? - bool ipv6; + bool _ipv6; // Should we use zero copy message decoding in this context? - bool zero_copy; + bool _zero_copy; ctx_t (const ctx_t &); const ctx_t &operator= (const ctx_t &); #ifdef HAVE_FORK // the process that created this context. Used to detect forking. 
- pid_t pid; + pid_t _pid; #endif enum side { @@ -256,9 +256,9 @@ class ctx_t : public thread_ctx_t side side_); #ifdef ZMQ_HAVE_VMCI - int vmci_fd; - int vmci_family; - mutex_t vmci_sync; + int _vmci_fd; + int _vmci_family; + mutex_t _vmci_sync; #endif }; } diff --git a/src/curve_client.cpp b/src/curve_client.cpp index 3fe9aa0b..d443efe9 100644 --- a/src/curve_client.cpp +++ b/src/curve_client.cpp @@ -44,10 +44,10 @@ zmq::curve_client_t::curve_client_t (session_base_t *session_, mechanism_base_t (session_, options_), curve_mechanism_base_t ( session_, options_, "CurveZMQMESSAGEC", "CurveZMQMESSAGES"), - state (send_hello), - tools (options_.curve_public_key, - options_.curve_secret_key, - options_.curve_server_key) + _state (send_hello), + _tools (options_.curve_public_key, + options_.curve_secret_key, + options_.curve_server_key) { } @@ -59,16 +59,16 @@ int zmq::curve_client_t::next_handshake_command (msg_t *msg_) { int rc = 0; - switch (state) { + switch (_state) { case send_hello: rc = produce_hello (msg_); if (rc == 0) - state = expect_welcome; + _state = expect_welcome; break; case send_initiate: rc = produce_initiate (msg_); if (rc == 0) - state = expect_ready; + _state = expect_ready; break; default: errno = EAGAIN; @@ -111,21 +111,21 @@ int zmq::curve_client_t::process_handshake_command (msg_t *msg_) int zmq::curve_client_t::encode (msg_t *msg_) { - zmq_assert (state == connected); + zmq_assert (_state == connected); return curve_mechanism_base_t::encode (msg_); } int zmq::curve_client_t::decode (msg_t *msg_) { - zmq_assert (state == connected); + zmq_assert (_state == connected); return curve_mechanism_base_t::decode (msg_); } zmq::mechanism_t::status_t zmq::curve_client_t::status () const { - if (state == connected) + if (_state == connected) return mechanism_t::ready; - if (state == error_received) + if (_state == error_received) return mechanism_t::error; else return mechanism_t::handshaking; @@ -136,7 +136,7 @@ int zmq::curve_client_t::produce_hello 
(msg_t *msg_) int rc = msg_->init_size (200); errno_assert (rc == 0); - rc = tools.produce_hello (msg_->data (), cn_nonce); + rc = _tools.produce_hello (msg_->data (), cn_nonce); if (rc == -1) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC); @@ -157,7 +157,7 @@ int zmq::curve_client_t::produce_hello (msg_t *msg_) int zmq::curve_client_t::process_welcome (const uint8_t *msg_data_, size_t msg_size_) { - int rc = tools.process_welcome (msg_data_, msg_size_, cn_precom); + int rc = _tools.process_welcome (msg_data_, msg_size_, cn_precom); if (rc == -1) { session->get_socket ()->event_handshake_failed_protocol ( @@ -167,7 +167,7 @@ int zmq::curve_client_t::process_welcome (const uint8_t *msg_data_, return -1; } - state = send_initiate; + _state = send_initiate; return 0; } @@ -185,8 +185,8 @@ int zmq::curve_client_t::produce_initiate (msg_t *msg_) int rc = msg_->init_size (msg_size); errno_assert (rc == 0); - rc = tools.produce_initiate (msg_->data (), msg_size, cn_nonce, - metadata_plaintext, metadata_length); + rc = _tools.produce_initiate (msg_->data (), msg_size, cn_nonce, + metadata_plaintext, metadata_length); free (metadata_plaintext); @@ -248,7 +248,7 @@ int zmq::curve_client_t::process_ready (const uint8_t *msg_data_, free (ready_plaintext); if (rc == 0) - state = connected; + _state = connected; else { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA); @@ -261,7 +261,7 @@ int zmq::curve_client_t::process_ready (const uint8_t *msg_data_, int zmq::curve_client_t::process_error (const uint8_t *msg_data_, size_t msg_size_) { - if (state != expect_welcome && state != expect_ready) { + if (_state != expect_welcome && _state != expect_ready) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND); errno = EPROTO; @@ -284,7 +284,7 @@ int 
zmq::curve_client_t::process_error (const uint8_t *msg_data_, } const char *error_reason = reinterpret_cast (msg_data_) + 7; handle_error_reason (error_reason, error_reason_len); - state = error_received; + _state = error_received; return 0; } diff --git a/src/curve_client.hpp b/src/curve_client.hpp index 34b4ca09..2ad515f2 100644 --- a/src/curve_client.hpp +++ b/src/curve_client.hpp @@ -66,10 +66,10 @@ class curve_client_t : public curve_mechanism_base_t }; // Current FSM state - state_t state; + state_t _state; // CURVE protocol tools - curve_client_tools_t tools; + curve_client_tools_t _tools; int produce_hello (msg_t *msg_); int process_welcome (const uint8_t *cmd_data_, size_t data_size_); diff --git a/src/curve_server.cpp b/src/curve_server.cpp index 6ad903dc..69a1aa9f 100644 --- a/src/curve_server.cpp +++ b/src/curve_server.cpp @@ -49,10 +49,10 @@ zmq::curve_server_t::curve_server_t (session_base_t *session_, { int rc; // Fetch our secret key from socket options - memcpy (secret_key, options_.curve_secret_key, crypto_box_SECRETKEYBYTES); + memcpy (_secret_key, options_.curve_secret_key, crypto_box_SECRETKEYBYTES); // Generate short-term key pair - rc = crypto_box_keypair (cn_public, cn_secret); + rc = crypto_box_keypair (_cn_public, _cn_secret); zmq_assert (rc == 0); } @@ -171,7 +171,7 @@ int zmq::curve_server_t::process_hello (msg_t *msg_) } // Save client's short-term public key (C') - memcpy (cn_client, hello + 80, 32); + memcpy (_cn_client, hello + 80, 32); uint8_t hello_nonce[crypto_box_NONCEBYTES]; uint8_t hello_plaintext[crypto_box_ZEROBYTES + 64]; @@ -186,7 +186,7 @@ int zmq::curve_server_t::process_hello (msg_t *msg_) // Open Box [64 * %x0](C'->S) rc = crypto_box_open (hello_plaintext, hello_box, sizeof hello_box, - hello_nonce, cn_client, secret_key); + hello_nonce, _cn_client, _secret_key); if (rc != 0) { // CURVE I: cannot open client HELLO -- wrong server key? 
session->get_socket ()->event_handshake_failed_protocol ( @@ -212,16 +212,16 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_) // Generate cookie = Box [C' + s'](t) memset (cookie_plaintext, 0, crypto_secretbox_ZEROBYTES); - memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32); - memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, cn_secret, 32); + memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES, _cn_client, 32); + memcpy (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, _cn_secret, 32); // Generate fresh cookie key - randombytes (cookie_key, crypto_secretbox_KEYBYTES); + randombytes (_cookie_key, crypto_secretbox_KEYBYTES); // Encrypt using symmetric cookie key int rc = crypto_secretbox (cookie_ciphertext, cookie_plaintext, - sizeof cookie_plaintext, cookie_nonce, cookie_key); + sizeof cookie_plaintext, cookie_nonce, _cookie_key); zmq_assert (rc == 0); uint8_t welcome_nonce[crypto_box_NONCEBYTES]; @@ -235,15 +235,15 @@ int zmq::curve_server_t::produce_welcome (msg_t *msg_) // Create 144-byte Box [S' + cookie](S->C') memset (welcome_plaintext, 0, crypto_box_ZEROBYTES); - memcpy (welcome_plaintext + crypto_box_ZEROBYTES, cn_public, 32); + memcpy (welcome_plaintext + crypto_box_ZEROBYTES, _cn_public, 32); memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 32, cookie_nonce + 8, 16); memcpy (welcome_plaintext + crypto_box_ZEROBYTES + 48, cookie_ciphertext + crypto_secretbox_BOXZEROBYTES, 80); rc = crypto_box (welcome_ciphertext, welcome_plaintext, - sizeof welcome_plaintext, welcome_nonce, cn_client, - secret_key); + sizeof welcome_plaintext, welcome_nonce, _cn_client, + _secret_key); // TODO I think we should change this back to zmq_assert (rc == 0); // as it was before https://github.com/zeromq/libzmq/pull/1832 @@ -301,7 +301,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) memcpy (cookie_nonce + 8, initiate + 9, 16); rc = crypto_secretbox_open (cookie_plaintext, cookie_box, sizeof cookie_box, - cookie_nonce, 
cookie_key); + cookie_nonce, _cookie_key); if (rc != 0) { // CURVE I: cannot open client INITIATE cookie session->get_socket ()->event_handshake_failed_protocol ( @@ -311,9 +311,9 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) } // Check cookie plain text is as expected [C' + s'] - if (memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES, cn_client, 32) + if (memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES, _cn_client, 32) || memcmp (cookie_plaintext + crypto_secretbox_ZEROBYTES + 32, - cn_secret, 32)) { + _cn_secret, 32)) { // TODO this case is very hard to test, as it would require a modified // client that knows the server's secret temporary cookie key @@ -340,7 +340,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) cn_peer_nonce = get_uint64 (initiate + 105); rc = crypto_box_open (initiate_plaintext, initiate_box, clen, - initiate_nonce, cn_client, cn_secret); + initiate_nonce, _cn_client, _cn_secret); if (rc != 0) { // CURVE I: cannot open client INITIATE session->get_socket ()->event_handshake_failed_protocol ( @@ -365,7 +365,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) 16); rc = crypto_box_open (vouch_plaintext, vouch_box, sizeof vouch_box, - vouch_nonce, client_key, cn_secret); + vouch_nonce, client_key, _cn_secret); if (rc != 0) { // CURVE I: cannot open client INITIATE vouch session->get_socket ()->event_handshake_failed_protocol ( @@ -375,7 +375,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) } // What we decrypted must be the client's short-term public key - if (memcmp (vouch_plaintext + crypto_box_ZEROBYTES, cn_client, 32)) { + if (memcmp (vouch_plaintext + crypto_box_ZEROBYTES, _cn_client, 32)) { // TODO this case is very hard to test, as it would require a modified // client that knows the server's secret short-term key @@ -387,7 +387,7 @@ int zmq::curve_server_t::process_initiate (msg_t *msg_) } // Precompute connection secret from client key - rc = crypto_box_beforenm (cn_precom, cn_client, 
cn_secret); + rc = crypto_box_beforenm (cn_precom, _cn_client, _cn_secret); zmq_assert (rc == 0); // Given this is a backward-incompatible change, it's behind a socket diff --git a/src/curve_server.hpp b/src/curve_server.hpp index 4aca0585..b3d2dd73 100644 --- a/src/curve_server.hpp +++ b/src/curve_server.hpp @@ -59,19 +59,19 @@ class curve_server_t : public zap_client_common_handshake_t, private: // Our secret key (s) - uint8_t secret_key[crypto_box_SECRETKEYBYTES]; + uint8_t _secret_key[crypto_box_SECRETKEYBYTES]; // Our short-term public key (S') - uint8_t cn_public[crypto_box_PUBLICKEYBYTES]; + uint8_t _cn_public[crypto_box_PUBLICKEYBYTES]; // Our short-term secret key (s') - uint8_t cn_secret[crypto_box_SECRETKEYBYTES]; + uint8_t _cn_secret[crypto_box_SECRETKEYBYTES]; // Client's short-term public key (C') - uint8_t cn_client[crypto_box_PUBLICKEYBYTES]; + uint8_t _cn_client[crypto_box_PUBLICKEYBYTES]; // Key used to produce cookie - uint8_t cookie_key[crypto_secretbox_KEYBYTES]; + uint8_t _cookie_key[crypto_secretbox_KEYBYTES]; int process_hello (msg_t *msg_); int produce_welcome (msg_t *msg_); diff --git a/src/dbuffer.hpp b/src/dbuffer.hpp index e7d07269..a1ef4990 100644 --- a/src/dbuffer.hpp +++ b/src/dbuffer.hpp @@ -59,18 +59,18 @@ template <> class dbuffer_t { public: inline dbuffer_t () : - back (&storage[0]), - front (&storage[1]), - has_msg (false) + _back (&_storage[0]), + _front (&_storage[1]), + _has_msg (false) { - back->init (); - front->init (); + _back->init (); + _front->init (); } inline ~dbuffer_t () { - back->close (); - front->close (); + _back->close (); + _front->close (); } inline void write (const msg_t &value_) @@ -78,15 +78,15 @@ template <> class dbuffer_t msg_t &xvalue = const_cast (value_); zmq_assert (xvalue.check ()); - back->move (xvalue); // cannot just overwrite, might leak + _back->move (xvalue); // cannot just overwrite, might leak - zmq_assert (back->check ()); + zmq_assert (_back->check ()); - if (sync.try_lock ()) { - 
std::swap (back, front); - has_msg = true; + if (_sync.try_lock ()) { + std::swap (_back, _front); + _has_msg = true; - sync.unlock (); + _sync.unlock (); } } @@ -96,16 +96,16 @@ template <> class dbuffer_t return false; { - scoped_lock_t lock (sync); - if (!has_msg) + scoped_lock_t lock (_sync); + if (!_has_msg) return false; - zmq_assert (front->check ()); + zmq_assert (_front->check ()); - *value_ = *front; - front->init (); // avoid double free + *value_ = *_front; + _front->init (); // avoid double free - has_msg = false; + _has_msg = false; return true; } } @@ -113,24 +113,24 @@ template <> class dbuffer_t inline bool check_read () { - scoped_lock_t lock (sync); + scoped_lock_t lock (_sync); - return has_msg; + return _has_msg; } inline bool probe (bool (*fn_) (const msg_t &)) { - scoped_lock_t lock (sync); - return (*fn_) (*front); + scoped_lock_t lock (_sync); + return (*fn_) (*_front); } private: - msg_t storage[2]; - msg_t *back, *front; + msg_t _storage[2]; + msg_t *_back, *_front; - mutex_t sync; - bool has_msg; + mutex_t _sync; + bool _has_msg; // Disable copying of dbuffer. 
dbuffer_t (const dbuffer_t &); diff --git a/src/dealer.cpp b/src/dealer.cpp index a0fd1a4c..ee9dbf49 100644 --- a/src/dealer.cpp +++ b/src/dealer.cpp @@ -35,7 +35,7 @@ zmq::dealer_t::dealer_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - probe_router (false) + _probe_router (false) { options.type = ZMQ_DEALER; } @@ -50,7 +50,7 @@ void zmq::dealer_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) zmq_assert (pipe_); - if (probe_router) { + if (_probe_router) { msg_t probe_msg; int rc = probe_msg.init (); errno_assert (rc == 0); @@ -65,8 +65,8 @@ void zmq::dealer_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) errno_assert (rc == 0); } - fq.attach (pipe_); - lb.attach (pipe_); + _fq.attach (pipe_); + _lb.attach (pipe_); } int zmq::dealer_t::xsetsockopt (int option_, @@ -81,7 +81,7 @@ int zmq::dealer_t::xsetsockopt (int option_, switch (option_) { case ZMQ_PROBE_ROUTER: if (is_int && value >= 0) { - probe_router = (value != 0); + _probe_router = (value != 0); return 0; } break; @@ -106,42 +106,42 @@ int zmq::dealer_t::xrecv (msg_t *msg_) bool zmq::dealer_t::xhas_in () { - return fq.has_in (); + return _fq.has_in (); } bool zmq::dealer_t::xhas_out () { - return lb.has_out (); + return _lb.has_out (); } const zmq::blob_t &zmq::dealer_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } void zmq::dealer_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::dealer_t::xwrite_activated (pipe_t *pipe_) { - lb.activated (pipe_); + _lb.activated (pipe_); } void zmq::dealer_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); - lb.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); + _lb.pipe_terminated (pipe_); } int zmq::dealer_t::sendpipe (msg_t *msg_, pipe_t **pipe_) { - return lb.sendpipe (msg_, pipe_); + return _lb.sendpipe (msg_, pipe_); } int zmq::dealer_t::recvpipe (msg_t *msg_, pipe_t **pipe_) { - return 
fq.recvpipe (msg_, pipe_); + return _fq.recvpipe (msg_, pipe_); } diff --git a/src/dealer.hpp b/src/dealer.hpp index 3651c132..77239898 100644 --- a/src/dealer.hpp +++ b/src/dealer.hpp @@ -69,11 +69,11 @@ class dealer_t : public socket_base_t private: // Messages are fair-queued from inbound pipes. And load-balanced to // the outbound pipes. - fq_t fq; - lb_t lb; + fq_t _fq; + lb_t _lb; // if true, send an empty message to every connected router peer - bool probe_router; + bool _probe_router; dealer_t (const dealer_t &); const dealer_t &operator= (const dealer_t &); diff --git a/src/decoder.hpp b/src/decoder.hpp index d54b6e7e..f9268055 100644 --- a/src/decoder.hpp +++ b/src/decoder.hpp @@ -58,22 +58,22 @@ class decoder_base_t : public i_decoder { public: explicit decoder_base_t (const size_t buf_size_) : - next (NULL), - read_pos (NULL), - to_read (0), - allocator (buf_size_) + _next (NULL), + _read_pos (NULL), + _to_read (0), + _allocator (buf_size_) { - buf = allocator.allocate (); + _buf = _allocator.allocate (); } // The destructor doesn't have to be virtual. It is made virtual // just to keep ICC and code checking tools from complaining. - virtual ~decoder_base_t () { allocator.deallocate (); } + virtual ~decoder_base_t () { _allocator.deallocate (); } // Returns a buffer to be filled with binary data. void get_buffer (unsigned char **data_, std::size_t *size_) { - buf = allocator.allocate (); + _buf = _allocator.allocate (); // If we are expected to read large message, we'll opt for zero- // copy, i.e. we'll ask caller to fill the data directly to the @@ -83,14 +83,14 @@ class decoder_base_t : public i_decoder // As a consequence, large messages being received won't block // other engines running in the same I/O thread for excessive // amounts of time. 
- if (to_read >= allocator.size ()) { - *data_ = read_pos; - *size_ = to_read; + if (_to_read >= _allocator.size ()) { + *data_ = _read_pos; + *size_ = _to_read; return; } - *data_ = buf; - *size_ = allocator.size (); + *data_ = _buf; + *size_ = _allocator.size (); } // Processes the data in the buffer previously allocated using @@ -108,15 +108,15 @@ class decoder_base_t : public i_decoder // In case of zero-copy simply adjust the pointers, no copying // is required. Also, run the state machine in case all the data // were processed. - if (data_ == read_pos) { - zmq_assert (size_ <= to_read); - read_pos += size_; - to_read -= size_; + if (data_ == _read_pos) { + zmq_assert (size_ <= _to_read); + _read_pos += size_; + _to_read -= size_; bytes_used_ = size_; - while (!to_read) { + while (!_to_read) { const int rc = - (static_cast (this)->*next) (data_ + bytes_used_); + (static_cast (this)->*_next) (data_ + bytes_used_); if (rc != 0) return rc; } @@ -125,22 +125,22 @@ class decoder_base_t : public i_decoder while (bytes_used_ < size_) { // Copy the data from buffer to the message. - const size_t to_copy = std::min (to_read, size_ - bytes_used_); + const size_t to_copy = std::min (_to_read, size_ - bytes_used_); // Only copy when destination address is different from the // current address in the buffer. - if (read_pos != data_ + bytes_used_) { - memcpy (read_pos, data_ + bytes_used_, to_copy); + if (_read_pos != data_ + bytes_used_) { + memcpy (_read_pos, data_ + bytes_used_, to_copy); } - read_pos += to_copy; - to_read -= to_copy; + _read_pos += to_copy; + _to_read -= to_copy; bytes_used_ += to_copy; // Try to get more space in the message to fill in. // If none is available, return. 
- while (to_read == 0) { + while (_to_read == 0) { // pass current address in the buffer const int rc = - (static_cast (this)->*next) (data_ + bytes_used_); + (static_cast (this)->*_next) (data_ + bytes_used_); if (rc != 0) return rc; } @@ -151,7 +151,7 @@ class decoder_base_t : public i_decoder virtual void resize_buffer (std::size_t new_size_) { - allocator.resize (new_size_); + _allocator.resize (new_size_); } protected: @@ -163,28 +163,28 @@ class decoder_base_t : public i_decoder // from the buffer and schedule next state machine action. void next_step (void *read_pos_, std::size_t to_read_, step_t next_) { - read_pos = static_cast (read_pos_); - to_read = to_read_; - next = next_; + _read_pos = static_cast (read_pos_); + _to_read = to_read_; + _next = next_; } - A &get_allocator () { return allocator; } + A &get_allocator () { return _allocator; } private: // Next step. If set to NULL, it means that associated data stream // is dead. Note that there can be still data in the process in such // case. - step_t next; + step_t _next; // Where to store the read data. - unsigned char *read_pos; + unsigned char *_read_pos; // How much data to read before taking next step. - std::size_t to_read; + std::size_t _to_read; // The duffer for data to decode. 
- A allocator; - unsigned char *buf; + A _allocator; + unsigned char *_buf; decoder_base_t (const decoder_base_t &); const decoder_base_t &operator= (const decoder_base_t &); diff --git a/src/decoder_allocators.cpp b/src/decoder_allocators.cpp index bfb672f3..c77b905d 100644 --- a/src/decoder_allocators.cpp +++ b/src/decoder_allocators.cpp @@ -36,23 +36,23 @@ zmq::shared_message_memory_allocator::shared_message_memory_allocator ( std::size_t bufsize_) : - buf (NULL), - bufsize (0), - max_size (bufsize_), - msg_content (NULL), - maxCounters (static_cast ( - std::ceil (static_cast (max_size) + _buf (NULL), + _buf_size (0), + _max_size (bufsize_), + _msg_content (NULL), + _max_counters (static_cast ( + std::ceil (static_cast (_max_size) / static_cast (msg_t::max_vsm_size)))) { } zmq::shared_message_memory_allocator::shared_message_memory_allocator ( std::size_t bufsize_, std::size_t max_messages_) : - buf (NULL), - bufsize (0), - max_size (bufsize_), - msg_content (NULL), - maxCounters (max_messages_) + _buf (NULL), + _buf_size (0), + _max_size (bufsize_), + _msg_content (NULL), + _max_counters (max_messages_) { } @@ -63,10 +63,10 @@ zmq::shared_message_memory_allocator::~shared_message_memory_allocator () unsigned char *zmq::shared_message_memory_allocator::allocate () { - if (buf) { + if (_buf) { // release reference count to couple lifetime to messages zmq::atomic_counter_t *c = - reinterpret_cast (buf); + reinterpret_cast (_buf); // if refcnt drops to 0, there are no message using the buffer // because either all messages have been closed or only vsm-messages @@ -79,55 +79,55 @@ unsigned char *zmq::shared_message_memory_allocator::allocate () } // if buf != NULL it is not used by any message so we can re-use it for the next run - if (!buf) { + if (!_buf) { // allocate memory for reference counters together with reception buffer std::size_t const allocationsize = - max_size + sizeof (zmq::atomic_counter_t) - + maxCounters * sizeof (zmq::msg_t::content_t); + 
_max_size + sizeof (zmq::atomic_counter_t) + + _max_counters * sizeof (zmq::msg_t::content_t); - buf = static_cast (std::malloc (allocationsize)); - alloc_assert (buf); + _buf = static_cast (std::malloc (allocationsize)); + alloc_assert (_buf); - new (buf) atomic_counter_t (1); + new (_buf) atomic_counter_t (1); } else { // release reference count to couple lifetime to messages zmq::atomic_counter_t *c = - reinterpret_cast (buf); + reinterpret_cast (_buf); c->set (1); } - bufsize = max_size; - msg_content = reinterpret_cast ( - buf + sizeof (atomic_counter_t) + max_size); - return buf + sizeof (zmq::atomic_counter_t); + _buf_size = _max_size; + _msg_content = reinterpret_cast ( + _buf + sizeof (atomic_counter_t) + _max_size); + return _buf + sizeof (zmq::atomic_counter_t); } void zmq::shared_message_memory_allocator::deallocate () { - zmq::atomic_counter_t *c = reinterpret_cast (buf); - if (buf && !c->sub (1)) { - std::free (buf); + zmq::atomic_counter_t *c = reinterpret_cast (_buf); + if (_buf && !c->sub (1)) { + std::free (_buf); } clear (); } unsigned char *zmq::shared_message_memory_allocator::release () { - unsigned char *b = buf; + unsigned char *b = _buf; clear (); return b; } void zmq::shared_message_memory_allocator::clear () { - buf = NULL; - bufsize = 0; - msg_content = NULL; + _buf = NULL; + _buf_size = 0; + _msg_content = NULL; } void zmq::shared_message_memory_allocator::inc_ref () { - (reinterpret_cast (buf))->add (1); + (reinterpret_cast (_buf))->add (1); } void zmq::shared_message_memory_allocator::call_dec_ref (void *, void *hint_) @@ -146,10 +146,10 @@ void zmq::shared_message_memory_allocator::call_dec_ref (void *, void *hint_) std::size_t zmq::shared_message_memory_allocator::size () const { - return bufsize; + return _buf_size; } unsigned char *zmq::shared_message_memory_allocator::data () { - return buf + sizeof (zmq::atomic_counter_t); + return _buf + sizeof (zmq::atomic_counter_t); } diff --git a/src/decoder_allocators.hpp 
b/src/decoder_allocators.hpp index d578d13f..c9e51281 100644 --- a/src/decoder_allocators.hpp +++ b/src/decoder_allocators.hpp @@ -44,25 +44,25 @@ class c_single_allocator { public: explicit c_single_allocator (std::size_t bufsize_) : - bufsize (bufsize_), - buf (static_cast (std::malloc (bufsize))) + _buf_size (bufsize_), + _buf (static_cast (std::malloc (_buf_size))) { - alloc_assert (buf); + alloc_assert (_buf); } - ~c_single_allocator () { std::free (buf); } + ~c_single_allocator () { std::free (_buf); } - unsigned char *allocate () { return buf; } + unsigned char *allocate () { return _buf; } void deallocate () {} - std::size_t size () const { return bufsize; } + std::size_t size () const { return _buf_size; } - void resize (std::size_t new_size_) { bufsize = new_size_; } + void resize (std::size_t new_size_) { _buf_size = new_size_; } private: - std::size_t bufsize; - unsigned char *buf; + std::size_t _buf_size; + unsigned char *_buf; c_single_allocator (c_single_allocator const &); c_single_allocator &operator= (c_single_allocator const &); @@ -111,22 +111,22 @@ class shared_message_memory_allocator unsigned char *data (); // Return pointer to the first byte of the buffer. 
- unsigned char *buffer () { return buf; } + unsigned char *buffer () { return _buf; } - void resize (std::size_t new_size_) { bufsize = new_size_; } + void resize (std::size_t new_size_) { _buf_size = new_size_; } - zmq::msg_t::content_t *provide_content () { return msg_content; } + zmq::msg_t::content_t *provide_content () { return _msg_content; } - void advance_content () { msg_content++; } + void advance_content () { _msg_content++; } private: void clear (); - unsigned char *buf; - std::size_t bufsize; - const std::size_t max_size; - zmq::msg_t::content_t *msg_content; - std::size_t maxCounters; + unsigned char *_buf; + std::size_t _buf_size; + const std::size_t _max_size; + zmq::msg_t::content_t *_msg_content; + std::size_t _max_counters; }; } diff --git a/src/dgram.cpp b/src/dgram.cpp index c63bbe2c..cbed5144 100644 --- a/src/dgram.cpp +++ b/src/dgram.cpp @@ -38,9 +38,9 @@ zmq::dgram_t::dgram_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - pipe (NULL), - last_in (NULL), - more_out (false) + _pipe (NULL), + _last_in (NULL), + _more_out (false) { options.type = ZMQ_DGRAM; options.raw_socket = true; @@ -48,7 +48,7 @@ zmq::dgram_t::dgram_t (class ctx_t *parent_, uint32_t tid_, int sid_) : zmq::dgram_t::~dgram_t () { - zmq_assert (!pipe); + zmq_assert (!_pipe); } void zmq::dgram_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) @@ -59,20 +59,20 @@ void zmq::dgram_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) // ZMQ_DGRAM socket can only be connected to a single peer. // The socket rejects any further connection requests. 
- if (pipe == NULL) - pipe = pipe_; + if (_pipe == NULL) + _pipe = pipe_; else pipe_->terminate (false); } void zmq::dgram_t::xpipe_terminated (pipe_t *pipe_) { - if (pipe_ == pipe) { - if (last_in == pipe) { - saved_credential.set_deep_copy (last_in->get_credential ()); - last_in = NULL; + if (pipe_ == _pipe) { + if (_last_in == _pipe) { + _saved_credential.set_deep_copy (_last_in->get_credential ()); + _last_in = NULL; } - pipe = NULL; + _pipe = NULL; } } @@ -91,7 +91,7 @@ void zmq::dgram_t::xwrite_activated (pipe_t *) int zmq::dgram_t::xsend (msg_t *msg_) { // If there's no out pipe, just drop it. - if (!pipe) { + if (!_pipe) { int rc = msg_->close (); errno_assert (rc == 0); return -1; @@ -99,14 +99,14 @@ int zmq::dgram_t::xsend (msg_t *msg_) // If this is the first part of the message it's the ID of the // peer to send the message to. - if (!more_out) { + if (!_more_out) { if (!(msg_->flags () & msg_t::more)) { errno = EINVAL; return -1; } // Expect one more message frame. - more_out = true; + _more_out = true; } else { // dgram messages are two part only, reject part if more is set if (msg_->flags () & msg_t::more) { @@ -115,17 +115,17 @@ int zmq::dgram_t::xsend (msg_t *msg_) } // This is the last part of the message. - more_out = false; + _more_out = false; } // Push the message into the pipe. - if (!pipe->write (msg_)) { + if (!_pipe->write (msg_)) { errno = EAGAIN; return -1; } if (!(msg_->flags () & msg_t::more)) - pipe->flush (); + _pipe->flush (); // Detach the message from the data buffer. int rc = msg_->init (); @@ -140,7 +140,7 @@ int zmq::dgram_t::xrecv (msg_t *msg_) int rc = msg_->close (); errno_assert (rc == 0); - if (!pipe || !pipe->read (msg_)) { + if (!_pipe || !_pipe->read (msg_)) { // Initialise the output parameter to be a 0-byte message. 
rc = msg_->init (); errno_assert (rc == 0); @@ -148,28 +148,28 @@ int zmq::dgram_t::xrecv (msg_t *msg_) errno = EAGAIN; return -1; } - last_in = pipe; + _last_in = _pipe; return 0; } bool zmq::dgram_t::xhas_in () { - if (!pipe) + if (!_pipe) return false; - return pipe->check_read (); + return _pipe->check_read (); } bool zmq::dgram_t::xhas_out () { - if (!pipe) + if (!_pipe) return false; - return pipe->check_write (); + return _pipe->check_write (); } const zmq::blob_t &zmq::dgram_t::get_credential () const { - return last_in ? last_in->get_credential () : saved_credential; + return _last_in ? _last_in->get_credential () : _saved_credential; } diff --git a/src/dgram.hpp b/src/dgram.hpp index 731c237f..bc99cd4a 100644 --- a/src/dgram.hpp +++ b/src/dgram.hpp @@ -59,14 +59,14 @@ class dgram_t : public socket_base_t void xpipe_terminated (zmq::pipe_t *pipe_); private: - zmq::pipe_t *pipe; + zmq::pipe_t *_pipe; - zmq::pipe_t *last_in; + zmq::pipe_t *_last_in; - blob_t saved_credential; + blob_t _saved_credential; // If true, more outgoing message parts are expected. - bool more_out; + bool _more_out; dgram_t (const dgram_t &); const dgram_t &operator= (const dgram_t &); diff --git a/src/dish.cpp b/src/dish.cpp index 948e3955..e690abb7 100644 --- a/src/dish.cpp +++ b/src/dish.cpp @@ -36,7 +36,7 @@ zmq::dish_t::dish_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_, true), - has_message (false) + _has_message (false) { options.type = ZMQ_DISH; @@ -44,13 +44,13 @@ zmq::dish_t::dish_t (class ctx_t *parent_, uint32_t tid_, int sid_) : // subscription commands are sent to the wire. 
options.linger.store (0); - int rc = message.init (); + int rc = _message.init (); errno_assert (rc == 0); } zmq::dish_t::~dish_t () { - int rc = message.close (); + int rc = _message.close (); errno_assert (rc == 0); } @@ -59,8 +59,8 @@ void zmq::dish_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) LIBZMQ_UNUSED (subscribe_to_all_); zmq_assert (pipe_); - fq.attach (pipe_); - dist.attach (pipe_); + _fq.attach (pipe_); + _dist.attach (pipe_); // Send all the cached subscriptions to the new upstream peer. send_subscriptions (pipe_); @@ -68,18 +68,18 @@ void zmq::dish_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) void zmq::dish_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::dish_t::xwrite_activated (pipe_t *pipe_) { - dist.activated (pipe_); + _dist.activated (pipe_); } void zmq::dish_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); - dist.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); + _dist.pipe_terminated (pipe_); } void zmq::dish_t::xhiccuped (pipe_t *pipe_) @@ -97,15 +97,15 @@ int zmq::dish_t::xjoin (const char *group_) return -1; } - subscriptions_t::iterator it = subscriptions.find (group); + subscriptions_t::iterator it = _subscriptions.find (group); // User cannot join same group twice - if (it != subscriptions.end ()) { + if (it != _subscriptions.end ()) { errno = EINVAL; return -1; } - subscriptions.insert (group); + _subscriptions.insert (group); msg_t msg; int rc = msg.init_join (); @@ -115,7 +115,7 @@ int zmq::dish_t::xjoin (const char *group_) errno_assert (rc == 0); int err = 0; - rc = dist.send_to_all (&msg); + rc = _dist.send_to_all (&msg); if (rc != 0) err = errno; int rc2 = msg.close (); @@ -135,14 +135,14 @@ int zmq::dish_t::xleave (const char *group_) } subscriptions_t::iterator it = - std::find (subscriptions.begin (), subscriptions.end (), group); + std::find (_subscriptions.begin (), _subscriptions.end (), group); - if (it == subscriptions.end ()) 
{ + if (it == _subscriptions.end ()) { errno = EINVAL; return -1; } - subscriptions.erase (it); + _subscriptions.erase (it); msg_t msg; int rc = msg.init_leave (); @@ -152,7 +152,7 @@ int zmq::dish_t::xleave (const char *group_) errno_assert (rc == 0); int err = 0; - rc = dist.send_to_all (&msg); + rc = _dist.send_to_all (&msg); if (rc != 0) err = errno; int rc2 = msg.close (); @@ -179,16 +179,16 @@ int zmq::dish_t::xrecv (msg_t *msg_) { // If there's already a message prepared by a previous call to zmq_poll, // return it straight ahead. - if (has_message) { - int rc = msg_->move (message); + if (_has_message) { + int rc = msg_->move (_message); errno_assert (rc == 0); - has_message = false; + _has_message = false; return 0; } while (true) { // Get a message using fair queueing algorithm. - int rc = fq.recv (msg_); + int rc = _fq.recv (msg_); // If there's no message available, return immediately. // The same when error occurs. @@ -197,8 +197,8 @@ int zmq::dish_t::xrecv (msg_t *msg_) // Filtering non matching messages subscriptions_t::iterator it = - subscriptions.find (std::string (msg_->group ())); - if (it != subscriptions.end ()) + _subscriptions.find (std::string (msg_->group ())); + if (it != _subscriptions.end ()) return 0; } } @@ -207,12 +207,12 @@ bool zmq::dish_t::xhas_in () { // If there's already a message prepared by a previous call to zmq_poll, // return straight ahead. - if (has_message) + if (_has_message) return true; while (true) { // Get a message using fair queueing algorithm. - int rc = fq.recv (&message); + int rc = _fq.recv (&_message); // If there's no message available, return immediately. // The same when error occurs. 
@@ -223,9 +223,9 @@ bool zmq::dish_t::xhas_in () // Filtering non matching messages subscriptions_t::iterator it = - subscriptions.find (std::string (message.group ())); - if (it != subscriptions.end ()) { - has_message = true; + _subscriptions.find (std::string (_message.group ())); + if (it != _subscriptions.end ()) { + _has_message = true; return true; } } @@ -233,13 +233,13 @@ bool zmq::dish_t::xhas_in () const zmq::blob_t &zmq::dish_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } void zmq::dish_t::send_subscriptions (pipe_t *pipe_) { - for (subscriptions_t::iterator it = subscriptions.begin (); - it != subscriptions.end (); ++it) { + for (subscriptions_t::iterator it = _subscriptions.begin (); + it != _subscriptions.end (); ++it) { msg_t msg; int rc = msg.init_join (); errno_assert (rc == 0); @@ -261,7 +261,7 @@ zmq::dish_session_t::dish_session_t (io_thread_t *io_thread_, const options_t &options_, address_t *addr_) : session_base_t (io_thread_, connect_, socket_, options_, addr_), - state (group) + _state (group) { } @@ -271,7 +271,7 @@ zmq::dish_session_t::~dish_session_t () int zmq::dish_session_t::push_msg (msg_t *msg_) { - if (state == group) { + if (_state == group) { if ((msg_->flags () & msg_t::more) != msg_t::more) { errno = EFAULT; return -1; @@ -282,8 +282,8 @@ int zmq::dish_session_t::push_msg (msg_t *msg_) return -1; } - group_msg = *msg_; - state = body; + _group_msg = *msg_; + _state = body; int rc = msg_->init (); errno_assert (rc == 0); @@ -295,12 +295,12 @@ int zmq::dish_session_t::push_msg (msg_t *msg_) goto has_group; // Set the message group - rc = msg_->set_group (static_cast (group_msg.data ()), - group_msg.size ()); + rc = msg_->set_group (static_cast (_group_msg.data ()), + _group_msg.size ()); errno_assert (rc == 0); // We set the group, so we don't need the group_msg anymore - rc = group_msg.close (); + rc = _group_msg.close (); errno_assert (rc == 0); has_group: // Thread safe socket 
doesn't support multipart messages @@ -313,7 +313,7 @@ has_group: rc = session_base_t::push_msg (msg_); if (rc == 0) - state = group; + _state = group; return rc; } @@ -363,5 +363,5 @@ int zmq::dish_session_t::pull_msg (msg_t *msg_) void zmq::dish_session_t::reset () { session_base_t::reset (); - state = group; + _state = group; } diff --git a/src/dish.hpp b/src/dish.hpp index 3af46c43..bb966e84 100644 --- a/src/dish.hpp +++ b/src/dish.hpp @@ -70,19 +70,19 @@ class dish_t : public socket_base_t void send_subscriptions (pipe_t *pipe_); // Fair queueing object for inbound pipes. - fq_t fq; + fq_t _fq; // Object for distributing the subscriptions upstream. - dist_t dist; + dist_t _dist; // The repository of subscriptions. typedef std::set subscriptions_t; - subscriptions_t subscriptions; + subscriptions_t _subscriptions; // If true, 'message' contains a matching message to return on the // next recv call. - bool has_message; - msg_t message; + bool _has_message; + msg_t _message; dish_t (const dish_t &); const dish_t &operator= (const dish_t &); @@ -108,9 +108,9 @@ class dish_session_t : public session_base_t { group, body - } state; + } _state; - msg_t group_msg; + msg_t _group_msg; dish_session_t (const dish_session_t &); const dish_session_t &operator= (const dish_session_t &); diff --git a/src/dist.cpp b/src/dist.cpp index f98ba9a5..1b5fa83e 100644 --- a/src/dist.cpp +++ b/src/dist.cpp @@ -34,13 +34,17 @@ #include "msg.hpp" #include "likely.hpp" -zmq::dist_t::dist_t () : matching (0), active (0), eligible (0), more (false) +zmq::dist_t::dist_t () : + _matching (0), + _active (0), + _eligible (0), + _more (false) { } zmq::dist_t::~dist_t () { - zmq_assert (pipes.empty ()); + zmq_assert (_pipes.empty ()); } void zmq::dist_t::attach (pipe_t *pipe_) @@ -48,36 +52,36 @@ void zmq::dist_t::attach (pipe_t *pipe_) // If we are in the middle of sending a message, we'll add new pipe // into the list of eligible pipes. Otherwise we add it to the list // of active pipes. 
- if (more) { - pipes.push_back (pipe_); - pipes.swap (eligible, pipes.size () - 1); - eligible++; + if (_more) { + _pipes.push_back (pipe_); + _pipes.swap (_eligible, _pipes.size () - 1); + _eligible++; } else { - pipes.push_back (pipe_); - pipes.swap (active, pipes.size () - 1); - active++; - eligible++; + _pipes.push_back (pipe_); + _pipes.swap (_active, _pipes.size () - 1); + _active++; + _eligible++; } } void zmq::dist_t::match (pipe_t *pipe_) { // If pipe is already matching do nothing. - if (pipes.index (pipe_) < matching) + if (_pipes.index (pipe_) < _matching) return; // If the pipe isn't eligible, ignore it. - if (pipes.index (pipe_) >= eligible) + if (_pipes.index (pipe_) >= _eligible) return; // Mark the pipe as matching. - pipes.swap (pipes.index (pipe_), matching); - matching++; + _pipes.swap (_pipes.index (pipe_), _matching); + _matching++; } void zmq::dist_t::reverse_match () { - pipes_t::size_type prev_matching = matching; + pipes_t::size_type prev_matching = _matching; // Reset matching to 0 unmatch (); @@ -86,55 +90,55 @@ void zmq::dist_t::reverse_match () // To do this, push all pipes that are eligible but not // matched - i.e. between "matching" and "eligible" - // to the beginning of the queue. - for (pipes_t::size_type i = prev_matching; i < eligible; ++i) { - pipes.swap (i, matching++); + for (pipes_t::size_type i = prev_matching; i < _eligible; ++i) { + _pipes.swap (i, _matching++); } } void zmq::dist_t::unmatch () { - matching = 0; + _matching = 0; } void zmq::dist_t::pipe_terminated (pipe_t *pipe_) { // Remove the pipe from the list; adjust number of matching, active and/or // eligible pipes accordingly. 
- if (pipes.index (pipe_) < matching) { - pipes.swap (pipes.index (pipe_), matching - 1); - matching--; + if (_pipes.index (pipe_) < _matching) { + _pipes.swap (_pipes.index (pipe_), _matching - 1); + _matching--; } - if (pipes.index (pipe_) < active) { - pipes.swap (pipes.index (pipe_), active - 1); - active--; + if (_pipes.index (pipe_) < _active) { + _pipes.swap (_pipes.index (pipe_), _active - 1); + _active--; } - if (pipes.index (pipe_) < eligible) { - pipes.swap (pipes.index (pipe_), eligible - 1); - eligible--; + if (_pipes.index (pipe_) < _eligible) { + _pipes.swap (_pipes.index (pipe_), _eligible - 1); + _eligible--; } - pipes.erase (pipe_); + _pipes.erase (pipe_); } void zmq::dist_t::activated (pipe_t *pipe_) { // Move the pipe from passive to eligible state. - if (eligible < pipes.size ()) { - pipes.swap (pipes.index (pipe_), eligible); - eligible++; + if (_eligible < _pipes.size ()) { + _pipes.swap (_pipes.index (pipe_), _eligible); + _eligible++; } // If there's no message being sent at the moment, move it to // the active state. - if (!more && active < pipes.size ()) { - pipes.swap (eligible - 1, active); - active++; + if (!_more && _active < _pipes.size ()) { + _pipes.swap (_eligible - 1, _active); + _active++; } } int zmq::dist_t::send_to_all (msg_t *msg_) { - matching = active; + _matching = _active; return send_to_matching (msg_); } @@ -148,9 +152,9 @@ int zmq::dist_t::send_to_matching (msg_t *msg_) // If multipart message is fully sent, activate all the eligible pipes. if (!msg_more) - active = eligible; + _active = _eligible; - more = msg_more; + _more = msg_more; return 0; } @@ -158,7 +162,7 @@ int zmq::dist_t::send_to_matching (msg_t *msg_) void zmq::dist_t::distribute (msg_t *msg_) { // If there are no matching pipes available, simply drop the message. 
- if (matching == 0) { + if (_matching == 0) { int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); @@ -167,8 +171,8 @@ void zmq::dist_t::distribute (msg_t *msg_) } if (msg_->is_vsm ()) { - for (pipes_t::size_type i = 0; i < matching; ++i) - if (!write (pipes[i], msg_)) + for (pipes_t::size_type i = 0; i < _matching; ++i) + if (!write (_pipes[i], msg_)) --i; // Retry last write because index will have been swapped int rc = msg_->close (); errno_assert (rc == 0); @@ -179,12 +183,12 @@ void zmq::dist_t::distribute (msg_t *msg_) // Add matching-1 references to the message. We already hold one reference, // that's why -1. - msg_->add_refs (static_cast (matching) - 1); + msg_->add_refs (static_cast (_matching) - 1); // Push copy of the message to each matching pipe. int failed = 0; - for (pipes_t::size_type i = 0; i < matching; ++i) - if (!write (pipes[i], msg_)) { + for (pipes_t::size_type i = 0; i < _matching; ++i) + if (!write (_pipes[i], msg_)) { ++failed; --i; // Retry last write because index will have been swapped } @@ -205,12 +209,12 @@ bool zmq::dist_t::has_out () bool zmq::dist_t::write (pipe_t *pipe_, msg_t *msg_) { if (!pipe_->write (msg_)) { - pipes.swap (pipes.index (pipe_), matching - 1); - matching--; - pipes.swap (pipes.index (pipe_), active - 1); - active--; - pipes.swap (active, eligible - 1); - eligible--; + _pipes.swap (_pipes.index (pipe_), _matching - 1); + _matching--; + _pipes.swap (_pipes.index (pipe_), _active - 1); + _active--; + _pipes.swap (_active, _eligible - 1); + _eligible--; return false; } if (!(msg_->flags () & msg_t::more)) @@ -220,8 +224,8 @@ bool zmq::dist_t::write (pipe_t *pipe_, msg_t *msg_) bool zmq::dist_t::check_hwm () { - for (pipes_t::size_type i = 0; i < matching; ++i) - if (!pipes[i]->check_hwm ()) + for (pipes_t::size_type i = 0; i < _matching; ++i) + if (!_pipes[i]->check_hwm ()) return false; return true; diff --git a/src/dist.hpp b/src/dist.hpp index f7d0245c..12a19d65 100644 --- a/src/dist.hpp +++ 
b/src/dist.hpp @@ -87,25 +87,25 @@ class dist_t // List of outbound pipes. typedef array_t pipes_t; - pipes_t pipes; + pipes_t _pipes; // Number of all the pipes to send the next message to. - pipes_t::size_type matching; + pipes_t::size_type _matching; // Number of active pipes. All the active pipes are located at the // beginning of the pipes array. These are the pipes the messages // can be sent to at the moment. - pipes_t::size_type active; + pipes_t::size_type _active; // Number of pipes eligible for sending messages to. This includes all // the active pipes plus all the pipes that we can in theory send // messages to (the HWM is not yet reached), but sending a message // to them would result in partial message being delivered, ie. message // with initial parts missing. - pipes_t::size_type eligible; + pipes_t::size_type _eligible; // True if last we are in the middle of a multipart message. - bool more; + bool _more; dist_t (const dist_t &); const dist_t &operator= (const dist_t &); diff --git a/src/encoder.hpp b/src/encoder.hpp index 64320166..93b654bd 100644 --- a/src/encoder.hpp +++ b/src/encoder.hpp @@ -55,28 +55,28 @@ template class encoder_base_t : public i_encoder { public: inline encoder_base_t (size_t bufsize_) : - write_pos (0), - to_write (0), - next (NULL), - new_msg_flag (false), - bufsize (bufsize_), - buf (static_cast (malloc (bufsize_))), + _write_pos (0), + _to_write (0), + _next (NULL), + _new_msg_flag (false), + _buf_size (bufsize_), + _buf (static_cast (malloc (bufsize_))), in_progress (NULL) { - alloc_assert (buf); + alloc_assert (_buf); } // The destructor doesn't have to be virtual. It is made virtual // just to keep ICC and code checking tools from complaining. - inline virtual ~encoder_base_t () { free (buf); } + inline virtual ~encoder_base_t () { free (_buf); } // The function returns a batch of binary data. The data // are filled to a supplied buffer. 
If no buffer is supplied (data_ // points to NULL) decoder object will provide buffer of its own. inline size_t encode (unsigned char **data_, size_t size_) { - unsigned char *buffer = !*data_ ? buf : *data_; - size_t buffersize = !*data_ ? bufsize : size_; + unsigned char *buffer = !*data_ ? _buf : *data_; + size_t buffersize = !*data_ ? _buf_size : size_; if (in_progress == NULL) return 0; @@ -86,8 +86,8 @@ template class encoder_base_t : public i_encoder // If there are no more data to return, run the state machine. // If there are still no data, return what we already have // in the buffer. - if (!to_write) { - if (new_msg_flag) { + if (!_to_write) { + if (_new_msg_flag) { int rc = in_progress->close (); errno_assert (rc == 0); rc = in_progress->init (); @@ -95,7 +95,7 @@ template class encoder_base_t : public i_encoder in_progress = NULL; break; } - (static_cast (this)->*next) (); + (static_cast (this)->*_next) (); } // If there are no data in the buffer yet and we are able to @@ -108,20 +108,20 @@ template class encoder_base_t : public i_encoder // As a consequence, large messages being sent won't block // other engines running in the same I/O thread for excessive // amounts of time. - if (!pos && !*data_ && to_write >= buffersize) { - *data_ = write_pos; - pos = to_write; - write_pos = NULL; - to_write = 0; + if (!pos && !*data_ && _to_write >= buffersize) { + *data_ = _write_pos; + pos = _to_write; + _write_pos = NULL; + _to_write = 0; return pos; } // Copy data to the buffer. If the buffer is full, return. 
- size_t to_copy = std::min (to_write, buffersize - pos); - memcpy (buffer + pos, write_pos, to_copy); + size_t to_copy = std::min (_to_write, buffersize - pos); + memcpy (buffer + pos, _write_pos, to_copy); pos += to_copy; - write_pos += to_copy; - to_write -= to_copy; + _write_pos += to_copy; + _to_write -= to_copy; } *data_ = buffer; @@ -132,7 +132,7 @@ template class encoder_base_t : public i_encoder { zmq_assert (in_progress == NULL); in_progress = msg_; - (static_cast (this)->*next) (); + (static_cast (this)->*_next) (); } protected: @@ -146,28 +146,28 @@ template class encoder_base_t : public i_encoder step_t next_, bool new_msg_flag_) { - write_pos = static_cast (write_pos_); - to_write = to_write_; - next = next_; - new_msg_flag = new_msg_flag_; + _write_pos = static_cast (write_pos_); + _to_write = to_write_; + _next = next_; + _new_msg_flag = new_msg_flag_; } private: // Where to get the data to write from. - unsigned char *write_pos; + unsigned char *_write_pos; // How much data to write before next step should be executed. - size_t to_write; + size_t _to_write; // Next step. If set to NULL, it means that associated data stream // is dead. - step_t next; + step_t _next; - bool new_msg_flag; + bool _new_msg_flag; // The buffer for encoded data. - const size_t bufsize; - unsigned char *const buf; + const size_t _buf_size; + unsigned char *const _buf; encoder_base_t (const encoder_base_t &); void operator= (const encoder_base_t &); diff --git a/src/epoll.cpp b/src/epoll.cpp index 5f814ec1..cf19a452 100644 --- a/src/epoll.cpp +++ b/src/epoll.cpp @@ -58,11 +58,11 @@ zmq::epoll_t::epoll_t (const zmq::thread_ctx_t &ctx_) : // Setting this option result in sane behaviour when exec() functions // are used. Old sockets are closed and don't block TCP ports, avoid // leaks, etc. 
- epoll_fd = epoll_create1 (EPOLL_CLOEXEC); + _epoll_fd = epoll_create1 (EPOLL_CLOEXEC); #else - epoll_fd = epoll_create (1); + _epoll_fd = epoll_create (1); #endif - errno_assert (epoll_fd != epoll_retired_fd); + errno_assert (_epoll_fd != epoll_retired_fd); } zmq::epoll_t::~epoll_t () @@ -71,11 +71,11 @@ zmq::epoll_t::~epoll_t () stop_worker (); #ifdef ZMQ_HAVE_WINDOWS - epoll_close (epoll_fd); + epoll_close (_epoll_fd); #else - close (epoll_fd); + close (_epoll_fd); #endif - for (retired_t::iterator it = retired.begin (); it != retired.end (); + for (retired_t::iterator it = _retired.begin (); it != _retired.end (); ++it) { LIBZMQ_DELETE (*it); } @@ -96,7 +96,7 @@ zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_) pe->ev.data.ptr = pe; pe->events = events_; - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_ADD, fd_, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_ADD, fd_, &pe->ev); errno_assert (rc != -1); // Increase the load metric of the thread. @@ -109,12 +109,12 @@ void zmq::epoll_t::rm_fd (handle_t handle_) { check_thread (); poll_entry_t *pe = (poll_entry_t *) handle_; - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_DEL, pe->fd, &pe->ev); errno_assert (rc != -1); pe->fd = retired_fd; - retired_sync.lock (); - retired.push_back (pe); - retired_sync.unlock (); + _retired_sync.lock (); + _retired.push_back (pe); + _retired_sync.unlock (); // Decrease the load metric of the thread. 
adjust_load (-1); @@ -125,7 +125,7 @@ void zmq::epoll_t::set_pollin (handle_t handle_) check_thread (); poll_entry_t *pe = (poll_entry_t *) handle_; pe->ev.events |= EPOLLIN; - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); errno_assert (rc != -1); } @@ -134,7 +134,7 @@ void zmq::epoll_t::reset_pollin (handle_t handle_) check_thread (); poll_entry_t *pe = (poll_entry_t *) handle_; pe->ev.events &= ~((short) EPOLLIN); - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); errno_assert (rc != -1); } @@ -143,7 +143,7 @@ void zmq::epoll_t::set_pollout (handle_t handle_) check_thread (); poll_entry_t *pe = (poll_entry_t *) handle_; pe->ev.events |= EPOLLOUT; - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); errno_assert (rc != -1); } @@ -152,7 +152,7 @@ void zmq::epoll_t::reset_pollout (handle_t handle_) check_thread (); poll_entry_t *pe = (poll_entry_t *) handle_; pe->ev.events &= ~((short) EPOLLOUT); - int rc = epoll_ctl (epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); + int rc = epoll_ctl (_epoll_fd, EPOLL_CTL_MOD, pe->fd, &pe->ev); errno_assert (rc != -1); } @@ -183,7 +183,7 @@ void zmq::epoll_t::loop () } // Wait for events. - int n = epoll_wait (epoll_fd, &ev_buf[0], max_io_events, + int n = epoll_wait (_epoll_fd, &ev_buf[0], max_io_events, timeout ? timeout : -1); if (n == -1) { errno_assert (errno == EINTR); @@ -208,13 +208,13 @@ void zmq::epoll_t::loop () } // Destroy retired event sources. 
- retired_sync.lock (); - for (retired_t::iterator it = retired.begin (); it != retired.end (); + _retired_sync.lock (); + for (retired_t::iterator it = _retired.begin (); it != _retired.end (); ++it) { LIBZMQ_DELETE (*it); } - retired.clear (); - retired_sync.unlock (); + _retired.clear (); + _retired_sync.unlock (); } } diff --git a/src/epoll.hpp b/src/epoll.hpp index ab6d6542..9d16e147 100644 --- a/src/epoll.hpp +++ b/src/epoll.hpp @@ -90,7 +90,7 @@ class epoll_t : public worker_poller_base_t void loop (); // Main epoll file descriptor - epoll_fd_t epoll_fd; + epoll_fd_t _epoll_fd; struct poll_entry_t { @@ -101,13 +101,13 @@ class epoll_t : public worker_poller_base_t // List of retired event sources. typedef std::vector retired_t; - retired_t retired; + retired_t _retired; // Handle of the physical thread doing the I/O work. - thread_t worker; + thread_t _worker; // Synchronisation of retired event sources - mutex_t retired_sync; + mutex_t _retired_sync; epoll_t (const epoll_t &); const epoll_t &operator= (const epoll_t &); diff --git a/src/fq.cpp b/src/fq.cpp index cb1337fb..dae29017 100644 --- a/src/fq.cpp +++ b/src/fq.cpp @@ -33,47 +33,47 @@ #include "err.hpp" #include "msg.hpp" -zmq::fq_t::fq_t () : active (0), last_in (NULL), current (0), more (false) +zmq::fq_t::fq_t () : _active (0), _last_in (NULL), _current (0), _more (false) { } zmq::fq_t::~fq_t () { - zmq_assert (pipes.empty ()); + zmq_assert (_pipes.empty ()); } void zmq::fq_t::attach (pipe_t *pipe_) { - pipes.push_back (pipe_); - pipes.swap (active, pipes.size () - 1); - active++; + _pipes.push_back (pipe_); + _pipes.swap (_active, _pipes.size () - 1); + _active++; } void zmq::fq_t::pipe_terminated (pipe_t *pipe_) { - const pipes_t::size_type index = pipes.index (pipe_); + const pipes_t::size_type index = _pipes.index (pipe_); // Remove the pipe from the list; adjust number of active pipes // accordingly. 
- if (index < active) { - active--; - pipes.swap (index, active); - if (current == active) - current = 0; + if (index < _active) { + _active--; + _pipes.swap (index, _active); + if (_current == _active) + _current = 0; } - pipes.erase (pipe_); + _pipes.erase (pipe_); - if (last_in == pipe_) { - saved_credential.set_deep_copy (last_in->get_credential ()); - last_in = NULL; + if (_last_in == pipe_) { + _saved_credential.set_deep_copy (_last_in->get_credential ()); + _last_in = NULL; } } void zmq::fq_t::activated (pipe_t *pipe_) { // Move the pipe to the list of active pipes. - pipes.swap (pipes.index (pipe_), active); - active++; + _pipes.swap (_pipes.index (pipe_), _active); + _active++; } int zmq::fq_t::recv (msg_t *msg_) @@ -88,21 +88,21 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_) errno_assert (rc == 0); // Round-robin over the pipes to get the next message. - while (active > 0) { + while (_active > 0) { // Try to fetch new message. If we've already read part of the message // subsequent part should be immediately available. - bool fetched = pipes[current]->read (msg_); + bool fetched = _pipes[_current]->read (msg_); // Note that when message is not fetched, current pipe is deactivated // and replaced by another active pipe. Thus we don't have to increase // the 'current' pointer. if (fetched) { if (pipe_) - *pipe_ = pipes[current]; - more = (msg_->flags () & msg_t::more) != 0; - if (!more) { - last_in = pipes[current]; - current = (current + 1) % active; + *pipe_ = _pipes[_current]; + _more = (msg_->flags () & msg_t::more) != 0; + if (!_more) { + _last_in = _pipes[_current]; + _current = (_current + 1) % _active; } return 0; } @@ -110,12 +110,12 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_) // Check the atomicity of the message. // If we've already received the first part of the message // we should get the remaining parts without blocking. 
- zmq_assert (!more); + zmq_assert (!_more); - active--; - pipes.swap (current, active); - if (current == active) - current = 0; + _active--; + _pipes.swap (_current, _active); + if (_current == _active) + _current = 0; } // No message is available. Initialise the output parameter @@ -129,22 +129,22 @@ int zmq::fq_t::recvpipe (msg_t *msg_, pipe_t **pipe_) bool zmq::fq_t::has_in () { // There are subsequent parts of the partly-read message available. - if (more) + if (_more) return true; // Note that messing with current doesn't break the fairness of fair // queueing algorithm. If there are no messages available current will // get back to its original value. Otherwise it'll point to the first // pipe holding messages, skipping only pipes with no messages available. - while (active > 0) { - if (pipes[current]->check_read ()) + while (_active > 0) { + if (_pipes[_current]->check_read ()) return true; // Deactivate the pipe. - active--; - pipes.swap (current, active); - if (current == active) - current = 0; + _active--; + _pipes.swap (_current, _active); + if (_current == _active) + _current = 0; } return false; @@ -152,5 +152,5 @@ bool zmq::fq_t::has_in () const zmq::blob_t &zmq::fq_t::get_credential () const { - return last_in ? last_in->get_credential () : saved_credential; + return _last_in ? _last_in->get_credential () : _saved_credential; } diff --git a/src/fq.hpp b/src/fq.hpp index 0386a862..18e6d9fc 100644 --- a/src/fq.hpp +++ b/src/fq.hpp @@ -60,26 +60,26 @@ class fq_t private: // Inbound pipes. typedef array_t pipes_t; - pipes_t pipes; + pipes_t _pipes; // Number of active pipes. All the active pipes are located at the // beginning of the pipes array. - pipes_t::size_type active; + pipes_t::size_type _active; // Pointer to the last pipe we received message from. // NULL when no message has been received or the pipe // has terminated. - pipe_t *last_in; + pipe_t *_last_in; // Index of the next bound pipe to read a message from. 
- pipes_t::size_type current; + pipes_t::size_type _current; // If true, part of a multipart message was already received, but // there are following parts still waiting in the current pipe. - bool more; + bool _more; // Holds credential after the last_active_pipe has terminated. - blob_t saved_credential; + blob_t _saved_credential; fq_t (const fq_t &); const fq_t &operator= (const fq_t &); diff --git a/src/gather.cpp b/src/gather.cpp index 00d19359..aec39359 100644 --- a/src/gather.cpp +++ b/src/gather.cpp @@ -49,34 +49,34 @@ void zmq::gather_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) LIBZMQ_UNUSED (subscribe_to_all_); zmq_assert (pipe_); - fq.attach (pipe_); + _fq.attach (pipe_); } void zmq::gather_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::gather_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); } int zmq::gather_t::xrecv (msg_t *msg_) { - int rc = fq.recvpipe (msg_, NULL); + int rc = _fq.recvpipe (msg_, NULL); // Drop any messages with more flag while (rc == 0 && msg_->flags () & msg_t::more) { // drop all frames of the current multi-frame message - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); while (rc == 0 && msg_->flags () & msg_t::more) - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); // get the new message if (rc == 0) - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); } return rc; @@ -84,10 +84,10 @@ int zmq::gather_t::xrecv (msg_t *msg_) bool zmq::gather_t::xhas_in () { - return fq.has_in (); + return _fq.has_in (); } const zmq::blob_t &zmq::gather_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } diff --git a/src/gather.hpp b/src/gather.hpp index 9d05869a..2c3c8033 100644 --- a/src/gather.hpp +++ b/src/gather.hpp @@ -56,7 +56,7 @@ class gather_t : public socket_base_t private: // Fair queueing object for inbound pipes. 
- fq_t fq; + fq_t _fq; gather_t (const gather_t &); const gather_t &operator= (const gather_t &); diff --git a/src/generic_mtrie.hpp b/src/generic_mtrie.hpp index 623eabf5..521de66d 100644 --- a/src/generic_mtrie.hpp +++ b/src/generic_mtrie.hpp @@ -96,16 +96,16 @@ template class generic_mtrie_t bool is_redundant () const; typedef std::set pipes_t; - pipes_t *pipes; + pipes_t *_pipes; - unsigned char min; - unsigned short count; - unsigned short live_nodes; + unsigned char _min; + unsigned short _count; + unsigned short _live_nodes; union { class generic_mtrie_t *node; class generic_mtrie_t **table; - } next; + } _next; generic_mtrie_t (const generic_mtrie_t &); const generic_mtrie_t & diff --git a/src/generic_mtrie_impl.hpp b/src/generic_mtrie_impl.hpp index 386b5985..36245495 100644 --- a/src/generic_mtrie_impl.hpp +++ b/src/generic_mtrie_impl.hpp @@ -43,25 +43,25 @@ along with this program. If not, see . template zmq::generic_mtrie_t::generic_mtrie_t () : - pipes (0), - min (0), - count (0), - live_nodes (0) + _pipes (0), + _min (0), + _count (0), + _live_nodes (0) { } template zmq::generic_mtrie_t::~generic_mtrie_t () { - LIBZMQ_DELETE (pipes); + LIBZMQ_DELETE (_pipes); - if (count == 1) { - zmq_assert (next.node); - LIBZMQ_DELETE (next.node); - } else if (count > 1) { - for (unsigned short i = 0; i != count; ++i) { - LIBZMQ_DELETE (next.table[i]); + if (_count == 1) { + zmq_assert (_next.node); + LIBZMQ_DELETE (_next.node); + } else if (_count > 1) { + for (unsigned short i = 0; i != _count; ++i) { + LIBZMQ_DELETE (_next.table[i]); } - free (next.table); + free (_next.table); } } @@ -80,73 +80,73 @@ bool zmq::generic_mtrie_t::add_helper (prefix_t prefix_, { // We are at the node corresponding to the prefix. We are done. 
if (!size_) { - bool result = !pipes; - if (!pipes) { - pipes = new (std::nothrow) pipes_t; - alloc_assert (pipes); + bool result = !_pipes; + if (!_pipes) { + _pipes = new (std::nothrow) pipes_t; + alloc_assert (_pipes); } - pipes->insert (pipe_); + _pipes->insert (pipe_); return result; } unsigned char c = *prefix_; - if (c < min || c >= min + count) { + if (c < _min || c >= _min + _count) { // The character is out of range of currently handled // characters. We have to extend the table. - if (!count) { - min = c; - count = 1; - next.node = NULL; - } else if (count == 1) { - unsigned char oldc = min; - generic_mtrie_t *oldp = next.node; - count = (min < c ? c - min : min - c) + 1; - next.table = - (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); - for (unsigned short i = 0; i != count; ++i) - next.table[i] = 0; - min = std::min (min, c); - next.table[oldc - min] = oldp; - } else if (min < c) { + if (!_count) { + _min = c; + _count = 1; + _next.node = NULL; + } else if (_count == 1) { + unsigned char oldc = _min; + generic_mtrie_t *oldp = _next.node; + _count = (_min < c ? c - _min : _min - c) + 1; + _next.table = + (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); + for (unsigned short i = 0; i != _count; ++i) + _next.table[i] = 0; + _min = std::min (_min, c); + _next.table[oldc - _min] = oldp; + } else if (_min < c) { // The new character is above the current character range. 
- unsigned short old_count = count; - count = c - min + 1; - next.table = (generic_mtrie_t **) realloc ( - next.table, sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); - for (unsigned short i = old_count; i != count; i++) - next.table[i] = NULL; + unsigned short old_count = _count; + _count = c - _min + 1; + _next.table = (generic_mtrie_t **) realloc ( + _next.table, sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); + for (unsigned short i = old_count; i != _count; i++) + _next.table[i] = NULL; } else { // The new character is below the current character range. - unsigned short old_count = count; - count = (min + old_count) - c; - next.table = (generic_mtrie_t **) realloc ( - next.table, sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); - memmove (next.table + min - c, next.table, + unsigned short old_count = _count; + _count = (_min + old_count) - c; + _next.table = (generic_mtrie_t **) realloc ( + _next.table, sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); + memmove (_next.table + _min - c, _next.table, old_count * sizeof (generic_mtrie_t *)); - for (unsigned short i = 0; i != min - c; i++) - next.table[i] = NULL; - min = c; + for (unsigned short i = 0; i != _min - c; i++) + _next.table[i] = NULL; + _min = c; } } // If next node does not exist, create one. 
- if (count == 1) { - if (!next.node) { - next.node = new (std::nothrow) generic_mtrie_t; - alloc_assert (next.node); - ++live_nodes; + if (_count == 1) { + if (!_next.node) { + _next.node = new (std::nothrow) generic_mtrie_t; + alloc_assert (_next.node); + ++_live_nodes; } - return next.node->add_helper (prefix_ + 1, size_ - 1, pipe_); + return _next.node->add_helper (prefix_ + 1, size_ - 1, pipe_); } - if (!next.table[c - min]) { - next.table[c - min] = new (std::nothrow) generic_mtrie_t; - alloc_assert (next.table[c - min]); - ++live_nodes; + if (!_next.table[c - _min]) { + _next.table[c - _min] = new (std::nothrow) generic_mtrie_t; + alloc_assert (_next.table[c - _min]); + ++_live_nodes; } - return next.table[c - min]->add_helper (prefix_ + 1, size_ - 1, pipe_); + return _next.table[c - _min]->add_helper (prefix_ + 1, size_ - 1, pipe_); } @@ -177,13 +177,13 @@ void zmq::generic_mtrie_t::rm_helper (value_t *pipe_, bool call_on_uniq_) { // Remove the subscription from this node. - if (pipes && pipes->erase (pipe_)) { - if (!call_on_uniq_ || pipes->empty ()) { + if (_pipes && _pipes->erase (pipe_)) { + if (!call_on_uniq_ || _pipes->empty ()) { func_ (*buff_, buffsize_, arg_); } - if (pipes->empty ()) { - LIBZMQ_DELETE (pipes); + if (_pipes->empty ()) { + LIBZMQ_DELETE (_pipes); } } @@ -195,22 +195,22 @@ void zmq::generic_mtrie_t::rm_helper (value_t *pipe_, } // If there are no subnodes in the trie, return. - if (count == 0) + if (_count == 0) return; // If there's one subnode (optimisation). 
- if (count == 1) { - (*buff_)[buffsize_] = min; + if (_count == 1) { + (*buff_)[buffsize_] = _min; buffsize_++; - next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_, - arg_, call_on_uniq_); + _next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_, + arg_, call_on_uniq_); // Prune the node if it was made redundant by the removal - if (next.node->is_redundant ()) { - LIBZMQ_DELETE (next.node); - count = 0; - --live_nodes; - zmq_assert (live_nodes == 0); + if (_next.node->is_redundant ()) { + LIBZMQ_DELETE (_next.node); + _count = 0; + --_live_nodes; + zmq_assert (_live_nodes == 0); } return; } @@ -218,21 +218,22 @@ void zmq::generic_mtrie_t::rm_helper (value_t *pipe_, // If there are multiple subnodes. // // New min non-null character in the node table after the removal - unsigned char new_min = min + count - 1; + unsigned char new_min = _min + _count - 1; // New max non-null character in the node table after the removal - unsigned char new_max = min; - for (unsigned short c = 0; c != count; c++) { - (*buff_)[buffsize_] = min + c; - if (next.table[c]) { - next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1, maxbuffsize_, - func_, arg_, call_on_uniq_); + unsigned char new_max = _min; + for (unsigned short c = 0; c != _count; c++) { + (*buff_)[buffsize_] = _min + c; + if (_next.table[c]) { + _next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1, + maxbuffsize_, func_, arg_, + call_on_uniq_); // Prune redundant nodes from the mtrie - if (next.table[c]->is_redundant ()) { - LIBZMQ_DELETE (next.table[c]); + if (_next.table[c]->is_redundant ()) { + LIBZMQ_DELETE (_next.table[c]); - zmq_assert (live_nodes > 0); - --live_nodes; + zmq_assert (_live_nodes > 0); + --_live_nodes; } else { // The node is not redundant, so it's a candidate for being // the new min/max node. @@ -241,54 +242,54 @@ void zmq::generic_mtrie_t::rm_helper (value_t *pipe_, // first non-null, non-redundant node encountered is the new // minimum index. 
Conversely, the last non-redundant, non-null // node encountered is the new maximum index. - if (c + min < new_min) - new_min = c + min; - if (c + min > new_max) - new_max = c + min; + if (c + _min < new_min) + new_min = c + _min; + if (c + _min > new_max) + new_max = c + _min; } } } - zmq_assert (count > 1); + zmq_assert (_count > 1); // Free the node table if it's no longer used. - if (live_nodes == 0) { - free (next.table); - next.table = NULL; - count = 0; + if (_live_nodes == 0) { + free (_next.table); + _next.table = NULL; + _count = 0; } // Compact the node table if possible - else if (live_nodes == 1) { + else if (_live_nodes == 1) { // If there's only one live node in the table we can // switch to using the more compact single-node // representation zmq_assert (new_min == new_max); - zmq_assert (new_min >= min && new_min < min + count); - generic_mtrie_t *node = next.table[new_min - min]; + zmq_assert (new_min >= _min && new_min < _min + _count); + generic_mtrie_t *node = _next.table[new_min - _min]; zmq_assert (node); - free (next.table); - next.node = node; - count = 1; - min = new_min; - } else if (new_min > min || new_max < min + count - 1) { + free (_next.table); + _next.node = node; + _count = 1; + _min = new_min; + } else if (new_min > _min || new_max < _min + _count - 1) { zmq_assert (new_max - new_min + 1 > 1); - generic_mtrie_t **old_table = next.table; - zmq_assert (new_min > min || new_max < min + count - 1); - zmq_assert (new_min >= min); - zmq_assert (new_max <= min + count - 1); - zmq_assert (new_max - new_min + 1 < count); + generic_mtrie_t **old_table = _next.table; + zmq_assert (new_min > _min || new_max < _min + _count - 1); + zmq_assert (new_min >= _min); + zmq_assert (new_max <= _min + _count - 1); + zmq_assert (new_max - new_min + 1 < _count); - count = new_max - new_min + 1; - next.table = - (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); + _count = new_max - new_min + 1; + _next.table = 
+ (generic_mtrie_t **) malloc (sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); - memmove (next.table, old_table + (new_min - min), - sizeof (generic_mtrie_t *) * count); + memmove (_next.table, old_table + (new_min - _min), + sizeof (generic_mtrie_t *) * _count); free (old_table); - min = new_min; + _min = new_min; } } @@ -304,23 +305,24 @@ typename zmq::generic_mtrie_t::rm_result zmq::generic_mtrie_t::rm_helper ( prefix_t prefix_, size_t size_, value_t *pipe_) { if (!size_) { - if (!pipes) + if (!_pipes) return not_found; - typename pipes_t::size_type erased = pipes->erase (pipe_); - if (pipes->empty ()) { + typename pipes_t::size_type erased = _pipes->erase (pipe_); + if (_pipes->empty ()) { zmq_assert (erased == 1); - LIBZMQ_DELETE (pipes); + LIBZMQ_DELETE (_pipes); return last_value_removed; } return (erased == 1) ? values_remain : not_found; } unsigned char c = *prefix_; - if (!count || c < min || c >= min + count) + if (!_count || c < _min || c >= _min + _count) return not_found; - generic_mtrie_t *next_node = count == 1 ? next.node : next.table[c - min]; + generic_mtrie_t *next_node = + _count == 1 ? 
_next.node : _next.table[c - _min]; if (!next_node) return not_found; @@ -329,66 +331,66 @@ typename zmq::generic_mtrie_t::rm_result zmq::generic_mtrie_t::rm_helper ( if (next_node->is_redundant ()) { LIBZMQ_DELETE (next_node); - zmq_assert (count > 0); + zmq_assert (_count > 0); - if (count == 1) { - next.node = 0; - count = 0; - --live_nodes; - zmq_assert (live_nodes == 0); + if (_count == 1) { + _next.node = 0; + _count = 0; + --_live_nodes; + zmq_assert (_live_nodes == 0); } else { - next.table[c - min] = 0; - zmq_assert (live_nodes > 1); - --live_nodes; + _next.table[c - _min] = 0; + zmq_assert (_live_nodes > 1); + --_live_nodes; // Compact the table if possible - if (live_nodes == 1) { + if (_live_nodes == 1) { // If there's only one live node in the table we can // switch to using the more compact single-node // representation unsigned short i; - for (i = 0; i < count; ++i) - if (next.table[i]) + for (i = 0; i < _count; ++i) + if (_next.table[i]) break; - zmq_assert (i < count); - min += i; - count = 1; - generic_mtrie_t *oldp = next.table[i]; - free (next.table); - next.node = oldp; - } else if (c == min) { + zmq_assert (i < _count); + _min += i; + _count = 1; + generic_mtrie_t *oldp = _next.table[i]; + free (_next.table); + _next.node = oldp; + } else if (c == _min) { // We can compact the table "from the left" unsigned short i; - for (i = 1; i < count; ++i) - if (next.table[i]) + for (i = 1; i < _count; ++i) + if (_next.table[i]) break; - zmq_assert (i < count); - min += i; - count -= i; - generic_mtrie_t **old_table = next.table; - next.table = (generic_mtrie_t **) malloc ( - sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); - memmove (next.table, old_table + i, - sizeof (generic_mtrie_t *) * count); + zmq_assert (i < _count); + _min += i; + _count -= i; + generic_mtrie_t **old_table = _next.table; + _next.table = (generic_mtrie_t **) malloc ( + sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); + memmove (_next.table, 
old_table + i, + sizeof (generic_mtrie_t *) * _count); free (old_table); - } else if (c == min + count - 1) { + } else if (c == _min + _count - 1) { // We can compact the table "from the right" unsigned short i; - for (i = 1; i < count; ++i) - if (next.table[count - 1 - i]) + for (i = 1; i < _count; ++i) + if (_next.table[_count - 1 - i]) break; - zmq_assert (i < count); - count -= i; - generic_mtrie_t **old_table = next.table; - next.table = (generic_mtrie_t **) malloc ( - sizeof (generic_mtrie_t *) * count); - alloc_assert (next.table); - memmove (next.table, old_table, - sizeof (generic_mtrie_t *) * count); + zmq_assert (i < _count); + _count -= i; + generic_mtrie_t **old_table = _next.table; + _next.table = (generic_mtrie_t **) malloc ( + sizeof (generic_mtrie_t *) * _count); + alloc_assert (_next.table); + memmove (_next.table, old_table, + sizeof (generic_mtrie_t *) * _count); free (old_table); } } @@ -407,9 +409,9 @@ void zmq::generic_mtrie_t::match (prefix_t data_, generic_mtrie_t *current = this; while (true) { // Signal the pipes attached to this node. - if (current->pipes) { - for (typename pipes_t::iterator it = current->pipes->begin (); - it != current->pipes->end (); ++it) + if (current->_pipes) { + for (typename pipes_t::iterator it = current->_pipes->begin (); + it != current->_pipes->end (); ++it) func_ (*it, arg_); } @@ -418,26 +420,26 @@ void zmq::generic_mtrie_t::match (prefix_t data_, break; // If there are no subnodes in the trie, return. - if (current->count == 0) + if (current->_count == 0) break; // If there's one subnode (optimisation). - if (current->count == 1) { - if (data_[0] != current->min) + if (current->_count == 1) { + if (data_[0] != current->_min) break; - current = current->next.node; + current = current->_next.node; data_++; size_--; continue; } // If there are multiple subnodes. 
- if (data_[0] < current->min - || data_[0] >= current->min + current->count) + if (data_[0] < current->_min + || data_[0] >= current->_min + current->_count) break; - if (!current->next.table[data_[0] - current->min]) + if (!current->_next.table[data_[0] - current->_min]) break; - current = current->next.table[data_[0] - current->min]; + current = current->_next.table[data_[0] - current->_min]; data_++; size_--; } @@ -445,7 +447,7 @@ void zmq::generic_mtrie_t::match (prefix_t data_, template bool zmq::generic_mtrie_t::is_redundant () const { - return !pipes && live_nodes == 0; + return !_pipes && _live_nodes == 0; } diff --git a/src/io_object.cpp b/src/io_object.cpp index 992cd577..aa466c08 100644 --- a/src/io_object.cpp +++ b/src/io_object.cpp @@ -32,7 +32,7 @@ #include "io_thread.hpp" #include "err.hpp" -zmq::io_object_t::io_object_t (io_thread_t *io_thread_) : poller (NULL) +zmq::io_object_t::io_object_t (io_thread_t *io_thread_) : _poller (NULL) { if (io_thread_) plug (io_thread_); @@ -45,59 +45,59 @@ zmq::io_object_t::~io_object_t () void zmq::io_object_t::plug (io_thread_t *io_thread_) { zmq_assert (io_thread_); - zmq_assert (!poller); + zmq_assert (!_poller); // Retrieve the poller from the thread we are running in. - poller = io_thread_->get_poller (); + _poller = io_thread_->get_poller (); } void zmq::io_object_t::unplug () { - zmq_assert (poller); + zmq_assert (_poller); // Forget about old poller in preparation to be migrated // to a different I/O thread. 
- poller = NULL; + _poller = NULL; } zmq::io_object_t::handle_t zmq::io_object_t::add_fd (fd_t fd_) { - return poller->add_fd (fd_, this); + return _poller->add_fd (fd_, this); } void zmq::io_object_t::rm_fd (handle_t handle_) { - poller->rm_fd (handle_); + _poller->rm_fd (handle_); } void zmq::io_object_t::set_pollin (handle_t handle_) { - poller->set_pollin (handle_); + _poller->set_pollin (handle_); } void zmq::io_object_t::reset_pollin (handle_t handle_) { - poller->reset_pollin (handle_); + _poller->reset_pollin (handle_); } void zmq::io_object_t::set_pollout (handle_t handle_) { - poller->set_pollout (handle_); + _poller->set_pollout (handle_); } void zmq::io_object_t::reset_pollout (handle_t handle_) { - poller->reset_pollout (handle_); + _poller->reset_pollout (handle_); } void zmq::io_object_t::add_timer (int timeout_, int id_) { - poller->add_timer (timeout_, this, id_); + _poller->add_timer (timeout_, this, id_); } void zmq::io_object_t::cancel_timer (int id_) { - poller->cancel_timer (this, id_); + _poller->cancel_timer (this, id_); } void zmq::io_object_t::in_event () diff --git a/src/io_object.hpp b/src/io_object.hpp index b6a61a17..4d006152 100644 --- a/src/io_object.hpp +++ b/src/io_object.hpp @@ -74,7 +74,7 @@ class io_object_t : public i_poll_events void timer_event (int id_); private: - poller_t *poller; + poller_t *_poller; io_object_t (const io_object_t &); const io_object_t &operator= (const io_object_t &); diff --git a/src/io_thread.cpp b/src/io_thread.cpp index af6bddd0..bcbcaa8e 100644 --- a/src/io_thread.cpp +++ b/src/io_thread.cpp @@ -38,26 +38,26 @@ zmq::io_thread_t::io_thread_t (ctx_t *ctx_, uint32_t tid_) : object_t (ctx_, tid_), - mailbox_handle (static_cast (NULL)) + _mailbox_handle (static_cast (NULL)) { - poller = new (std::nothrow) poller_t (*ctx_); - alloc_assert (poller); + _poller = new (std::nothrow) poller_t (*ctx_); + alloc_assert (_poller); - if (mailbox.get_fd () != retired_fd) { - mailbox_handle = poller->add_fd 
(mailbox.get_fd (), this); - poller->set_pollin (mailbox_handle); + if (_mailbox.get_fd () != retired_fd) { + _mailbox_handle = _poller->add_fd (_mailbox.get_fd (), this); + _poller->set_pollin (_mailbox_handle); } } zmq::io_thread_t::~io_thread_t () { - LIBZMQ_DELETE (poller); + LIBZMQ_DELETE (_poller); } void zmq::io_thread_t::start () { // Start the underlying I/O thread. - poller->start (); + _poller->start (); } void zmq::io_thread_t::stop () @@ -67,12 +67,12 @@ void zmq::io_thread_t::stop () zmq::mailbox_t *zmq::io_thread_t::get_mailbox () { - return &mailbox; + return &_mailbox; } int zmq::io_thread_t::get_load () { - return poller->get_load (); + return _poller->get_load (); } void zmq::io_thread_t::in_event () @@ -81,12 +81,12 @@ void zmq::io_thread_t::in_event () // process in a single go? command_t cmd; - int rc = mailbox.recv (&cmd, 0); + int rc = _mailbox.recv (&cmd, 0); while (rc == 0 || errno == EINTR) { if (rc == 0) cmd.destination->process_command (cmd); - rc = mailbox.recv (&cmd, 0); + rc = _mailbox.recv (&cmd, 0); } errno_assert (rc != 0 && errno == EAGAIN); @@ -106,13 +106,13 @@ void zmq::io_thread_t::timer_event (int) zmq::poller_t *zmq::io_thread_t::get_poller () { - zmq_assert (poller); - return poller; + zmq_assert (_poller); + return _poller; } void zmq::io_thread_t::process_stop () { - zmq_assert (mailbox_handle); - poller->rm_fd (mailbox_handle); - poller->stop (); + zmq_assert (_mailbox_handle); + _poller->rm_fd (_mailbox_handle); + _poller->stop (); } diff --git a/src/io_thread.hpp b/src/io_thread.hpp index 5fcc4896..cdf227d9 100644 --- a/src/io_thread.hpp +++ b/src/io_thread.hpp @@ -77,13 +77,13 @@ class io_thread_t : public object_t, public i_poll_events private: // I/O thread accesses incoming commands via this mailbox. - mailbox_t mailbox; + mailbox_t _mailbox; // Handle associated with mailbox' file descriptor. 
- poller_t::handle_t mailbox_handle; + poller_t::handle_t _mailbox_handle; // I/O multiplexing is performed using a poller object. - poller_t *poller; + poller_t *_poller; io_thread_t (const io_thread_t &); const io_thread_t &operator= (const io_thread_t &); diff --git a/src/ip_resolver.cpp b/src/ip_resolver.cpp index 93bfb9e2..9b7910a7 100644 --- a/src/ip_resolver.cpp +++ b/src/ip_resolver.cpp @@ -95,18 +95,18 @@ zmq::ip_addr_t zmq::ip_addr_t::any (int family_) } zmq::ip_resolver_options_t::ip_resolver_options_t () : - bindable_wanted (false), - nic_name_allowed (false), - ipv6_wanted (false), - port_expected (false), - dns_allowed (false) + _bindable_wanted (false), + _nic_name_allowed (false), + _ipv6_wanted (false), + _port_expected (false), + _dns_allowed (false) { } zmq::ip_resolver_options_t & zmq::ip_resolver_options_t::bindable (bool bindable_) { - bindable_wanted = bindable_; + _bindable_wanted = bindable_; return *this; } @@ -114,14 +114,14 @@ zmq::ip_resolver_options_t::bindable (bool bindable_) zmq::ip_resolver_options_t & zmq::ip_resolver_options_t::allow_nic_name (bool allow_) { - nic_name_allowed = allow_; + _nic_name_allowed = allow_; return *this; } zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::ipv6 (bool ipv6_) { - ipv6_wanted = ipv6_; + _ipv6_wanted = ipv6_; return *this; } @@ -131,45 +131,45 @@ zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::ipv6 (bool ipv6_) zmq::ip_resolver_options_t & zmq::ip_resolver_options_t::expect_port (bool expect_) { - port_expected = expect_; + _port_expected = expect_; return *this; } zmq::ip_resolver_options_t &zmq::ip_resolver_options_t::allow_dns (bool allow_) { - dns_allowed = allow_; + _dns_allowed = allow_; return *this; } bool zmq::ip_resolver_options_t::bindable () { - return bindable_wanted; + return _bindable_wanted; } bool zmq::ip_resolver_options_t::allow_nic_name () { - return nic_name_allowed; + return _nic_name_allowed; } bool zmq::ip_resolver_options_t::ipv6 () { - return 
ipv6_wanted; + return _ipv6_wanted; } bool zmq::ip_resolver_options_t::expect_port () { - return port_expected; + return _port_expected; } bool zmq::ip_resolver_options_t::allow_dns () { - return dns_allowed; + return _dns_allowed; } zmq::ip_resolver_t::ip_resolver_t (ip_resolver_options_t opts_) : - options (opts_) + _options (opts_) { } @@ -178,7 +178,7 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_) std::string addr; uint16_t port; - if (options.expect_port ()) { + if (_options.expect_port ()) { // We expect 'addr:port'. It's important to use str*r*chr to only get // the latest colon since IPv6 addresses use colons as delemiters. const char *delim = strrchr (name_, ':'); @@ -192,7 +192,7 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_) std::string port_str = std::string (delim + 1); if (port_str == "*") { - if (options.bindable ()) { + if (_options.bindable ()) { // Resolve wildcard to 0 to allow autoselection of port port = 0; } else { @@ -248,13 +248,13 @@ int zmq::ip_resolver_t::resolve (ip_addr_t *ip_addr_, const char *name_) bool resolved = false; const char *addr_str = addr.c_str (); - if (options.bindable () && addr == "*") { + if (_options.bindable () && addr == "*") { // Return an ANY address - *ip_addr_ = ip_addr_t::any (options.ipv6 () ? AF_INET6 : AF_INET); + *ip_addr_ = ip_addr_t::any (_options.ipv6 () ? AF_INET6 : AF_INET); resolved = true; } - if (!resolved && options.allow_nic_name ()) { + if (!resolved && _options.allow_nic_name ()) { // Try to resolve the string as a NIC name. int rc = resolve_nic_name (ip_addr_, addr_str); @@ -303,18 +303,18 @@ int zmq::ip_resolver_t::resolve_getaddrinfo (ip_addr_t *ip_addr_, // Choose IPv4 or IPv6 protocol family. Note that IPv6 allows for // IPv4-in-IPv6 addresses. - req.ai_family = options.ipv6 () ? AF_INET6 : AF_INET; + req.ai_family = _options.ipv6 () ? AF_INET6 : AF_INET; // Arbitrary, not used in the output, but avoids duplicate results. 
req.ai_socktype = SOCK_STREAM; req.ai_flags = 0; - if (options.bindable ()) { + if (_options.bindable ()) { req.ai_flags |= AI_PASSIVE; } - if (!options.allow_dns ()) { + if (!_options.allow_dns ()) { req.ai_flags |= AI_NUMERICHOST; } @@ -355,7 +355,7 @@ int zmq::ip_resolver_t::resolve_getaddrinfo (ip_addr_t *ip_addr_, errno = ENOMEM; break; default: - if (options.bindable ()) { + if (_options.bindable ()) { errno = ENODEV; } else { errno = EINVAL; @@ -444,7 +444,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_) { #if defined ZMQ_HAVE_AIX || defined ZMQ_HAVE_HPUX // IPv6 support not implemented for AIX or HP/UX. - if (options.ipv6 ()) { + if (_options.ipv6 ()) { errno = ENODEV; return -1; } @@ -452,7 +452,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_) // Create a socket. const int sd = - open_socket (options.ipv6 () ? AF_INET6 : AF_INET, SOCK_DGRAM, 0); + open_socket (_options.ipv6 () ? AF_INET6 : AF_INET, SOCK_DGRAM, 0); errno_assert (sd != -1); struct ifreq ifr; @@ -472,7 +472,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_) } const int family = ifr.ifr_addr.sa_family; - if (family == (options.ipv6 () ? AF_INET6 : AF_INET) + if (family == (_options.ipv6 () ? AF_INET6 : AF_INET) && !strcmp (nic_, ifr.ifr_name)) { memcpy (ip_addr_, &ifr.ifr_addr, (family == AF_INET) ? sizeof (struct sockaddr_in) @@ -524,7 +524,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_) continue; const int family = ifp->ifa_addr->sa_family; - if (family == (options.ipv6 () ? AF_INET6 : AF_INET) + if (family == (_options.ipv6 () ? AF_INET6 : AF_INET) && !strcmp (nic_, ifp->ifa_name)) { memcpy (ip_addr_, ifp->ifa_addr, (family == AF_INET) ? 
sizeof (struct sockaddr_in) @@ -647,7 +647,7 @@ int zmq::ip_resolver_t::resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_) ADDRESS_FAMILY family = current_unicast_address->Address.lpSockaddr->sa_family; - if (family == (options.ipv6 () ? AF_INET6 : AF_INET)) { + if (family == (_options.ipv6 () ? AF_INET6 : AF_INET)) { memcpy ( ip_addr_, current_unicast_address->Address.lpSockaddr, (family == AF_INET) ? sizeof (struct sockaddr_in) diff --git a/src/ip_resolver.hpp b/src/ip_resolver.hpp index 6380ab7f..161f8885 100644 --- a/src/ip_resolver.hpp +++ b/src/ip_resolver.hpp @@ -73,11 +73,11 @@ class ip_resolver_options_t bool allow_dns (); private: - bool bindable_wanted; - bool nic_name_allowed; - bool ipv6_wanted; - bool port_expected; - bool dns_allowed; + bool _bindable_wanted; + bool _nic_name_allowed; + bool _ipv6_wanted; + bool _port_expected; + bool _dns_allowed; }; class ip_resolver_t @@ -88,7 +88,7 @@ class ip_resolver_t int resolve (ip_addr_t *ip_addr_, const char *name_); protected: - ip_resolver_options_t options; + ip_resolver_options_t _options; int resolve_nic_name (ip_addr_t *ip_addr_, const char *nic_); int resolve_getaddrinfo (ip_addr_t *ip_addr_, const char *addr_); diff --git a/src/lb.cpp b/src/lb.cpp index a53cab3b..4ce785a1 100644 --- a/src/lb.cpp +++ b/src/lb.cpp @@ -33,46 +33,46 @@ #include "err.hpp" #include "msg.hpp" -zmq::lb_t::lb_t () : active (0), current (0), more (false), dropping (false) +zmq::lb_t::lb_t () : _active (0), _current (0), _more (false), _dropping (false) { } zmq::lb_t::~lb_t () { - zmq_assert (pipes.empty ()); + zmq_assert (_pipes.empty ()); } void zmq::lb_t::attach (pipe_t *pipe_) { - pipes.push_back (pipe_); + _pipes.push_back (pipe_); activated (pipe_); } void zmq::lb_t::pipe_terminated (pipe_t *pipe_) { - pipes_t::size_type index = pipes.index (pipe_); + pipes_t::size_type index = _pipes.index (pipe_); // If we are in the middle of multipart message and current pipe // have disconnected, we have to drop the remainder 
of the message. - if (index == current && more) - dropping = true; + if (index == _current && _more) + _dropping = true; // Remove the pipe from the list; adjust number of active pipes // accordingly. - if (index < active) { - active--; - pipes.swap (index, active); - if (current == active) - current = 0; + if (index < _active) { + _active--; + _pipes.swap (index, _active); + if (_current == _active) + _current = 0; } - pipes.erase (pipe_); + _pipes.erase (pipe_); } void zmq::lb_t::activated (pipe_t *pipe_) { // Move the pipe to the list of active pipes. - pipes.swap (pipes.index (pipe_), active); - active++; + _pipes.swap (_pipes.index (pipe_), _active); + _active++; } int zmq::lb_t::send (msg_t *msg_) @@ -84,9 +84,9 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_) { // Drop the message if required. If we are at the end of the message // switch back to non-dropping mode. - if (dropping) { - more = (msg_->flags () & msg_t::more) != 0; - dropping = more; + if (_dropping) { + _more = (msg_->flags () & msg_t::more) != 0; + _dropping = _more; int rc = msg_->close (); errno_assert (rc == 0); @@ -95,44 +95,44 @@ int zmq::lb_t::sendpipe (msg_t *msg_, pipe_t **pipe_) return 0; } - while (active > 0) { - if (pipes[current]->write (msg_)) { + while (_active > 0) { + if (_pipes[_current]->write (msg_)) { if (pipe_) - *pipe_ = pipes[current]; + *pipe_ = _pipes[_current]; break; } // If send fails for multi-part msg rollback other // parts sent earlier and return EAGAIN. // Application should handle this as suitable - if (more) { - pipes[current]->rollback (); - more = false; + if (_more) { + _pipes[_current]->rollback (); + _more = false; errno = EAGAIN; return -1; } - active--; - if (current < active) - pipes.swap (current, active); + _active--; + if (_current < _active) + _pipes.swap (_current, _active); else - current = 0; + _current = 0; } // If there are no pipes we cannot send the message. 
- if (active == 0) { + if (_active == 0) { errno = EAGAIN; return -1; } // If it's final part of the message we can flush it downstream and // continue round-robining (load balance). - more = (msg_->flags () & msg_t::more) != 0; - if (!more) { - pipes[current]->flush (); + _more = (msg_->flags () & msg_t::more) != 0; + if (!_more) { + _pipes[_current]->flush (); - if (++current >= active) - current = 0; + if (++_current >= _active) + _current = 0; } // Detach the message from the data buffer. @@ -146,19 +146,19 @@ bool zmq::lb_t::has_out () { // If one part of the message was already written we can definitely // write the rest of the message. - if (more) + if (_more) return true; - while (active > 0) { + while (_active > 0) { // Check whether a pipe has room for another message. - if (pipes[current]->check_write ()) + if (_pipes[_current]->check_write ()) return true; // Deactivate the pipe. - active--; - pipes.swap (current, active); - if (current == active) - current = 0; + _active--; + _pipes.swap (_current, _active); + if (_current == _active) + _current = 0; } return false; diff --git a/src/lb.hpp b/src/lb.hpp index c18399b5..e9ca4ff6 100644 --- a/src/lb.hpp +++ b/src/lb.hpp @@ -63,20 +63,20 @@ class lb_t private: // List of outbound pipes. typedef array_t pipes_t; - pipes_t pipes; + pipes_t _pipes; // Number of active pipes. All the active pipes are located at the // beginning of the pipes array. - pipes_t::size_type active; + pipes_t::size_type _active; // Points to the last pipe that the most recent message was sent to. - pipes_t::size_type current; + pipes_t::size_type _current; // True if last we are in the middle of a multipart message. - bool more; + bool _more; // True if we are dropping current message. 
- bool dropping; + bool _dropping; lb_t (const lb_t &); const lb_t &operator= (const lb_t &); diff --git a/src/mailbox.cpp b/src/mailbox.cpp index 12a55624..2fb0fe0c 100644 --- a/src/mailbox.cpp +++ b/src/mailbox.cpp @@ -36,71 +36,71 @@ zmq::mailbox_t::mailbox_t () // Get the pipe into passive state. That way, if the users starts by // polling on the associated file descriptor it will get woken up when // new command is posted. - const bool ok = cpipe.check_read (); + const bool ok = _cpipe.check_read (); zmq_assert (!ok); - active = false; + _active = false; } zmq::mailbox_t::~mailbox_t () { - // TODO: Retrieve and deallocate commands inside the cpipe. + // TODO: Retrieve and deallocate commands inside the _cpipe. // Work around problem that other threads might still be in our // send() method, by waiting on the mutex before disappearing. - sync.lock (); - sync.unlock (); + _sync.lock (); + _sync.unlock (); } zmq::fd_t zmq::mailbox_t::get_fd () const { - return signaler.get_fd (); + return _signaler.get_fd (); } void zmq::mailbox_t::send (const command_t &cmd_) { - sync.lock (); - cpipe.write (cmd_, false); - const bool ok = cpipe.flush (); - sync.unlock (); + _sync.lock (); + _cpipe.write (cmd_, false); + const bool ok = _cpipe.flush (); + _sync.unlock (); if (!ok) - signaler.send (); + _signaler.send (); } int zmq::mailbox_t::recv (command_t *cmd_, int timeout_) { // Try to get the command straight away. - if (active) { - if (cpipe.read (cmd_)) + if (_active) { + if (_cpipe.read (cmd_)) return 0; // If there are no more commands available, switch into passive state. - active = false; + _active = false; } // Wait for signal from the command sender. - int rc = signaler.wait (timeout_); + int rc = _signaler.wait (timeout_); if (rc == -1) { errno_assert (errno == EAGAIN || errno == EINTR); return -1; } // Receive the signal. 
- rc = signaler.recv_failable (); + rc = _signaler.recv_failable (); if (rc == -1) { errno_assert (errno == EAGAIN); return -1; } // Switch into active state. - active = true; + _active = true; // Get a command. - const bool ok = cpipe.read (cmd_); + const bool ok = _cpipe.read (cmd_); zmq_assert (ok); return 0; } bool zmq::mailbox_t::valid () const { - return signaler.valid (); + return _signaler.valid (); } diff --git a/src/mailbox.hpp b/src/mailbox.hpp index 596a443a..6c9ca36e 100644 --- a/src/mailbox.hpp +++ b/src/mailbox.hpp @@ -58,26 +58,26 @@ class mailbox_t : public i_mailbox // close the file descriptors in the signaller. This is used in a forked // child process to close the file descriptors so that they do not interfere // with the context in the parent process. - void forked () { signaler.forked (); } + void forked () { _signaler.forked (); } #endif private: // The pipe to store actual commands. typedef ypipe_t cpipe_t; - cpipe_t cpipe; + cpipe_t _cpipe; // Signaler to pass signals from writer thread to reader thread. - signaler_t signaler; + signaler_t _signaler; // There's only one thread receiving from the mailbox, but there // is arbitrary number of threads sending. Given that ypipe requires // synchronised access on both of its endpoints, we have to synchronise // the sending side. - mutex_t sync; + mutex_t _sync; // True if the underlying pipe is active, ie. when we are allowed to // read commands from it. - bool active; + bool _active; // Disable copying of mailbox_t object. mailbox_t (const mailbox_t &); diff --git a/src/mailbox_safe.cpp b/src/mailbox_safe.cpp index 1676e26c..189a7716 100644 --- a/src/mailbox_safe.cpp +++ b/src/mailbox_safe.cpp @@ -32,12 +32,12 @@ #include "clock.hpp" #include "err.hpp" -zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : sync (sync_) +zmq::mailbox_safe_t::mailbox_safe_t (mutex_t *sync_) : _sync (sync_) { // Get the pipe into passive state. 
That way, if the users starts by // polling on the associated file descriptor it will get woken up when // new command is posted. - const bool ok = cpipe.check_read (); + const bool ok = _cpipe.check_read (); zmq_assert (!ok); } @@ -47,66 +47,66 @@ zmq::mailbox_safe_t::~mailbox_safe_t () // Work around problem that other threads might still be in our // send() method, by waiting on the mutex before disappearing. - sync->lock (); - sync->unlock (); + _sync->lock (); + _sync->unlock (); } void zmq::mailbox_safe_t::add_signaler (signaler_t *signaler_) { - signalers.push_back (signaler_); + _signalers.push_back (signaler_); } void zmq::mailbox_safe_t::remove_signaler (signaler_t *signaler_) { - std::vector::iterator it = signalers.begin (); + std::vector::iterator it = _signalers.begin (); // TODO: make a copy of array and signal outside the lock - for (; it != signalers.end (); ++it) { + for (; it != _signalers.end (); ++it) { if (*it == signaler_) break; } - if (it != signalers.end ()) - signalers.erase (it); + if (it != _signalers.end ()) + _signalers.erase (it); } void zmq::mailbox_safe_t::clear_signalers () { - signalers.clear (); + _signalers.clear (); } void zmq::mailbox_safe_t::send (const command_t &cmd_) { - sync->lock (); - cpipe.write (cmd_, false); - const bool ok = cpipe.flush (); + _sync->lock (); + _cpipe.write (cmd_, false); + const bool ok = _cpipe.flush (); if (!ok) { - cond_var.broadcast (); - for (std::vector::iterator it = signalers.begin (); - it != signalers.end (); ++it) { + _cond_var.broadcast (); + for (std::vector::iterator it = _signalers.begin (); + it != _signalers.end (); ++it) { (*it)->send (); } } - sync->unlock (); + _sync->unlock (); } int zmq::mailbox_safe_t::recv (command_t *cmd_, int timeout_) { // Try to get the command straight away. - if (cpipe.read (cmd_)) + if (_cpipe.read (cmd_)) return 0; // Wait for signal from the command sender. 
- int rc = cond_var.wait (sync, timeout_); + int rc = _cond_var.wait (_sync, timeout_); if (rc == -1) { errno_assert (errno == EAGAIN || errno == EINTR); return -1; } // Another thread may already fetch the command - const bool ok = cpipe.read (cmd_); + const bool ok = _cpipe.read (cmd_); if (!ok) { errno = EAGAIN; diff --git a/src/mailbox_safe.hpp b/src/mailbox_safe.hpp index 210b9851..f4f2f75d 100644 --- a/src/mailbox_safe.hpp +++ b/src/mailbox_safe.hpp @@ -71,15 +71,15 @@ class mailbox_safe_t : public i_mailbox private: // The pipe to store actual commands. typedef ypipe_t cpipe_t; - cpipe_t cpipe; + cpipe_t _cpipe; // Condition variable to pass signals from writer thread to reader thread. - condition_variable_t cond_var; + condition_variable_t _cond_var; // Synchronize access to the mailbox from receivers and senders - mutex_t *const sync; + mutex_t *const _sync; - std::vector signalers; + std::vector _signalers; // Disable copying of mailbox_t object. mailbox_safe_t (const mailbox_safe_t &); diff --git a/src/mechanism.cpp b/src/mechanism.cpp index 71aa20a1..aed7d0f8 100644 --- a/src/mechanism.cpp +++ b/src/mechanism.cpp @@ -48,20 +48,20 @@ zmq::mechanism_t::~mechanism_t () void zmq::mechanism_t::set_peer_routing_id (const void *id_ptr_, size_t id_size_) { - routing_id.set (static_cast (id_ptr_), id_size_); + _routing_id.set (static_cast (id_ptr_), id_size_); } void zmq::mechanism_t::peer_routing_id (msg_t *msg_) { - const int rc = msg_->init_size (routing_id.size ()); + const int rc = msg_->init_size (_routing_id.size ()); errno_assert (rc == 0); - memcpy (msg_->data (), routing_id.data (), routing_id.size ()); + memcpy (msg_->data (), _routing_id.data (), _routing_id.size ()); msg_->set_flags (msg_t::routing_id); } void zmq::mechanism_t::set_user_id (const void *data_, size_t size_) { - user_id.set (static_cast (data_), size_); + _user_id.set (static_cast (data_), size_); zap_properties.ZMQ_MAP_INSERT_OR_EMPLACE ( std::string (ZMQ_MSG_PROPERTY_USER_ID), 
std::string ((char *) data_, size_)); @@ -69,7 +69,7 @@ void zmq::mechanism_t::set_user_id (const void *data_, size_t size_) const zmq::blob_t &zmq::mechanism_t::get_user_id () const { - return user_id; + return _user_id; } const char socket_type_pair[] = "PAIR"; diff --git a/src/mechanism.hpp b/src/mechanism.hpp index 1926027f..b96a9d8e 100644 --- a/src/mechanism.hpp +++ b/src/mechanism.hpp @@ -132,9 +132,9 @@ class mechanism_t const options_t options; private: - blob_t routing_id; + blob_t _routing_id; - blob_t user_id; + blob_t _user_id; // Returns true iff socket associated with the mechanism // is compatible with a given socket type 'type_'. diff --git a/src/metadata.cpp b/src/metadata.cpp index ee40322d..ca3cb86e 100644 --- a/src/metadata.cpp +++ b/src/metadata.cpp @@ -30,14 +30,14 @@ #include "precompiled.hpp" #include "metadata.hpp" -zmq::metadata_t::metadata_t (const dict_t &dict_) : ref_cnt (1), dict (dict_) +zmq::metadata_t::metadata_t (const dict_t &dict_) : _ref_cnt (1), _dict (dict_) { } const char *zmq::metadata_t::get (const std::string &property_) const { - dict_t::const_iterator it = dict.find (property_); - if (it == dict.end ()) { + dict_t::const_iterator it = _dict.find (property_); + if (it == _dict.end ()) { /** \todo remove this when support for the deprecated name "Identity" is dropped */ if (property_ == "Identity") return get (ZMQ_MSG_PROPERTY_ROUTING_ID); @@ -49,10 +49,10 @@ const char *zmq::metadata_t::get (const std::string &property_) const void zmq::metadata_t::add_ref () { - ref_cnt.add (1); + _ref_cnt.add (1); } bool zmq::metadata_t::drop_ref () { - return !ref_cnt.sub (1); + return !_ref_cnt.sub (1); } diff --git a/src/metadata.hpp b/src/metadata.hpp index 3ccb3c8f..21479522 100644 --- a/src/metadata.hpp +++ b/src/metadata.hpp @@ -59,10 +59,10 @@ class metadata_t metadata_t &operator= (const metadata_t &); // Reference counter. - atomic_counter_t ref_cnt; + atomic_counter_t _ref_cnt; // Dictionary holding metadata. 
- const dict_t dict; + const dict_t _dict; }; } diff --git a/src/msg.cpp b/src/msg.cpp index 9fb8d04a..8e9ae3c9 100644 --- a/src/msg.cpp +++ b/src/msg.cpp @@ -49,7 +49,7 @@ typedef char bool zmq::msg_t::check () const { - return u.base.type >= type_min && u.base.type <= type_max; + return _u.base.type >= type_min && _u.base.type <= type_max; } int zmq::msg_t::init (void *data_, @@ -76,44 +76,44 @@ int zmq::msg_t::init (void *data_, int zmq::msg_t::init () { - u.vsm.metadata = NULL; - u.vsm.type = type_vsm; - u.vsm.flags = 0; - u.vsm.size = 0; - u.vsm.group[0] = '\0'; - u.vsm.routing_id = 0; + _u.vsm.metadata = NULL; + _u.vsm.type = type_vsm; + _u.vsm.flags = 0; + _u.vsm.size = 0; + _u.vsm.group[0] = '\0'; + _u.vsm.routing_id = 0; return 0; } int zmq::msg_t::init_size (size_t size_) { if (size_ <= max_vsm_size) { - u.vsm.metadata = NULL; - u.vsm.type = type_vsm; - u.vsm.flags = 0; - u.vsm.size = static_cast (size_); - u.vsm.group[0] = '\0'; - u.vsm.routing_id = 0; + _u.vsm.metadata = NULL; + _u.vsm.type = type_vsm; + _u.vsm.flags = 0; + _u.vsm.size = static_cast (size_); + _u.vsm.group[0] = '\0'; + _u.vsm.routing_id = 0; } else { - u.lmsg.metadata = NULL; - u.lmsg.type = type_lmsg; - u.lmsg.flags = 0; - u.lmsg.group[0] = '\0'; - u.lmsg.routing_id = 0; - u.lmsg.content = NULL; + _u.lmsg.metadata = NULL; + _u.lmsg.type = type_lmsg; + _u.lmsg.flags = 0; + _u.lmsg.group[0] = '\0'; + _u.lmsg.routing_id = 0; + _u.lmsg.content = NULL; if (sizeof (content_t) + size_ > size_) - u.lmsg.content = + _u.lmsg.content = static_cast (malloc (sizeof (content_t) + size_)); - if (unlikely (!u.lmsg.content)) { + if (unlikely (!_u.lmsg.content)) { errno = ENOMEM; return -1; } - u.lmsg.content->data = u.lmsg.content + 1; - u.lmsg.content->size = size_; - u.lmsg.content->ffn = NULL; - u.lmsg.content->hint = NULL; - new (&u.lmsg.content->refcnt) zmq::atomic_counter_t (); + _u.lmsg.content->data = _u.lmsg.content + 1; + _u.lmsg.content->size = size_; + _u.lmsg.content->ffn = NULL; + 
_u.lmsg.content->hint = NULL; + new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t (); } return 0; } @@ -127,18 +127,18 @@ int zmq::msg_t::init_external_storage (content_t *content_, zmq_assert (NULL != data_); zmq_assert (NULL != content_); - u.zclmsg.metadata = NULL; - u.zclmsg.type = type_zclmsg; - u.zclmsg.flags = 0; - u.zclmsg.group[0] = '\0'; - u.zclmsg.routing_id = 0; + _u.zclmsg.metadata = NULL; + _u.zclmsg.type = type_zclmsg; + _u.zclmsg.flags = 0; + _u.zclmsg.group[0] = '\0'; + _u.zclmsg.routing_id = 0; - u.zclmsg.content = content_; - u.zclmsg.content->data = data_; - u.zclmsg.content->size = size_; - u.zclmsg.content->ffn = ffn_; - u.zclmsg.content->hint = hint_; - new (&u.zclmsg.content->refcnt) zmq::atomic_counter_t (); + _u.zclmsg.content = content_; + _u.zclmsg.content->data = data_; + _u.zclmsg.content->size = size_; + _u.zclmsg.content->ffn = ffn_; + _u.zclmsg.content->hint = hint_; + new (&_u.zclmsg.content->refcnt) zmq::atomic_counter_t (); return 0; } @@ -154,61 +154,62 @@ int zmq::msg_t::init_data (void *data_, // Initialize constant message if there's no need to deallocate if (ffn_ == NULL) { - u.cmsg.metadata = NULL; - u.cmsg.type = type_cmsg; - u.cmsg.flags = 0; - u.cmsg.data = data_; - u.cmsg.size = size_; - u.cmsg.group[0] = '\0'; - u.cmsg.routing_id = 0; + _u.cmsg.metadata = NULL; + _u.cmsg.type = type_cmsg; + _u.cmsg.flags = 0; + _u.cmsg.data = data_; + _u.cmsg.size = size_; + _u.cmsg.group[0] = '\0'; + _u.cmsg.routing_id = 0; } else { - u.lmsg.metadata = NULL; - u.lmsg.type = type_lmsg; - u.lmsg.flags = 0; - u.lmsg.group[0] = '\0'; - u.lmsg.routing_id = 0; - u.lmsg.content = static_cast (malloc (sizeof (content_t))); - if (!u.lmsg.content) { + _u.lmsg.metadata = NULL; + _u.lmsg.type = type_lmsg; + _u.lmsg.flags = 0; + _u.lmsg.group[0] = '\0'; + _u.lmsg.routing_id = 0; + _u.lmsg.content = + static_cast (malloc (sizeof (content_t))); + if (!_u.lmsg.content) { errno = ENOMEM; return -1; } - u.lmsg.content->data = data_; - 
u.lmsg.content->size = size_; - u.lmsg.content->ffn = ffn_; - u.lmsg.content->hint = hint_; - new (&u.lmsg.content->refcnt) zmq::atomic_counter_t (); + _u.lmsg.content->data = data_; + _u.lmsg.content->size = size_; + _u.lmsg.content->ffn = ffn_; + _u.lmsg.content->hint = hint_; + new (&_u.lmsg.content->refcnt) zmq::atomic_counter_t (); } return 0; } int zmq::msg_t::init_delimiter () { - u.delimiter.metadata = NULL; - u.delimiter.type = type_delimiter; - u.delimiter.flags = 0; - u.delimiter.group[0] = '\0'; - u.delimiter.routing_id = 0; + _u.delimiter.metadata = NULL; + _u.delimiter.type = type_delimiter; + _u.delimiter.flags = 0; + _u.delimiter.group[0] = '\0'; + _u.delimiter.routing_id = 0; return 0; } int zmq::msg_t::init_join () { - u.base.metadata = NULL; - u.base.type = type_join; - u.base.flags = 0; - u.base.group[0] = '\0'; - u.base.routing_id = 0; + _u.base.metadata = NULL; + _u.base.type = type_join; + _u.base.flags = 0; + _u.base.group[0] = '\0'; + _u.base.routing_id = 0; return 0; } int zmq::msg_t::init_leave () { - u.base.metadata = NULL; - u.base.type = type_leave; - u.base.flags = 0; - u.base.group[0] = '\0'; - u.base.routing_id = 0; + _u.base.metadata = NULL; + _u.base.type = type_leave; + _u.base.flags = 0; + _u.base.group[0] = '\0'; + _u.base.routing_id = 0; return 0; } @@ -220,47 +221,47 @@ int zmq::msg_t::close () return -1; } - if (u.base.type == type_lmsg) { + if (_u.base.type == type_lmsg) { // If the content is not shared, or if it is shared and the reference // count has dropped to zero, deallocate it. - if (!(u.lmsg.flags & msg_t::shared) - || !u.lmsg.content->refcnt.sub (1)) { + if (!(_u.lmsg.flags & msg_t::shared) + || !_u.lmsg.content->refcnt.sub (1)) { // We used "placement new" operator to initialize the reference // counter so we call the destructor explicitly now. 
- u.lmsg.content->refcnt.~atomic_counter_t (); + _u.lmsg.content->refcnt.~atomic_counter_t (); - if (u.lmsg.content->ffn) - u.lmsg.content->ffn (u.lmsg.content->data, - u.lmsg.content->hint); - free (u.lmsg.content); + if (_u.lmsg.content->ffn) + _u.lmsg.content->ffn (_u.lmsg.content->data, + _u.lmsg.content->hint); + free (_u.lmsg.content); } } if (is_zcmsg ()) { - zmq_assert (u.zclmsg.content->ffn); + zmq_assert (_u.zclmsg.content->ffn); // If the content is not shared, or if it is shared and the reference // count has dropped to zero, deallocate it. - if (!(u.zclmsg.flags & msg_t::shared) - || !u.zclmsg.content->refcnt.sub (1)) { + if (!(_u.zclmsg.flags & msg_t::shared) + || !_u.zclmsg.content->refcnt.sub (1)) { // We used "placement new" operator to initialize the reference // counter so we call the destructor explicitly now. - u.zclmsg.content->refcnt.~atomic_counter_t (); + _u.zclmsg.content->refcnt.~atomic_counter_t (); - u.zclmsg.content->ffn (u.zclmsg.content->data, - u.zclmsg.content->hint); + _u.zclmsg.content->ffn (_u.zclmsg.content->data, + _u.zclmsg.content->hint); } } - if (u.base.metadata != NULL) { - if (u.base.metadata->drop_ref ()) { - LIBZMQ_DELETE (u.base.metadata); + if (_u.base.metadata != NULL) { + if (_u.base.metadata->drop_ref ()) { + LIBZMQ_DELETE (_u.base.metadata); } - u.base.metadata = NULL; + _u.base.metadata = NULL; } // Make the message invalid. - u.base.type = 0; + _u.base.type = 0; return 0; } @@ -298,29 +299,29 @@ int zmq::msg_t::copy (msg_t &src_) if (unlikely (rc < 0)) return rc; - if (src_.u.base.type == type_lmsg) { + if (src_._u.base.type == type_lmsg) { // One reference is added to shared messages. Non-shared messages // are turned into shared messages and reference count is set to 2. 
- if (src_.u.lmsg.flags & msg_t::shared) - src_.u.lmsg.content->refcnt.add (1); + if (src_._u.lmsg.flags & msg_t::shared) + src_._u.lmsg.content->refcnt.add (1); else { - src_.u.lmsg.flags |= msg_t::shared; - src_.u.lmsg.content->refcnt.set (2); + src_._u.lmsg.flags |= msg_t::shared; + src_._u.lmsg.content->refcnt.set (2); } } if (src_.is_zcmsg ()) { // One reference is added to shared messages. Non-shared messages // are turned into shared messages and reference count is set to 2. - if (src_.u.zclmsg.flags & msg_t::shared) + if (src_._u.zclmsg.flags & msg_t::shared) src_.refcnt ()->add (1); else { - src_.u.zclmsg.flags |= msg_t::shared; + src_._u.zclmsg.flags |= msg_t::shared; src_.refcnt ()->set (2); } } - if (src_.u.base.metadata != NULL) - src_.u.base.metadata->add_ref (); + if (src_._u.base.metadata != NULL) + src_._u.base.metadata->add_ref (); *this = src_; @@ -332,15 +333,15 @@ void *zmq::msg_t::data () // Check the validity of the message. zmq_assert (check ()); - switch (u.base.type) { + switch (_u.base.type) { case type_vsm: - return u.vsm.data; + return _u.vsm.data; case type_lmsg: - return u.lmsg.content->data; + return _u.lmsg.content->data; case type_cmsg: - return u.cmsg.data; + return _u.cmsg.data; case type_zclmsg: - return u.zclmsg.content->data; + return _u.zclmsg.content->data; default: zmq_assert (false); return NULL; @@ -352,15 +353,15 @@ size_t zmq::msg_t::size () const // Check the validity of the message. 
zmq_assert (check ()); - switch (u.base.type) { + switch (_u.base.type) { case type_vsm: - return u.vsm.size; + return _u.vsm.size; case type_lmsg: - return u.lmsg.content->size; + return _u.lmsg.content->size; case type_zclmsg: - return u.zclmsg.content->size; + return _u.zclmsg.content->size; case type_cmsg: - return u.cmsg.size; + return _u.cmsg.size; default: zmq_assert (false); return 0; @@ -369,80 +370,80 @@ size_t zmq::msg_t::size () const unsigned char zmq::msg_t::flags () const { - return u.base.flags; + return _u.base.flags; } void zmq::msg_t::set_flags (unsigned char flags_) { - u.base.flags |= flags_; + _u.base.flags |= flags_; } void zmq::msg_t::reset_flags (unsigned char flags_) { - u.base.flags &= ~flags_; + _u.base.flags &= ~flags_; } zmq::metadata_t *zmq::msg_t::metadata () const { - return u.base.metadata; + return _u.base.metadata; } void zmq::msg_t::set_metadata (zmq::metadata_t *metadata_) { assert (metadata_ != NULL); - assert (u.base.metadata == NULL); + assert (_u.base.metadata == NULL); metadata_->add_ref (); - u.base.metadata = metadata_; + _u.base.metadata = metadata_; } void zmq::msg_t::reset_metadata () { - if (u.base.metadata) { - if (u.base.metadata->drop_ref ()) { - LIBZMQ_DELETE (u.base.metadata); + if (_u.base.metadata) { + if (_u.base.metadata->drop_ref ()) { + LIBZMQ_DELETE (_u.base.metadata); } - u.base.metadata = NULL; + _u.base.metadata = NULL; } } bool zmq::msg_t::is_routing_id () const { - return (u.base.flags & routing_id) == routing_id; + return (_u.base.flags & routing_id) == routing_id; } bool zmq::msg_t::is_credential () const { - return (u.base.flags & credential) == credential; + return (_u.base.flags & credential) == credential; } bool zmq::msg_t::is_delimiter () const { - return u.base.type == type_delimiter; + return _u.base.type == type_delimiter; } bool zmq::msg_t::is_vsm () const { - return u.base.type == type_vsm; + return _u.base.type == type_vsm; } bool zmq::msg_t::is_cmsg () const { - return u.base.type == 
type_cmsg; + return _u.base.type == type_cmsg; } bool zmq::msg_t::is_zcmsg () const { - return u.base.type == type_zclmsg; + return _u.base.type == type_zclmsg; } bool zmq::msg_t::is_join () const { - return u.base.type == type_join; + return _u.base.type == type_join; } bool zmq::msg_t::is_leave () const { - return u.base.type == type_leave; + return _u.base.type == type_leave; } void zmq::msg_t::add_refs (int refs_) @@ -450,7 +451,7 @@ void zmq::msg_t::add_refs (int refs_) zmq_assert (refs_ >= 0); // Operation not supported for messages with metadata. - zmq_assert (u.base.metadata == NULL); + zmq_assert (_u.base.metadata == NULL); // No copies required. if (!refs_) @@ -458,12 +459,12 @@ void zmq::msg_t::add_refs (int refs_) // VSMs, CMSGS and delimiters can be copied straight away. The only // message type that needs special care are long messages. - if (u.base.type == type_lmsg || is_zcmsg ()) { - if (u.base.flags & msg_t::shared) + if (_u.base.type == type_lmsg || is_zcmsg ()) { + if (_u.base.flags & msg_t::shared) refcnt ()->add (refs_); else { refcnt ()->set (refs_ + 1); - u.base.flags |= msg_t::shared; + _u.base.flags |= msg_t::shared; } } } @@ -473,37 +474,37 @@ bool zmq::msg_t::rm_refs (int refs_) zmq_assert (refs_ >= 0); // Operation not supported for messages with metadata. - zmq_assert (u.base.metadata == NULL); + zmq_assert (_u.base.metadata == NULL); // No copies required. if (!refs_) return true; // If there's only one reference close the message. - if ((u.base.type != type_zclmsg && u.base.type != type_lmsg) - || !(u.base.flags & msg_t::shared)) { + if ((_u.base.type != type_zclmsg && _u.base.type != type_lmsg) + || !(_u.base.flags & msg_t::shared)) { close (); return false; } // The only message type that needs special care are long and zcopy messages. 
- if (u.base.type == type_lmsg && !u.lmsg.content->refcnt.sub (refs_)) { + if (_u.base.type == type_lmsg && !_u.lmsg.content->refcnt.sub (refs_)) { // We used "placement new" operator to initialize the reference // counter so we call the destructor explicitly now. - u.lmsg.content->refcnt.~atomic_counter_t (); + _u.lmsg.content->refcnt.~atomic_counter_t (); - if (u.lmsg.content->ffn) - u.lmsg.content->ffn (u.lmsg.content->data, u.lmsg.content->hint); - free (u.lmsg.content); + if (_u.lmsg.content->ffn) + _u.lmsg.content->ffn (_u.lmsg.content->data, _u.lmsg.content->hint); + free (_u.lmsg.content); return false; } - if (is_zcmsg () && !u.zclmsg.content->refcnt.sub (refs_)) { + if (is_zcmsg () && !_u.zclmsg.content->refcnt.sub (refs_)) { // storage for rfcnt is provided externally - if (u.zclmsg.content->ffn) { - u.zclmsg.content->ffn (u.zclmsg.content->data, - u.zclmsg.content->hint); + if (_u.zclmsg.content->ffn) { + _u.zclmsg.content->ffn (_u.zclmsg.content->data, + _u.zclmsg.content->hint); } return false; @@ -514,13 +515,13 @@ bool zmq::msg_t::rm_refs (int refs_) uint32_t zmq::msg_t::get_routing_id () { - return u.base.routing_id; + return _u.base.routing_id; } int zmq::msg_t::set_routing_id (uint32_t routing_id_) { if (routing_id_) { - u.base.routing_id = routing_id_; + _u.base.routing_id = routing_id_; return 0; } errno = EINVAL; @@ -529,13 +530,13 @@ int zmq::msg_t::set_routing_id (uint32_t routing_id_) int zmq::msg_t::reset_routing_id () { - u.base.routing_id = 0; + _u.base.routing_id = 0; return 0; } const char *zmq::msg_t::group () { - return u.base.group; + return _u.base.group; } int zmq::msg_t::set_group (const char *group_) @@ -550,19 +551,19 @@ int zmq::msg_t::set_group (const char *group_, size_t length_) return -1; } - strncpy (u.base.group, group_, length_); - u.base.group[length_] = '\0'; + strncpy (_u.base.group, group_, length_); + _u.base.group[length_] = '\0'; return 0; } zmq::atomic_counter_t *zmq::msg_t::refcnt () { - switch (u.base.type) { 
+ switch (_u.base.type) { case type_lmsg: - return &u.lmsg.content->refcnt; + return &_u.lmsg.content->refcnt; case type_zclmsg: - return &u.zclmsg.content->refcnt; + return &_u.zclmsg.content->refcnt; default: zmq_assert (false); return NULL; diff --git a/src/msg.hpp b/src/msg.hpp index 1ee783bb..2193c819 100644 --- a/src/msg.hpp +++ b/src/msg.hpp @@ -249,7 +249,7 @@ class msg_t char group[16]; uint32_t routing_id; } delimiter; - } u; + } _u; }; inline int close_and_return (zmq::msg_t *msg_, int echo_) diff --git a/src/mutex.hpp b/src/mutex.hpp index 3f591543..3c0e0f03 100644 --- a/src/mutex.hpp +++ b/src/mutex.hpp @@ -43,23 +43,23 @@ namespace zmq class mutex_t { public: - inline mutex_t () { InitializeCriticalSection (&cs); } + inline mutex_t () { InitializeCriticalSection (&_cs); } - inline ~mutex_t () { DeleteCriticalSection (&cs); } + inline ~mutex_t () { DeleteCriticalSection (&_cs); } - inline void lock () { EnterCriticalSection (&cs); } + inline void lock () { EnterCriticalSection (&_cs); } inline bool try_lock () { - return (TryEnterCriticalSection (&cs)) ? true : false; + return (TryEnterCriticalSection (&_cs)) ? true : false; } - inline void unlock () { LeaveCriticalSection (&cs); } + inline void unlock () { LeaveCriticalSection (&_cs); } - inline CRITICAL_SECTION *get_cs () { return &cs; } + inline CRITICAL_SECTION *get_cs () { return &_cs; } private: - CRITICAL_SECTION cs; + CRITICAL_SECTION _cs; // Disable copy construction and assignment. 
mutex_t (const mutex_t &); @@ -79,26 +79,26 @@ class mutex_t public: inline mutex_t () { - m_semId = + _semId = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE); } - inline ~mutex_t () { semDelete (m_semId); } + inline ~mutex_t () { semDelete (_semId); } - inline void lock () { semTake (m_semId, WAIT_FOREVER); } + inline void lock () { semTake (_semId, WAIT_FOREVER); } inline bool try_lock () { - if (semTake (m_semId, NO_WAIT) == OK) { + if (semTake (_semId, NO_WAIT) == OK) { return true; } return false; } - inline void unlock () { semGive (m_semId); } + inline void unlock () { semGive (_semId); } private: - SEM_ID m_semId; + SEM_ID _semId; // Disable copy construction and assignment. mutex_t (const mutex_t &); @@ -117,34 +117,34 @@ class mutex_t public: inline mutex_t () { - int rc = pthread_mutexattr_init (&attr); + int rc = pthread_mutexattr_init (&_attr); posix_assert (rc); - rc = pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); + rc = pthread_mutexattr_settype (&_attr, PTHREAD_MUTEX_RECURSIVE); posix_assert (rc); - rc = pthread_mutex_init (&mutex, &attr); + rc = pthread_mutex_init (&_mutex, &_attr); posix_assert (rc); } inline ~mutex_t () { - int rc = pthread_mutex_destroy (&mutex); + int rc = pthread_mutex_destroy (&_mutex); posix_assert (rc); - rc = pthread_mutexattr_destroy (&attr); + rc = pthread_mutexattr_destroy (&_attr); posix_assert (rc); } inline void lock () { - int rc = pthread_mutex_lock (&mutex); + int rc = pthread_mutex_lock (&_mutex); posix_assert (rc); } inline bool try_lock () { - int rc = pthread_mutex_trylock (&mutex); + int rc = pthread_mutex_trylock (&_mutex); if (rc == EBUSY) return false; @@ -154,15 +154,15 @@ class mutex_t inline void unlock () { - int rc = pthread_mutex_unlock (&mutex); + int rc = pthread_mutex_unlock (&_mutex); posix_assert (rc); } - inline pthread_mutex_t *get_mutex () { return &mutex; } + inline pthread_mutex_t *get_mutex () { return &_mutex; } private: - pthread_mutex_t mutex; - 
pthread_mutexattr_t attr; + pthread_mutex_t _mutex; + pthread_mutexattr_t _attr; // Disable copy construction and assignment. mutex_t (const mutex_t &); @@ -177,12 +177,12 @@ namespace zmq { struct scoped_lock_t { - scoped_lock_t (mutex_t &mutex_) : mutex (mutex_) { mutex.lock (); } + scoped_lock_t (mutex_t &mutex_) : _mutex (mutex_) { _mutex.lock (); } - ~scoped_lock_t () { mutex.unlock (); } + ~scoped_lock_t () { _mutex.unlock (); } private: - mutex_t &mutex; + mutex_t &_mutex; // Disable copy construction and assignment. scoped_lock_t (const scoped_lock_t &); @@ -192,20 +192,20 @@ struct scoped_lock_t struct scoped_optional_lock_t { - scoped_optional_lock_t (mutex_t *mutex_) : mutex (mutex_) + scoped_optional_lock_t (mutex_t *mutex_) : _mutex (mutex_) { - if (mutex != NULL) - mutex->lock (); + if (_mutex != NULL) + _mutex->lock (); } ~scoped_optional_lock_t () { - if (mutex != NULL) - mutex->unlock (); + if (_mutex != NULL) + _mutex->unlock (); } private: - mutex_t *mutex; + mutex_t *_mutex; // Disable copy construction and assignment. 
scoped_optional_lock_t (const scoped_lock_t &); diff --git a/src/null_mechanism.cpp b/src/null_mechanism.cpp index 5e064d86..26a41f73 100644 --- a/src/null_mechanism.cpp +++ b/src/null_mechanism.cpp @@ -43,12 +43,12 @@ zmq::null_mechanism_t::null_mechanism_t (session_base_t *session_, const options_t &options_) : mechanism_base_t (session_, options_), zap_client_t (session_, peer_address_, options_), - ready_command_sent (false), - error_command_sent (false), - ready_command_received (false), - error_command_received (false), - zap_request_sent (false), - zap_reply_received (false) + _ready_command_sent (false), + _error_command_sent (false), + _ready_command_received (false), + _error_command_received (false), + _zap_request_sent (false), + _zap_reply_received (false) { } @@ -58,13 +58,13 @@ zmq::null_mechanism_t::~null_mechanism_t () int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_) { - if (ready_command_sent || error_command_sent) { + if (_ready_command_sent || _error_command_sent) { errno = EAGAIN; return -1; } - if (zap_required () && !zap_reply_received) { - if (zap_request_sent) { + if (zap_required () && !_zap_reply_received) { + if (_zap_request_sent) { errno = EAGAIN; return -1; } @@ -78,7 +78,7 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_) } if (rc == 0) { send_zap_request (); - zap_request_sent = true; + _zap_request_sent = true; // TODO actually, it is quite unlikely that we can read the ZAP // reply already, but removing this has some strange side-effect @@ -88,12 +88,12 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_) if (rc != 0) return -1; - zap_reply_received = true; + _zap_reply_received = true; } } - if (zap_reply_received && status_code != "200") { - error_command_sent = true; + if (_zap_reply_received && status_code != "200") { + _error_command_sent = true; if (status_code != "300") { const size_t status_code_len = 3; const int rc = msg_->init_size (6 + 1 + status_code_len); @@ -111,14 
+111,14 @@ int zmq::null_mechanism_t::next_handshake_command (msg_t *msg_) make_command_with_basic_properties (msg_, "\5READY", 6); - ready_command_sent = true; + _ready_command_sent = true; return 0; } int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_) { - if (ready_command_received || error_command_received) { + if (_ready_command_received || _error_command_received) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND); errno = EPROTO; @@ -153,7 +153,7 @@ int zmq::null_mechanism_t::process_handshake_command (msg_t *msg_) int zmq::null_mechanism_t::process_ready_command ( const unsigned char *cmd_data_, size_t data_size_) { - ready_command_received = true; + _ready_command_received = true; return parse_metadata (cmd_data_ + 6, data_size_ - 6); } @@ -179,29 +179,29 @@ int zmq::null_mechanism_t::process_error_command ( } const char *error_reason = reinterpret_cast (cmd_data_) + 7; handle_error_reason (error_reason, error_reason_len); - error_command_received = true; + _error_command_received = true; return 0; } int zmq::null_mechanism_t::zap_msg_available () { - if (zap_reply_received) { + if (_zap_reply_received) { errno = EFSM; return -1; } const int rc = receive_and_process_zap_reply (); if (rc == 0) - zap_reply_received = true; + _zap_reply_received = true; return rc == -1 ? 
-1 : 0; } zmq::mechanism_t::status_t zmq::null_mechanism_t::status () const { - const bool command_sent = ready_command_sent || error_command_sent; + const bool command_sent = _ready_command_sent || _error_command_sent; const bool command_received = - ready_command_received || error_command_received; + _ready_command_received || _error_command_received; - if (ready_command_sent && ready_command_received) + if (_ready_command_sent && _ready_command_received) return mechanism_t::ready; if (command_sent && command_received) return error; diff --git a/src/null_mechanism.hpp b/src/null_mechanism.hpp index 046b5057..b3f32c55 100644 --- a/src/null_mechanism.hpp +++ b/src/null_mechanism.hpp @@ -54,12 +54,12 @@ class null_mechanism_t : public zap_client_t virtual status_t status () const; private: - bool ready_command_sent; - bool error_command_sent; - bool ready_command_received; - bool error_command_received; - bool zap_request_sent; - bool zap_reply_received; + bool _ready_command_sent; + bool _error_command_sent; + bool _ready_command_received; + bool _error_command_received; + bool _zap_request_sent; + bool _zap_reply_received; int process_ready_command (const unsigned char *cmd_data_, size_t data_size_); diff --git a/src/object.cpp b/src/object.cpp index a10a510e..eb264427 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -39,13 +39,13 @@ #include "session_base.hpp" #include "socket_base.hpp" -zmq::object_t::object_t (ctx_t *ctx_, uint32_t tid_) : ctx (ctx_), tid (tid_) +zmq::object_t::object_t (ctx_t *ctx_, uint32_t tid_) : _ctx (ctx_), _tid (tid_) { } zmq::object_t::object_t (object_t *parent_) : - ctx (parent_->ctx), - tid (parent_->tid) + _ctx (parent_->_ctx), + _tid (parent_->_tid) { } @@ -55,17 +55,17 @@ zmq::object_t::~object_t () uint32_t zmq::object_t::get_tid () { - return tid; + return _tid; } void zmq::object_t::set_tid (uint32_t id_) { - tid = id_; + _tid = id_; } zmq::ctx_t *zmq::object_t::get_ctx () { - return ctx; + return _ctx; } void 
zmq::object_t::process_command (command_t &cmd_) @@ -157,46 +157,46 @@ void zmq::object_t::process_command (command_t &cmd_) int zmq::object_t::register_endpoint (const char *addr_, const endpoint_t &endpoint_) { - return ctx->register_endpoint (addr_, endpoint_); + return _ctx->register_endpoint (addr_, endpoint_); } int zmq::object_t::unregister_endpoint (const std::string &addr_, socket_base_t *socket_) { - return ctx->unregister_endpoint (addr_, socket_); + return _ctx->unregister_endpoint (addr_, socket_); } void zmq::object_t::unregister_endpoints (socket_base_t *socket_) { - return ctx->unregister_endpoints (socket_); + return _ctx->unregister_endpoints (socket_); } zmq::endpoint_t zmq::object_t::find_endpoint (const char *addr_) { - return ctx->find_endpoint (addr_); + return _ctx->find_endpoint (addr_); } void zmq::object_t::pend_connection (const std::string &addr_, const endpoint_t &endpoint_, pipe_t **pipes_) { - ctx->pend_connection (addr_, endpoint_, pipes_); + _ctx->pend_connection (addr_, endpoint_, pipes_); } void zmq::object_t::connect_pending (const char *addr_, zmq::socket_base_t *bind_socket_) { - return ctx->connect_pending (addr_, bind_socket_); + return _ctx->connect_pending (addr_, bind_socket_); } void zmq::object_t::destroy_socket (socket_base_t *socket_) { - ctx->destroy_socket (socket_); + _ctx->destroy_socket (socket_); } zmq::io_thread_t *zmq::object_t::choose_io_thread (uint64_t affinity_) { - return ctx->choose_io_thread (affinity_); + return _ctx->choose_io_thread (affinity_); } void zmq::object_t::send_stop () @@ -206,7 +206,7 @@ void zmq::object_t::send_stop () command_t cmd; cmd.destination = this; cmd.type = command_t::stop; - ctx->send_command (tid, cmd); + _ctx->send_command (_tid, cmd); } void zmq::object_t::send_plug (own_t *destination_, bool inc_seqnum_) @@ -352,7 +352,7 @@ void zmq::object_t::send_term_endpoint (own_t *destination_, void zmq::object_t::send_reap (class socket_base_t *socket_) { command_t cmd; - 
cmd.destination = ctx->get_reaper (); + cmd.destination = _ctx->get_reaper (); cmd.type = command_t::reap; cmd.args.reap.socket = socket_; send_command (cmd); @@ -361,7 +361,7 @@ void zmq::object_t::send_reap (class socket_base_t *socket_) void zmq::object_t::send_reaped () { command_t cmd; - cmd.destination = ctx->get_reaper (); + cmd.destination = _ctx->get_reaper (); cmd.type = command_t::reaped; send_command (cmd); } @@ -379,7 +379,7 @@ void zmq::object_t::send_done () command_t cmd; cmd.destination = NULL; cmd.type = command_t::done; - ctx->send_command (ctx_t::term_tid, cmd); + _ctx->send_command (ctx_t::term_tid, cmd); } void zmq::object_t::process_stop () @@ -474,5 +474,5 @@ void zmq::object_t::process_seqnum () void zmq::object_t::send_command (command_t &cmd_) { - ctx->send_command (cmd_.destination->get_tid (), cmd_); + _ctx->send_command (cmd_.destination->get_tid (), cmd_); } diff --git a/src/object.hpp b/src/object.hpp index 59501db3..10d7bffe 100644 --- a/src/object.hpp +++ b/src/object.hpp @@ -134,10 +134,10 @@ class object_t private: // Context provides access to the global state. - zmq::ctx_t *const ctx; + zmq::ctx_t *const _ctx; // Thread ID of the thread the object belongs to. 
- uint32_t tid; + uint32_t _tid; void send_command (command_t &cmd_); diff --git a/src/own.cpp b/src/own.cpp index 82f41872..9874a41e 100644 --- a/src/own.cpp +++ b/src/own.cpp @@ -34,22 +34,22 @@ zmq::own_t::own_t (class ctx_t *parent_, uint32_t tid_) : object_t (parent_, tid_), - terminating (false), - sent_seqnum (0), - processed_seqnum (0), - owner (NULL), - term_acks (0) + _terminating (false), + _sent_seqnum (0), + _processed_seqnum (0), + _owner (NULL), + _term_acks (0) { } zmq::own_t::own_t (io_thread_t *io_thread_, const options_t &options_) : object_t (io_thread_), options (options_), - terminating (false), - sent_seqnum (0), - processed_seqnum (0), - owner (NULL), - term_acks (0) + _terminating (false), + _sent_seqnum (0), + _processed_seqnum (0), + _owner (NULL), + _term_acks (0) { } @@ -59,20 +59,20 @@ zmq::own_t::~own_t () void zmq::own_t::set_owner (own_t *owner_) { - zmq_assert (!owner); - owner = owner_; + zmq_assert (!_owner); + _owner = owner_; } void zmq::own_t::inc_seqnum () { // This function may be called from a different thread! - sent_seqnum.add (1); + _sent_seqnum.add (1); } void zmq::own_t::process_seqnum () { // Catch up with counter of processed commands. - processed_seqnum++; + _processed_seqnum++; // We may have catched up and still have pending terms acks. check_term_acks (); @@ -99,18 +99,18 @@ void zmq::own_t::process_term_req (own_t *object_) { // When shutting down we can ignore termination requests from owned // objects. The termination request was already sent to the object. - if (terminating) + if (_terminating) return; // If I/O object is well and alive let's ask it to terminate. - owned_t::iterator it = std::find (owned.begin (), owned.end (), object_); + owned_t::iterator it = std::find (_owned.begin (), _owned.end (), object_); // If not found, we assume that termination request was already sent to // the object so we can safely ignore the request. 
- if (it == owned.end ()) + if (it == _owned.end ()) return; - owned.erase (it); + _owned.erase (it); register_term_acks (1); // Note that this object is the root of the (partial shutdown) thus, its @@ -122,65 +122,65 @@ void zmq::own_t::process_own (own_t *object_) { // If the object is already being shut down, new owned objects are // immediately asked to terminate. Note that linger is set to zero. - if (terminating) { + if (_terminating) { register_term_acks (1); send_term (object_, 0); return; } // Store the reference to the owned object. - owned.insert (object_); + _owned.insert (object_); } void zmq::own_t::terminate () { // If termination is already underway, there's no point // in starting it anew. - if (terminating) + if (_terminating) return; // As for the root of the ownership tree, there's no one to terminate it, // so it has to terminate itself. - if (!owner) { + if (!_owner) { process_term (options.linger.load ()); return; } // If I am an owned object, I'll ask my owner to terminate me. - send_term_req (owner, this); + send_term_req (_owner, this); } bool zmq::own_t::is_terminating () { - return terminating; + return _terminating; } void zmq::own_t::process_term (int linger_) { // Double termination should never happen. - zmq_assert (!terminating); + zmq_assert (!_terminating); // Send termination request to all owned objects. - for (owned_t::iterator it = owned.begin (); it != owned.end (); ++it) + for (owned_t::iterator it = _owned.begin (); it != _owned.end (); ++it) send_term (*it, linger_); - register_term_acks (static_cast (owned.size ())); - owned.clear (); + register_term_acks (static_cast (_owned.size ())); + _owned.clear (); // Start termination process and check whether by chance we cannot // terminate immediately. 
- terminating = true; + _terminating = true; check_term_acks (); } void zmq::own_t::register_term_acks (int count_) { - term_acks += count_; + _term_acks += count_; } void zmq::own_t::unregister_term_ack () { - zmq_assert (term_acks > 0); - term_acks--; + zmq_assert (_term_acks > 0); + _term_acks--; // This may be a last ack we are waiting for before termination... check_term_acks (); @@ -193,15 +193,15 @@ void zmq::own_t::process_term_ack () void zmq::own_t::check_term_acks () { - if (terminating && processed_seqnum == sent_seqnum.get () - && term_acks == 0) { + if (_terminating && _processed_seqnum == _sent_seqnum.get () + && _term_acks == 0) { // Sanity check. There should be no active children at this point. - zmq_assert (owned.empty ()); + zmq_assert (_owned.empty ()); // The root object has nobody to confirm the termination to. // Other nodes will confirm the termination to the owner. - if (owner) - send_term_ack (owner); + if (_owner) + send_term_ack (_owner); // Deallocate the resources. process_destroy (); diff --git a/src/own.hpp b/src/own.hpp index 82bcf297..b82af2af 100644 --- a/src/own.hpp +++ b/src/own.hpp @@ -120,25 +120,25 @@ class own_t : public object_t // True if termination was already initiated. If so, we can destroy // the object if there are no more child objects or pending term acks. - bool terminating; + bool _terminating; // Sequence number of the last command sent to this object. - atomic_counter_t sent_seqnum; + atomic_counter_t _sent_seqnum; // Sequence number of the last command processed by this object. - uint64_t processed_seqnum; + uint64_t _processed_seqnum; // Socket owning this object. It's responsible for shutting down // this object. - own_t *owner; + own_t *_owner; // List of all objects owned by this socket. We are responsible // for deallocating them before we quit. typedef std::set owned_t; - owned_t owned; + owned_t _owned; // Number of events we have to get before we can destroy the object. 
- int term_acks; + int _term_acks; own_t (const own_t &); const own_t &operator= (const own_t &); diff --git a/src/pair.cpp b/src/pair.cpp index cb60c858..25d4c235 100644 --- a/src/pair.cpp +++ b/src/pair.cpp @@ -36,15 +36,15 @@ zmq::pair_t::pair_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - pipe (NULL), - last_in (NULL) + _pipe (NULL), + _last_in (NULL) { options.type = ZMQ_PAIR; } zmq::pair_t::~pair_t () { - zmq_assert (!pipe); + zmq_assert (!_pipe); } void zmq::pair_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) @@ -55,20 +55,20 @@ void zmq::pair_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) // ZMQ_PAIR socket can only be connected to a single peer. // The socket rejects any further connection requests. - if (pipe == NULL) - pipe = pipe_; + if (_pipe == NULL) + _pipe = pipe_; else pipe_->terminate (false); } void zmq::pair_t::xpipe_terminated (pipe_t *pipe_) { - if (pipe_ == pipe) { - if (last_in == pipe) { - saved_credential.set_deep_copy (last_in->get_credential ()); - last_in = NULL; + if (pipe_ == _pipe) { + if (_last_in == _pipe) { + _saved_credential.set_deep_copy (_last_in->get_credential ()); + _last_in = NULL; } - pipe = NULL; + _pipe = NULL; } } @@ -86,13 +86,13 @@ void zmq::pair_t::xwrite_activated (pipe_t *) int zmq::pair_t::xsend (msg_t *msg_) { - if (!pipe || !pipe->write (msg_)) { + if (!_pipe || !_pipe->write (msg_)) { errno = EAGAIN; return -1; } if (!(msg_->flags () & msg_t::more)) - pipe->flush (); + _pipe->flush (); // Detach the original message from the data buffer. int rc = msg_->init (); @@ -107,7 +107,7 @@ int zmq::pair_t::xrecv (msg_t *msg_) int rc = msg_->close (); errno_assert (rc == 0); - if (!pipe || !pipe->read (msg_)) { + if (!_pipe || !_pipe->read (msg_)) { // Initialise the output parameter to be a 0-byte message. 
rc = msg_->init (); errno_assert (rc == 0); @@ -115,27 +115,27 @@ int zmq::pair_t::xrecv (msg_t *msg_) errno = EAGAIN; return -1; } - last_in = pipe; + _last_in = _pipe; return 0; } bool zmq::pair_t::xhas_in () { - if (!pipe) + if (!_pipe) return false; - return pipe->check_read (); + return _pipe->check_read (); } bool zmq::pair_t::xhas_out () { - if (!pipe) + if (!_pipe) return false; - return pipe->check_write (); + return _pipe->check_write (); } const zmq::blob_t &zmq::pair_t::get_credential () const { - return last_in ? last_in->get_credential () : saved_credential; + return _last_in ? _last_in->get_credential () : _saved_credential; } diff --git a/src/pair.hpp b/src/pair.hpp index e641d7c4..63625b3b 100644 --- a/src/pair.hpp +++ b/src/pair.hpp @@ -59,11 +59,11 @@ class pair_t : public socket_base_t void xpipe_terminated (zmq::pipe_t *pipe_); private: - zmq::pipe_t *pipe; + zmq::pipe_t *_pipe; - zmq::pipe_t *last_in; + zmq::pipe_t *_last_in; - blob_t saved_credential; + blob_t _saved_credential; pair_t (const pair_t &); const pair_t &operator= (const pair_t &); diff --git a/src/pipe.cpp b/src/pipe.cpp index 4e062e7a..c37cf903 100644 --- a/src/pipe.cpp +++ b/src/pipe.cpp @@ -83,23 +83,23 @@ zmq::pipe_t::pipe_t (object_t *parent_, int outhwm_, bool conflate_) : object_t (parent_), - inpipe (inpipe_), - outpipe (outpipe_), - in_active (true), - out_active (true), - hwm (outhwm_), - lwm (compute_lwm (inhwm_)), - inhwmboost (-1), - outhwmboost (-1), - msgs_read (0), - msgs_written (0), - peers_msgs_read (0), - peer (NULL), - sink (NULL), - state (active), - delay (true), - server_socket_routing_id (0), - conflate (conflate_) + _in_pipe (inpipe_), + _out_pipe (outpipe_), + _in_active (true), + _out_active (true), + _hwm (outhwm_), + _lwm (compute_lwm (inhwm_)), + _in_hwm_boost (-1), + _out_hwm_boost (-1), + _msgs_read (0), + _msgs_written (0), + _peers_msgs_read (0), + _peer (NULL), + _sink (NULL), + _state (active), + _delay (true), + _server_socket_routing_id 
(0), + _conflate (conflate_) { } @@ -110,62 +110,62 @@ zmq::pipe_t::~pipe_t () void zmq::pipe_t::set_peer (pipe_t *peer_) { // Peer can be set once only. - zmq_assert (!peer); - peer = peer_; + zmq_assert (!_peer); + _peer = peer_; } void zmq::pipe_t::set_event_sink (i_pipe_events *sink_) { // Sink can be set once only. - zmq_assert (!sink); - sink = sink_; + zmq_assert (!_sink); + _sink = sink_; } void zmq::pipe_t::set_server_socket_routing_id ( uint32_t server_socket_routing_id_) { - server_socket_routing_id = server_socket_routing_id_; + _server_socket_routing_id = server_socket_routing_id_; } uint32_t zmq::pipe_t::get_server_socket_routing_id () { - return server_socket_routing_id; + return _server_socket_routing_id; } void zmq::pipe_t::set_router_socket_routing_id ( const blob_t &router_socket_routing_id_) { - router_socket_routing_id.set_deep_copy (router_socket_routing_id_); + _router_socket_routing_id.set_deep_copy (router_socket_routing_id_); } const zmq::blob_t &zmq::pipe_t::get_routing_id () { - return router_socket_routing_id; + return _router_socket_routing_id; } const zmq::blob_t &zmq::pipe_t::get_credential () const { - return credential; + return _credential; } bool zmq::pipe_t::check_read () { - if (unlikely (!in_active)) + if (unlikely (!_in_active)) return false; - if (unlikely (state != active && state != waiting_for_delimiter)) + if (unlikely (_state != active && _state != waiting_for_delimiter)) return false; // Check if there's an item in the pipe. - if (!inpipe->check_read ()) { - in_active = false; + if (!_in_pipe->check_read ()) { + _in_active = false; return false; } // If the next item in the pipe is message delimiter, // initiate termination process. 
- if (inpipe->probe (is_delimiter)) { + if (_in_pipe->probe (is_delimiter)) { msg_t msg; - bool ok = inpipe->read (&msg); + bool ok = _in_pipe->read (&msg); zmq_assert (ok); process_delimiter (); return false; @@ -176,14 +176,14 @@ bool zmq::pipe_t::check_read () bool zmq::pipe_t::read (msg_t *msg_) { - if (unlikely (!in_active)) + if (unlikely (!_in_active)) return false; - if (unlikely (state != active && state != waiting_for_delimiter)) + if (unlikely (_state != active && _state != waiting_for_delimiter)) return false; for (bool payload_read = false; !payload_read;) { - if (!inpipe->read (msg_)) { - in_active = false; + if (!_in_pipe->read (msg_)) { + _in_active = false; return false; } @@ -191,7 +191,7 @@ bool zmq::pipe_t::read (msg_t *msg_) if (unlikely (msg_->is_credential ())) { const unsigned char *data = static_cast (msg_->data ()); - credential.set (data, msg_->size ()); + _credential.set (data, msg_->size ()); const int rc = msg_->close (); zmq_assert (rc == 0); } else @@ -205,23 +205,23 @@ bool zmq::pipe_t::read (msg_t *msg_) } if (!(msg_->flags () & msg_t::more) && !msg_->is_routing_id ()) - msgs_read++; + _msgs_read++; - if (lwm > 0 && msgs_read % lwm == 0) - send_activate_write (peer, msgs_read); + if (_lwm > 0 && _msgs_read % _lwm == 0) + send_activate_write (_peer, _msgs_read); return true; } bool zmq::pipe_t::check_write () { - if (unlikely (!out_active || state != active)) + if (unlikely (!_out_active || _state != active)) return false; bool full = !check_hwm (); if (unlikely (full)) { - out_active = false; + _out_active = false; return false; } @@ -235,9 +235,9 @@ bool zmq::pipe_t::write (msg_t *msg_) bool more = (msg_->flags () & msg_t::more) != 0; const bool is_routing_id = msg_->is_routing_id (); - outpipe->write (*msg_, more); + _out_pipe->write (*msg_, more); if (!more && !is_routing_id) - msgs_written++; + _msgs_written++; return true; } @@ -246,8 +246,8 @@ void zmq::pipe_t::rollback () { // Remove incomplete message from the outbound 
pipe. msg_t msg; - if (outpipe) { - while (outpipe->unwrite (&msg)) { + if (_out_pipe) { + while (_out_pipe->unwrite (&msg)) { zmq_assert (msg.flags () & msg_t::more); int rc = msg.close (); errno_assert (rc == 0); @@ -258,29 +258,29 @@ void zmq::pipe_t::rollback () void zmq::pipe_t::flush () { // The peer does not exist anymore at this point. - if (state == term_ack_sent) + if (_state == term_ack_sent) return; - if (outpipe && !outpipe->flush ()) - send_activate_read (peer); + if (_out_pipe && !_out_pipe->flush ()) + send_activate_read (_peer); } void zmq::pipe_t::process_activate_read () { - if (!in_active && (state == active || state == waiting_for_delimiter)) { - in_active = true; - sink->read_activated (this); + if (!_in_active && (_state == active || _state == waiting_for_delimiter)) { + _in_active = true; + _sink->read_activated (this); } } void zmq::pipe_t::process_activate_write (uint64_t msgs_read_) { // Remember the peer's message sequence number. - peers_msgs_read = msgs_read_; + _peers_msgs_read = msgs_read_; - if (!out_active && state == active) { - out_active = true; - sink->write_activated (this); + if (!_out_active && _state == active) { + _out_active = true; + _sink->write_activated (this); } } @@ -288,80 +288,80 @@ void zmq::pipe_t::process_hiccup (void *pipe_) { // Destroy old outpipe. Note that the read end of the pipe was already // migrated to this thread. - zmq_assert (outpipe); - outpipe->flush (); + zmq_assert (_out_pipe); + _out_pipe->flush (); msg_t msg; - while (outpipe->read (&msg)) { + while (_out_pipe->read (&msg)) { if (!(msg.flags () & msg_t::more)) - msgs_written--; + _msgs_written--; int rc = msg.close (); errno_assert (rc == 0); } - LIBZMQ_DELETE (outpipe); + LIBZMQ_DELETE (_out_pipe); // Plug in the new outpipe. zmq_assert (pipe_); - outpipe = static_cast (pipe_); - out_active = true; + _out_pipe = static_cast (pipe_); + _out_active = true; // If appropriate, notify the user about the hiccup. 
- if (state == active) - sink->hiccuped (this); + if (_state == active) + _sink->hiccuped (this); } void zmq::pipe_t::process_pipe_term () { - zmq_assert (state == active || state == delimiter_received - || state == term_req_sent1); + zmq_assert (_state == active || _state == delimiter_received + || _state == term_req_sent1); // This is the simple case of peer-induced termination. If there are no // more pending messages to read, or if the pipe was configured to drop // pending messages, we can move directly to the term_ack_sent state. // Otherwise we'll hang up in waiting_for_delimiter state till all // pending messages are read. - if (state == active) { - if (delay) - state = waiting_for_delimiter; + if (_state == active) { + if (_delay) + _state = waiting_for_delimiter; else { - state = term_ack_sent; - outpipe = NULL; - send_pipe_term_ack (peer); + _state = term_ack_sent; + _out_pipe = NULL; + send_pipe_term_ack (_peer); } } // Delimiter happened to arrive before the term command. Now we have the // term command as well, so we can move straight to term_ack_sent state. - else if (state == delimiter_received) { - state = term_ack_sent; - outpipe = NULL; - send_pipe_term_ack (peer); + else if (_state == delimiter_received) { + _state = term_ack_sent; + _out_pipe = NULL; + send_pipe_term_ack (_peer); } // This is the case where both ends of the pipe are closed in parallel. // We simply reply to the request by ack and continue waiting for our // own ack. - else if (state == term_req_sent1) { - state = term_req_sent2; - outpipe = NULL; - send_pipe_term_ack (peer); + else if (_state == term_req_sent1) { + _state = term_req_sent2; + _out_pipe = NULL; + send_pipe_term_ack (_peer); } } void zmq::pipe_t::process_pipe_term_ack () { // Notify the user that all the references to the pipe should be dropped. 
- zmq_assert (sink); - sink->pipe_terminated (this); + zmq_assert (_sink); + _sink->pipe_terminated (this); // In term_ack_sent and term_req_sent2 states there's nothing to do. // Simply deallocate the pipe. In term_req_sent1 state we have to ack // the peer before deallocating this side of the pipe. // All the other states are invalid. - if (state == term_req_sent1) { - outpipe = NULL; - send_pipe_term_ack (peer); + if (_state == term_req_sent1) { + _out_pipe = NULL; + send_pipe_term_ack (_peer); } else - zmq_assert (state == term_ack_sent || state == term_req_sent2); + zmq_assert (_state == term_ack_sent || _state == term_req_sent2); // We'll deallocate the inbound pipe, the peer will deallocate the outbound // pipe (which is an inbound pipe from its point of view). @@ -369,15 +369,15 @@ void zmq::pipe_t::process_pipe_term_ack () // hand because msg_t doesn't have automatic destructor. Then deallocate // the ypipe itself. - if (!conflate) { + if (!_conflate) { msg_t msg; - while (inpipe->read (&msg)) { + while (_in_pipe->read (&msg)) { int rc = msg.close (); errno_assert (rc == 0); } } - LIBZMQ_DELETE (inpipe); + LIBZMQ_DELETE (_in_pipe); // Deallocate the pipe object delete this; @@ -390,47 +390,47 @@ void zmq::pipe_t::process_pipe_hwm (int inhwm_, int outhwm_) void zmq::pipe_t::set_nodelay () { - this->delay = false; + this->_delay = false; } void zmq::pipe_t::terminate (bool delay_) { // Overload the value specified at pipe creation. - delay = delay_; + _delay = delay_; // If terminate was already called, we can ignore the duplicate invocation. - if (state == term_req_sent1 || state == term_req_sent2) { + if (_state == term_req_sent1 || _state == term_req_sent2) { return; } // If the pipe is in the final phase of async termination, it's going to // closed anyway. No need to do anything special here. - if (state == term_ack_sent) { + if (_state == term_ack_sent) { return; } // The simple sync termination case. Ask the peer to terminate and wait // for the ack. 
- else if (state == active) { - send_pipe_term (peer); - state = term_req_sent1; + else if (_state == active) { + send_pipe_term (_peer); + _state = term_req_sent1; } // There are still pending messages available, but the user calls // 'terminate'. We can act as if all the pending messages were read. - else if (state == waiting_for_delimiter && !delay) { + else if (_state == waiting_for_delimiter && !_delay) { // Drop any unfinished outbound messages. rollback (); - outpipe = NULL; - send_pipe_term_ack (peer); - state = term_ack_sent; + _out_pipe = NULL; + send_pipe_term_ack (_peer); + _state = term_ack_sent; } // If there are pending messages still available, do nothing. - else if (state == waiting_for_delimiter) { + else if (_state == waiting_for_delimiter) { } // We've already got delimiter, but not term command yet. We can ignore // the delimiter and ack synchronously terminate as if we were in // active state. - else if (state == delimiter_received) { - send_pipe_term (peer); - state = term_req_sent1; + else if (_state == delimiter_received) { + send_pipe_term (_peer); + _state = term_req_sent1; } // There are no other states. else { @@ -438,9 +438,9 @@ void zmq::pipe_t::terminate (bool delay_) } // Stop outbound flow of messages. - out_active = false; + _out_active = false; - if (outpipe) { + if (_out_pipe) { // Drop any unfinished outbound messages. rollback (); @@ -448,7 +448,7 @@ void zmq::pipe_t::terminate (bool delay_) // checked; thus the delimiter can be written even when the pipe is full. 
msg_t msg; msg.init_delimiter (); - outpipe->write (msg, false); + _out_pipe->write (msg, false); flush (); } } @@ -483,69 +483,70 @@ int zmq::pipe_t::compute_lwm (int hwm_) void zmq::pipe_t::process_delimiter () { - zmq_assert (state == active || state == waiting_for_delimiter); + zmq_assert (_state == active || _state == waiting_for_delimiter); - if (state == active) - state = delimiter_received; + if (_state == active) + _state = delimiter_received; else { - outpipe = NULL; - send_pipe_term_ack (peer); - state = term_ack_sent; + _out_pipe = NULL; + send_pipe_term_ack (_peer); + _state = term_ack_sent; } } void zmq::pipe_t::hiccup () { // If termination is already under way do nothing. - if (state != active) + if (_state != active) return; // We'll drop the pointer to the inpipe. From now on, the peer is // responsible for deallocating it. - inpipe = NULL; + _in_pipe = NULL; // Create new inpipe. - if (conflate) - inpipe = new (std::nothrow) ypipe_conflate_t (); + if (_conflate) + _in_pipe = new (std::nothrow) ypipe_conflate_t (); else - inpipe = new (std::nothrow) ypipe_t (); + _in_pipe = + new (std::nothrow) ypipe_t (); - alloc_assert (inpipe); - in_active = true; + alloc_assert (_in_pipe); + _in_active = true; // Notify the peer about the hiccup. - send_hiccup (peer, (void *) inpipe); + send_hiccup (_peer, (void *) _in_pipe); } void zmq::pipe_t::set_hwms (int inhwm_, int outhwm_) { - int in = inhwm_ + (inhwmboost > 0 ? inhwmboost : 0); - int out = outhwm_ + (outhwmboost > 0 ? outhwmboost : 0); + int in = inhwm_ + (_in_hwm_boost > 0 ? _in_hwm_boost : 0); + int out = outhwm_ + (_out_hwm_boost > 0 ? 
_out_hwm_boost : 0); // if either send or recv side has hwm <= 0 it means infinite so we should set hwms infinite - if (inhwm_ <= 0 || inhwmboost == 0) + if (inhwm_ <= 0 || _in_hwm_boost == 0) in = 0; - if (outhwm_ <= 0 || outhwmboost == 0) + if (outhwm_ <= 0 || _out_hwm_boost == 0) out = 0; - lwm = compute_lwm (in); - hwm = out; + _lwm = compute_lwm (in); + _hwm = out; } void zmq::pipe_t::set_hwms_boost (int inhwmboost_, int outhwmboost_) { - inhwmboost = inhwmboost_; - outhwmboost = outhwmboost_; + _in_hwm_boost = inhwmboost_; + _out_hwm_boost = outhwmboost_; } bool zmq::pipe_t::check_hwm () const { - bool full = hwm > 0 && msgs_written - peers_msgs_read >= uint64_t (hwm); + bool full = _hwm > 0 && _msgs_written - _peers_msgs_read >= uint64_t (_hwm); return (!full); } void zmq::pipe_t::send_hwms_to_peer (int inhwm_, int outhwm_) { - send_pipe_hwm (peer, inhwm_, outhwm_); + send_pipe_hwm (_peer, inhwm_, outhwm_); } diff --git a/src/pipe.hpp b/src/pipe.hpp index 9c9d9192..3376bb29 100644 --- a/src/pipe.hpp +++ b/src/pipe.hpp @@ -174,36 +174,36 @@ class pipe_t : public object_t, ~pipe_t (); // Underlying pipes for both directions. - upipe_t *inpipe; - upipe_t *outpipe; + upipe_t *_in_pipe; + upipe_t *_out_pipe; // Can the pipe be read from / written to? - bool in_active; - bool out_active; + bool _in_active; + bool _out_active; // High watermark for the outbound pipe. - int hwm; + int _hwm; // Low watermark for the inbound pipe. - int lwm; + int _lwm; // boosts for high and low watermarks, used with inproc sockets so hwm are sum of send and recv hmws on each side of pipe - int inhwmboost; - int outhwmboost; + int _in_hwm_boost; + int _out_hwm_boost; // Number of messages read and written so far. - uint64_t msgs_read; - uint64_t msgs_written; + uint64_t _msgs_read; + uint64_t _msgs_written; // Last received peer's msgs_read. The actual number in the peer // can be higher at the moment. 
- uint64_t peers_msgs_read; + uint64_t _peers_msgs_read; // The pipe object on the other side of the pipepair. - pipe_t *peer; + pipe_t *_peer; // Sink to send events to. - i_pipe_events *sink; + i_pipe_events *_sink; // States of the pipe endpoint: // active: common state before any termination begins, @@ -224,21 +224,21 @@ class pipe_t : public object_t, term_ack_sent, term_req_sent1, term_req_sent2 - } state; + } _state; // If true, we receive all the pending inbound messages before // terminating. If false, we terminate immediately when the peer // asks us to. - bool delay; + bool _delay; // Routing id of the writer. Used uniquely by the reader side. - blob_t router_socket_routing_id; + blob_t _router_socket_routing_id; // Routing id of the writer. Used uniquely by the reader side. - int server_socket_routing_id; + int _server_socket_routing_id; // Pipe's credential. - blob_t credential; + blob_t _credential; // Returns true if the message is delimiter; false otherwise. static bool is_delimiter (const msg_t &msg_); @@ -246,7 +246,7 @@ class pipe_t : public object_t, // Computes appropriate low watermark from the given high watermark. static int compute_lwm (int hwm_); - const bool conflate; + const bool _conflate; // Disable copying. 
pipe_t (const pipe_t &); diff --git a/src/plain_client.cpp b/src/plain_client.cpp index ac75eb9c..17635139 100644 --- a/src/plain_client.cpp +++ b/src/plain_client.cpp @@ -40,7 +40,7 @@ zmq::plain_client_t::plain_client_t (session_base_t *const session_, const options_t &options_) : mechanism_base_t (session_, options_), - state (sending_hello) + _state (sending_hello) { } @@ -52,16 +52,16 @@ int zmq::plain_client_t::next_handshake_command (msg_t *msg_) { int rc = 0; - switch (state) { + switch (_state) { case sending_hello: rc = produce_hello (msg_); if (rc == 0) - state = waiting_for_welcome; + _state = waiting_for_welcome; break; case sending_initiate: rc = produce_initiate (msg_); if (rc == 0) - state = waiting_for_ready; + _state = waiting_for_ready; break; default: errno = EAGAIN; @@ -102,9 +102,9 @@ int zmq::plain_client_t::process_handshake_command (msg_t *msg_) zmq::mechanism_t::status_t zmq::plain_client_t::status () const { - if (state == ready) + if (_state == ready) return mechanism_t::ready; - if (state == error_command_received) + if (_state == error_command_received) return mechanism_t::error; else return mechanism_t::handshaking; @@ -143,7 +143,7 @@ int zmq::plain_client_t::process_welcome (const unsigned char *cmd_data_, { LIBZMQ_UNUSED (cmd_data_); - if (state != waiting_for_welcome) { + if (_state != waiting_for_welcome) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND); errno = EPROTO; @@ -156,7 +156,7 @@ int zmq::plain_client_t::process_welcome (const unsigned char *cmd_data_, errno = EPROTO; return -1; } - state = sending_initiate; + _state = sending_initiate; return 0; } @@ -170,7 +170,7 @@ int zmq::plain_client_t::produce_initiate (msg_t *msg_) const int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_, size_t data_size_) { - if (state != waiting_for_ready) { + if (_state != waiting_for_ready) { session->get_socket 
()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND); errno = EPROTO; @@ -178,7 +178,7 @@ int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_, } const int rc = parse_metadata (cmd_data_ + 6, data_size_ - 6); if (rc == 0) - state = ready; + _state = ready; else session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_INVALID_METADATA); @@ -189,7 +189,7 @@ int zmq::plain_client_t::process_ready (const unsigned char *cmd_data_, int zmq::plain_client_t::process_error (const unsigned char *cmd_data_, size_t data_size_) { - if (state != waiting_for_welcome && state != waiting_for_ready) { + if (_state != waiting_for_welcome && _state != waiting_for_ready) { session->get_socket ()->event_handshake_failed_protocol ( session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND); errno = EPROTO; @@ -212,6 +212,6 @@ int zmq::plain_client_t::process_error (const unsigned char *cmd_data_, } const char *error_reason = reinterpret_cast (cmd_data_) + 7; handle_error_reason (error_reason, error_reason_len); - state = error_command_received; + _state = error_command_received; return 0; } diff --git a/src/plain_client.hpp b/src/plain_client.hpp index afef0c54..cb275b61 100644 --- a/src/plain_client.hpp +++ b/src/plain_client.hpp @@ -59,7 +59,7 @@ class plain_client_t : public mechanism_base_t ready }; - state_t state; + state_t _state; int produce_hello (msg_t *msg_) const; int produce_initiate (msg_t *msg_) const; diff --git a/src/poller_base.cpp b/src/poller_base.cpp index 307ab579..913456fa 100644 --- a/src/poller_base.cpp +++ b/src/poller_base.cpp @@ -44,30 +44,30 @@ zmq::poller_base_t::~poller_base_t () int zmq::poller_base_t::get_load () const { - return load.get (); + return _load.get (); } void zmq::poller_base_t::adjust_load (int amount_) { if (amount_ > 0) - load.add (amount_); + _load.add (amount_); else if (amount_ < 0) - load.sub 
(-amount_); + _load.sub (-amount_); } void zmq::poller_base_t::add_timer (int timeout_, i_poll_events *sink_, int id_) { - uint64_t expiration = clock.now_ms () + timeout_; + uint64_t expiration = _clock.now_ms () + timeout_; timer_info_t info = {sink_, id_}; - timers.insert (timers_t::value_type (expiration, info)); + _timers.insert (timers_t::value_type (expiration, info)); } void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_) { // Complexity of this operation is O(n). We assume it is rarely used. - for (timers_t::iterator it = timers.begin (); it != timers.end (); ++it) + for (timers_t::iterator it = _timers.begin (); it != _timers.end (); ++it) if (it->second.sink == sink_ && it->second.id == id_) { - timers.erase (it); + _timers.erase (it); return; } @@ -78,15 +78,15 @@ void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_) uint64_t zmq::poller_base_t::execute_timers () { // Fast track. - if (timers.empty ()) + if (_timers.empty ()) return 0; // Get the current time. - uint64_t current = clock.now_ms (); + uint64_t current = _clock.now_ms (); // Execute the timers that are already due. - timers_t::iterator it = timers.begin (); - while (it != timers.end ()) { + timers_t::iterator it = _timers.begin (); + while (it != _timers.end ()) { // If we have to wait to execute the item, same will be true about // all the following items (multimap is sorted). Thus we can stop // checking the subsequent timers and return the time to wait for @@ -100,7 +100,7 @@ uint64_t zmq::poller_base_t::execute_timers () // Remove it from the list of active timers. timers_t::iterator o = it; ++it; - timers.erase (o); + _timers.erase (o); } // There are no more timers. 
@@ -108,25 +108,25 @@ uint64_t zmq::poller_base_t::execute_timers () } zmq::worker_poller_base_t::worker_poller_base_t (const thread_ctx_t &ctx_) : - ctx (ctx_) + _ctx (ctx_) { } void zmq::worker_poller_base_t::stop_worker () { - worker.stop (); + _worker.stop (); } void zmq::worker_poller_base_t::start () { zmq_assert (get_load () > 0); - ctx.start_thread (worker, worker_routine, this); + _ctx.start_thread (_worker, worker_routine, this); } void zmq::worker_poller_base_t::check_thread () { #ifdef _DEBUG - zmq_assert (!worker.get_started () || worker.is_current_thread ()); + zmq_assert (!_worker.get_started () || _worker.is_current_thread ()); #endif } diff --git a/src/poller_base.hpp b/src/poller_base.hpp index 15e2cb11..1a8544db 100644 --- a/src/poller_base.hpp +++ b/src/poller_base.hpp @@ -140,7 +140,7 @@ class poller_base_t private: // Clock instance private to this I/O thread. - clock_t clock; + clock_t _clock; // List of active timers. struct timer_info_t @@ -149,11 +149,11 @@ class poller_base_t int id; }; typedef std::multimap timers_t; - timers_t timers; + timers_t _timers; // Load of the poller. Currently the number of file descriptors // registered. - atomic_counter_t load; + atomic_counter_t _load; poller_base_t (const poller_base_t &); const poller_base_t &operator= (const poller_base_t &); @@ -186,10 +186,10 @@ class worker_poller_base_t : public poller_base_t virtual void loop () = 0; // Reference to ZMQ context. - const thread_ctx_t &ctx; + const thread_ctx_t &_ctx; // Handle of the physical thread doing the I/O work. 
- thread_t worker; + thread_t _worker; }; } diff --git a/src/pull.cpp b/src/pull.cpp index 12620fab..d25249fb 100644 --- a/src/pull.cpp +++ b/src/pull.cpp @@ -49,30 +49,30 @@ void zmq::pull_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) LIBZMQ_UNUSED (subscribe_to_all_); zmq_assert (pipe_); - fq.attach (pipe_); + _fq.attach (pipe_); } void zmq::pull_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::pull_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); } int zmq::pull_t::xrecv (msg_t *msg_) { - return fq.recv (msg_); + return _fq.recv (msg_); } bool zmq::pull_t::xhas_in () { - return fq.has_in (); + return _fq.has_in (); } const zmq::blob_t &zmq::pull_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } diff --git a/src/pull.hpp b/src/pull.hpp index 154c0d34..992fa437 100644 --- a/src/pull.hpp +++ b/src/pull.hpp @@ -58,7 +58,7 @@ class pull_t : public socket_base_t private: // Fair queueing object for inbound pipes. 
- fq_t fq; + fq_t _fq; pull_t (const pull_t &); const pull_t &operator= (const pull_t &); diff --git a/src/push.cpp b/src/push.cpp index 0eb05cf7..6062a631 100644 --- a/src/push.cpp +++ b/src/push.cpp @@ -53,25 +53,25 @@ void zmq::push_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) pipe_->set_nodelay (); zmq_assert (pipe_); - lb.attach (pipe_); + _lb.attach (pipe_); } void zmq::push_t::xwrite_activated (pipe_t *pipe_) { - lb.activated (pipe_); + _lb.activated (pipe_); } void zmq::push_t::xpipe_terminated (pipe_t *pipe_) { - lb.pipe_terminated (pipe_); + _lb.pipe_terminated (pipe_); } int zmq::push_t::xsend (msg_t *msg_) { - return lb.send (msg_); + return _lb.send (msg_); } bool zmq::push_t::xhas_out () { - return lb.has_out (); + return _lb.has_out (); } diff --git a/src/push.hpp b/src/push.hpp index 7f20f863..2dd6401d 100644 --- a/src/push.hpp +++ b/src/push.hpp @@ -57,7 +57,7 @@ class push_t : public socket_base_t private: // Load balancer managing the outbound pipes. - lb_t lb; + lb_t _lb; push_t (const push_t &); const push_t &operator= (const push_t &); diff --git a/src/radio.cpp b/src/radio.cpp index b18f3bc1..703eb619 100644 --- a/src/radio.cpp +++ b/src/radio.cpp @@ -38,7 +38,7 @@ zmq::radio_t::radio_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_, true), - lossy (true) + _lossy (true) { options.type = ZMQ_RADIO; } @@ -57,10 +57,10 @@ void zmq::radio_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) // to receive the delimiter. pipe_->set_nodelay (); - dist.attach (pipe_); + _dist.attach (pipe_); if (subscribe_to_all_) - udp_pipes.push_back (pipe_); + _udp_pipes.push_back (pipe_); // The pipe is active when attached. Let's read the subscriptions from // it, if any. 
else @@ -77,16 +77,16 @@ void zmq::radio_t::xread_activated (pipe_t *pipe_) std::string group = std::string (msg.group ()); if (msg.is_join ()) - subscriptions.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (group), - pipe_); + _subscriptions.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (group), + pipe_); else { std::pair - range = subscriptions.equal_range (group); + range = _subscriptions.equal_range (group); for (subscriptions_t::iterator it = range.first; it != range.second; ++it) { if (it->second == pipe_) { - subscriptions.erase (it); + _subscriptions.erase (it); break; } } @@ -98,7 +98,7 @@ void zmq::radio_t::xread_activated (pipe_t *pipe_) void zmq::radio_t::xwrite_activated (pipe_t *pipe_) { - dist.activated (pipe_); + _dist.activated (pipe_); } int zmq::radio_t::xsetsockopt (int option_, const void *optval_, @@ -109,7 +109,7 @@ int zmq::radio_t::xsetsockopt (int option_, return -1; } if (option_ == ZMQ_XPUB_NODROP) - lossy = (*static_cast (optval_) == 0); + _lossy = (*static_cast (optval_) == 0); else { errno = EINVAL; return -1; @@ -121,21 +121,21 @@ void zmq::radio_t::xpipe_terminated (pipe_t *pipe_) { // NOTE: erase invalidates an iterator, and that's why it's not incrementing in post-loop // read-after-free caught by Valgrind, see https://github.com/zeromq/libzmq/pull/1771 - for (subscriptions_t::iterator it = subscriptions.begin (); - it != subscriptions.end ();) { + for (subscriptions_t::iterator it = _subscriptions.begin (); + it != _subscriptions.end ();) { if (it->second == pipe_) { - subscriptions.erase (it++); + _subscriptions.erase (it++); } else { ++it; } } udp_pipes_t::iterator it = - std::find (udp_pipes.begin (), udp_pipes.end (), pipe_); - if (it != udp_pipes.end ()) - udp_pipes.erase (it); + std::find (_udp_pipes.begin (), _udp_pipes.end (), pipe_); + if (it != _udp_pipes.end ()) + _udp_pipes.erase (it); - dist.pipe_terminated (pipe_); + _dist.pipe_terminated (pipe_); } int zmq::radio_t::xsend (msg_t *msg_) @@ -146,21 +146,21 @@ int zmq::radio_t::xsend 
(msg_t *msg_) return -1; } - dist.unmatch (); + _dist.unmatch (); std::pair range = - subscriptions.equal_range (std::string (msg_->group ())); + _subscriptions.equal_range (std::string (msg_->group ())); for (subscriptions_t::iterator it = range.first; it != range.second; ++it) - dist.match (it->second); + _dist.match (it->second); - for (udp_pipes_t::iterator it = udp_pipes.begin (); it != udp_pipes.end (); - ++it) - dist.match (*it); + for (udp_pipes_t::iterator it = _udp_pipes.begin (); + it != _udp_pipes.end (); ++it) + _dist.match (*it); int rc = -1; - if (lossy || dist.check_hwm ()) { - if (dist.send_to_matching (msg_) == 0) { + if (_lossy || _dist.check_hwm ()) { + if (_dist.send_to_matching (msg_) == 0) { rc = 0; // Yay, sent successfully } } else @@ -171,7 +171,7 @@ int zmq::radio_t::xsend (msg_t *msg_) bool zmq::radio_t::xhas_out () { - return dist.has_out (); + return _dist.has_out (); } int zmq::radio_t::xrecv (msg_t *msg_) @@ -193,7 +193,7 @@ zmq::radio_session_t::radio_session_t (io_thread_t *io_thread_, const options_t &options_, address_t *addr_) : session_base_t (io_thread_, connect_, socket_, options_, addr_), - state (group) + _state (group) { } @@ -246,12 +246,12 @@ int zmq::radio_session_t::push_msg (msg_t *msg_) int zmq::radio_session_t::pull_msg (msg_t *msg_) { - if (state == group) { - int rc = session_base_t::pull_msg (&pending_msg); + if (_state == group) { + int rc = session_base_t::pull_msg (&_pending_msg); if (rc != 0) return rc; - const char *group = pending_msg.group (); + const char *group = _pending_msg.group (); int length = static_cast (strlen (group)); // First frame is the group @@ -261,16 +261,16 @@ int zmq::radio_session_t::pull_msg (msg_t *msg_) memcpy (msg_->data (), group, length); // Next status is the body - state = body; + _state = body; return 0; } - *msg_ = pending_msg; - state = group; + *msg_ = _pending_msg; + _state = group; return 0; } void zmq::radio_session_t::reset () { session_base_t::reset (); - state = 
group; + _state = group; } diff --git a/src/radio.hpp b/src/radio.hpp index c311e128..9679c9c6 100644 --- a/src/radio.hpp +++ b/src/radio.hpp @@ -65,17 +65,17 @@ class radio_t : public socket_base_t private: // List of all subscriptions mapped to corresponding pipes. typedef std::multimap subscriptions_t; - subscriptions_t subscriptions; + subscriptions_t _subscriptions; // List of udp pipes typedef std::vector udp_pipes_t; - udp_pipes_t udp_pipes; + udp_pipes_t _udp_pipes; // Distributor of messages holding the list of outbound pipes. - dist_t dist; + dist_t _dist; // Drop messages if HWM reached, otherwise return with EAGAIN - bool lossy; + bool _lossy; radio_t (const radio_t &); const radio_t &operator= (const radio_t &); @@ -101,9 +101,9 @@ class radio_session_t : public session_base_t { group, body - } state; + } _state; - msg_t pending_msg; + msg_t _pending_msg; radio_session_t (const radio_session_t &); const radio_session_t &operator= (const radio_session_t &); diff --git a/src/raw_decoder.cpp b/src/raw_decoder.cpp index 18a0ae7b..9fbdd73c 100644 --- a/src/raw_decoder.cpp +++ b/src/raw_decoder.cpp @@ -34,22 +34,22 @@ #include "raw_decoder.hpp" #include "err.hpp" -zmq::raw_decoder_t::raw_decoder_t (size_t bufsize_) : allocator (bufsize_, 1) +zmq::raw_decoder_t::raw_decoder_t (size_t bufsize_) : _allocator (bufsize_, 1) { - int rc = in_progress.init (); + int rc = _in_progress.init (); errno_assert (rc == 0); } zmq::raw_decoder_t::~raw_decoder_t () { - int rc = in_progress.close (); + int rc = _in_progress.close (); errno_assert (rc == 0); } void zmq::raw_decoder_t::get_buffer (unsigned char **data_, size_t *size_) { - *data_ = allocator.allocate (); - *size_ = allocator.size (); + *data_ = _allocator.allocate (); + *size_ = _allocator.size (); } int zmq::raw_decoder_t::decode (const uint8_t *data_, @@ -57,15 +57,15 @@ int zmq::raw_decoder_t::decode (const uint8_t *data_, size_t &bytes_used_) { int rc = - in_progress.init (const_cast (data_), size_, - 
shared_message_memory_allocator::call_dec_ref, - allocator.buffer (), allocator.provide_content ()); + _in_progress.init (const_cast (data_), size_, + shared_message_memory_allocator::call_dec_ref, + _allocator.buffer (), _allocator.provide_content ()); // if the buffer serves as memory for a zero-copy message, release it // and allocate a new buffer in get_buffer for the next decode - if (in_progress.is_zcmsg ()) { - allocator.advance_content (); - allocator.release (); + if (_in_progress.is_zcmsg ()) { + _allocator.advance_content (); + _allocator.release (); } errno_assert (rc != -1); diff --git a/src/raw_decoder.hpp b/src/raw_decoder.hpp index 56e310a7..7e56ec93 100644 --- a/src/raw_decoder.hpp +++ b/src/raw_decoder.hpp @@ -52,14 +52,14 @@ class raw_decoder_t : public i_decoder virtual int decode (const unsigned char *data_, size_t size_, size_t &processed_); - virtual msg_t *msg () { return &in_progress; } + virtual msg_t *msg () { return &_in_progress; } virtual void resize_buffer (size_t) {} private: - msg_t in_progress; + msg_t _in_progress; - shared_message_memory_allocator allocator; + shared_message_memory_allocator _allocator; raw_decoder_t (const raw_decoder_t &); void operator= (const raw_decoder_t &); diff --git a/src/reaper.cpp b/src/reaper.cpp index bc96403a..edb244ac 100644 --- a/src/reaper.cpp +++ b/src/reaper.cpp @@ -35,43 +35,43 @@ zmq::reaper_t::reaper_t (class ctx_t *ctx_, uint32_t tid_) : object_t (ctx_, tid_), - mailbox_handle (static_cast (NULL)), - poller (NULL), - sockets (0), - terminating (false) + _mailbox_handle (static_cast (NULL)), + _poller (NULL), + _sockets (0), + _terminating (false) { - if (!mailbox.valid ()) + if (!_mailbox.valid ()) return; - poller = new (std::nothrow) poller_t (*ctx_); - alloc_assert (poller); + _poller = new (std::nothrow) poller_t (*ctx_); + alloc_assert (_poller); - if (mailbox.get_fd () != retired_fd) { - mailbox_handle = poller->add_fd (mailbox.get_fd (), this); - poller->set_pollin (mailbox_handle); 
+ if (_mailbox.get_fd () != retired_fd) { + _mailbox_handle = _poller->add_fd (_mailbox.get_fd (), this); + _poller->set_pollin (_mailbox_handle); } #ifdef HAVE_FORK - pid = getpid (); + _pid = getpid (); #endif } zmq::reaper_t::~reaper_t () { - LIBZMQ_DELETE (poller); + LIBZMQ_DELETE (_poller); } zmq::mailbox_t *zmq::reaper_t::get_mailbox () { - return &mailbox; + return &_mailbox; } void zmq::reaper_t::start () { - zmq_assert (mailbox.valid ()); + zmq_assert (_mailbox.valid ()); // Start the thread. - poller->start (); + _poller->start (); } void zmq::reaper_t::stop () @@ -85,7 +85,7 @@ void zmq::reaper_t::in_event () { while (true) { #ifdef HAVE_FORK - if (unlikely (pid != getpid ())) { + if (unlikely (_pid != getpid ())) { //printf("zmq::reaper_t::in_event return in child process %d\n", (int)getpid()); return; } @@ -93,7 +93,7 @@ void zmq::reaper_t::in_event () // Get the next command. If there is none, exit. command_t cmd; - int rc = mailbox.recv (&cmd, 0); + int rc = _mailbox.recv (&cmd, 0); if (rc != 0 && errno == EINTR) continue; if (rc != 0 && errno == EAGAIN) @@ -117,33 +117,33 @@ void zmq::reaper_t::timer_event (int) void zmq::reaper_t::process_stop () { - terminating = true; + _terminating = true; // If there are no sockets being reaped finish immediately. - if (!sockets) { + if (!_sockets) { send_done (); - poller->rm_fd (mailbox_handle); - poller->stop (); + _poller->rm_fd (_mailbox_handle); + _poller->stop (); } } void zmq::reaper_t::process_reap (socket_base_t *socket_) { // Add the socket to the poller. - socket_->start_reaping (poller); + socket_->start_reaping (_poller); - ++sockets; + ++_sockets; } void zmq::reaper_t::process_reaped () { - --sockets; + --_sockets; // If reaped was already asked to terminate and there are no more sockets, // finish immediately. 
- if (!sockets && terminating) { + if (!_sockets && _terminating) { send_done (); - poller->rm_fd (mailbox_handle); - poller->stop (); + _poller->rm_fd (_mailbox_handle); + _poller->stop (); } } diff --git a/src/reaper.hpp b/src/reaper.hpp index 1d4f1273..8d203a18 100644 --- a/src/reaper.hpp +++ b/src/reaper.hpp @@ -63,26 +63,26 @@ class reaper_t : public object_t, public i_poll_events void process_reaped (); // Reaper thread accesses incoming commands via this mailbox. - mailbox_t mailbox; + mailbox_t _mailbox; // Handle associated with mailbox' file descriptor. - poller_t::handle_t mailbox_handle; + poller_t::handle_t _mailbox_handle; // I/O multiplexing is performed using a poller object. - poller_t *poller; + poller_t *_poller; // Number of sockets being reaped at the moment. - int sockets; + int _sockets; // If true, we were already asked to terminate. - bool terminating; + bool _terminating; reaper_t (const reaper_t &); const reaper_t &operator= (const reaper_t &); #ifdef HAVE_FORK // the process that created this context. Used to detect forking. - pid_t pid; + pid_t _pid; #endif }; } diff --git a/src/rep.cpp b/src/rep.cpp index 081b379c..fe0efed1 100644 --- a/src/rep.cpp +++ b/src/rep.cpp @@ -34,8 +34,8 @@ zmq::rep_t::rep_t (class ctx_t *parent_, uint32_t tid_, int sid_) : router_t (parent_, tid_, sid_), - sending_reply (false), - request_begins (true) + _sending_reply (false), + _request_begins (true) { options.type = ZMQ_REP; } @@ -47,7 +47,7 @@ zmq::rep_t::~rep_t () int zmq::rep_t::xsend (msg_t *msg_) { // If we are in the middle of receiving a request, we cannot send reply. - if (!sending_reply) { + if (!_sending_reply) { errno = EFSM; return -1; } @@ -61,7 +61,7 @@ int zmq::rep_t::xsend (msg_t *msg_) // If the reply is complete flip the FSM back to request receiving state. 
if (!more) - sending_reply = false; + _sending_reply = false; return 0; } @@ -69,14 +69,14 @@ int zmq::rep_t::xsend (msg_t *msg_) int zmq::rep_t::xrecv (msg_t *msg_) { // If we are in middle of sending a reply, we cannot receive next request. - if (sending_reply) { + if (_sending_reply) { errno = EFSM; return -1; } // First thing to do when receiving a request is to copy all the labels // to the reply pipe. - if (request_begins) { + if (_request_begins) { while (true) { int rc = router_t::xrecv (msg_); if (rc != 0) @@ -99,7 +99,7 @@ int zmq::rep_t::xrecv (msg_t *msg_) errno_assert (rc == 0); } } - request_begins = false; + _request_begins = false; } // Get next message part to return to the user. @@ -109,8 +109,8 @@ int zmq::rep_t::xrecv (msg_t *msg_) // If whole request is read, flip the FSM to reply-sending state. if (!(msg_->flags () & msg_t::more)) { - sending_reply = true; - request_begins = true; + _sending_reply = true; + _request_begins = true; } return 0; @@ -118,7 +118,7 @@ int zmq::rep_t::xrecv (msg_t *msg_) bool zmq::rep_t::xhas_in () { - if (sending_reply) + if (_sending_reply) return false; return router_t::xhas_in (); @@ -126,7 +126,7 @@ bool zmq::rep_t::xhas_in () bool zmq::rep_t::xhas_out () { - if (!sending_reply) + if (!_sending_reply) return false; return router_t::xhas_out (); diff --git a/src/rep.hpp b/src/rep.hpp index 95d9b61f..ef22eabd 100644 --- a/src/rep.hpp +++ b/src/rep.hpp @@ -54,11 +54,11 @@ class rep_t : public router_t private: // If true, we are in process of sending the reply. If false we are // in process of receiving a request. - bool sending_reply; + bool _sending_reply; // If true, we are starting to receive a request. The beginning // of the request is the backtrace stack. 
- bool request_begins; + bool _request_begins; rep_t (const rep_t &); const rep_t &operator= (const rep_t &); diff --git a/src/req.cpp b/src/req.cpp index 4d0b210c..09a736c2 100644 --- a/src/req.cpp +++ b/src/req.cpp @@ -46,12 +46,12 @@ static void free_id (void *data_, void *hint_) zmq::req_t::req_t (class ctx_t *parent_, uint32_t tid_, int sid_) : dealer_t (parent_, tid_, sid_), - receiving_reply (false), - message_begins (true), - reply_pipe (NULL), - request_id_frames_enabled (false), - request_id (generate_random ()), - strict (true) + _receiving_reply (false), + _message_begins (true), + _reply_pipe (NULL), + _request_id_frames_enabled (false), + _request_id (generate_random ()), + _strict (true) { options.type = ZMQ_REQ; } @@ -64,29 +64,29 @@ int zmq::req_t::xsend (msg_t *msg_) { // If we've sent a request and we still haven't got the reply, // we can't send another request unless the strict option is disabled. - if (receiving_reply) { - if (strict) { + if (_receiving_reply) { + if (_strict) { errno = EFSM; return -1; } - receiving_reply = false; - message_begins = true; + _receiving_reply = false; + _message_begins = true; } // First part of the request is the request routing id. - if (message_begins) { - reply_pipe = NULL; + if (_message_begins) { + _reply_pipe = NULL; - if (request_id_frames_enabled) { - request_id++; + if (_request_id_frames_enabled) { + _request_id++; // Copy request id before sending (see issue #1695 for details). 
uint32_t *request_id_copy = static_cast (malloc (sizeof (uint32_t))); zmq_assert (request_id_copy); - *request_id_copy = request_id; + *request_id_copy = _request_id; msg_t id; int rc = @@ -94,7 +94,7 @@ int zmq::req_t::xsend (msg_t *msg_) errno_assert (rc == 0); id.set_flags (msg_t::more); - rc = dealer_t::sendpipe (&id, &reply_pipe); + rc = dealer_t::sendpipe (&id, &_reply_pipe); if (rc != 0) return -1; } @@ -104,12 +104,12 @@ int zmq::req_t::xsend (msg_t *msg_) errno_assert (rc == 0); bottom.set_flags (msg_t::more); - rc = dealer_t::sendpipe (&bottom, &reply_pipe); + rc = dealer_t::sendpipe (&bottom, &_reply_pipe); if (rc != 0) return -1; - zmq_assert (reply_pipe); + zmq_assert (_reply_pipe); - message_begins = false; + _message_begins = false; // Eat all currently available messages before the request is fully // sent. This is done to avoid: @@ -135,8 +135,8 @@ int zmq::req_t::xsend (msg_t *msg_) // If the request was fully sent, flip the FSM into reply-receiving state. if (!more) { - receiving_reply = true; - message_begins = true; + _receiving_reply = true; + _message_begins = true; } return 0; @@ -145,23 +145,23 @@ int zmq::req_t::xsend (msg_t *msg_) int zmq::req_t::xrecv (msg_t *msg_) { // If request wasn't send, we can't wait for reply. - if (!receiving_reply) { + if (!_receiving_reply) { errno = EFSM; return -1; } // Skip messages until one with the right first frames is found. - while (message_begins) { + while (_message_begins) { // If enabled, the first frame must have the correct request_id. 
- if (request_id_frames_enabled) { + if (_request_id_frames_enabled) { int rc = recv_reply_pipe (msg_); if (rc != 0) return rc; if (unlikely (!(msg_->flags () & msg_t::more) - || msg_->size () != sizeof (request_id) + || msg_->size () != sizeof (_request_id) || *static_cast (msg_->data ()) - != request_id)) { + != _request_id)) { // Skip the remaining frames and try the next message while (msg_->flags () & msg_t::more) { rc = recv_reply_pipe (msg_); @@ -186,7 +186,7 @@ int zmq::req_t::xrecv (msg_t *msg_) continue; } - message_begins = false; + _message_begins = false; } int rc = recv_reply_pipe (msg_); @@ -195,8 +195,8 @@ int zmq::req_t::xrecv (msg_t *msg_) // If the reply is fully received, flip the FSM into request-sending state. if (!(msg_->flags () & msg_t::more)) { - receiving_reply = false; - message_begins = true; + _receiving_reply = false; + _message_begins = true; } return 0; @@ -206,7 +206,7 @@ bool zmq::req_t::xhas_in () { // TODO: Duplicates should be removed here. - if (!receiving_reply) + if (!_receiving_reply) return false; return dealer_t::xhas_in (); @@ -214,7 +214,7 @@ bool zmq::req_t::xhas_in () bool zmq::req_t::xhas_out () { - if (receiving_reply && strict) + if (_receiving_reply && _strict) return false; return dealer_t::xhas_out (); @@ -232,14 +232,14 @@ int zmq::req_t::xsetsockopt (int option_, switch (option_) { case ZMQ_REQ_CORRELATE: if (is_int && value >= 0) { - request_id_frames_enabled = (value != 0); + _request_id_frames_enabled = (value != 0); return 0; } break; case ZMQ_REQ_RELAXED: if (is_int && value >= 0) { - strict = (value == 0); + _strict = (value == 0); return 0; } break; @@ -253,8 +253,8 @@ int zmq::req_t::xsetsockopt (int option_, void zmq::req_t::xpipe_terminated (pipe_t *pipe_) { - if (reply_pipe == pipe_) - reply_pipe = NULL; + if (_reply_pipe == pipe_) + _reply_pipe = NULL; dealer_t::xpipe_terminated (pipe_); } @@ -265,7 +265,7 @@ int zmq::req_t::recv_reply_pipe (msg_t *msg_) int rc = dealer_t::recvpipe (msg_, &pipe); 
if (rc != 0) return rc; - if (!reply_pipe || pipe == reply_pipe) + if (!_reply_pipe || pipe == _reply_pipe) return 0; } } @@ -276,7 +276,7 @@ zmq::req_session_t::req_session_t (io_thread_t *io_thread_, const options_t &options_, address_t *addr_) : session_base_t (io_thread_, connect_, socket_, options_, addr_), - state (bottom) + _state (bottom) { } @@ -291,25 +291,25 @@ int zmq::req_session_t::push_msg (msg_t *msg_) if (unlikely (msg_->flags () & msg_t::command)) return 0; - switch (state) { + switch (_state) { case bottom: if (msg_->flags () == msg_t::more) { // In case option ZMQ_CORRELATE is on, allow request_id to be // transfered as first frame (would be too cumbersome to check // whether the option is actually on or not). if (msg_->size () == sizeof (uint32_t)) { - state = request_id; + _state = request_id; return session_base_t::push_msg (msg_); } if (msg_->size () == 0) { - state = body; + _state = body; return session_base_t::push_msg (msg_); } } break; case request_id: if (msg_->flags () == msg_t::more && msg_->size () == 0) { - state = body; + _state = body; return session_base_t::push_msg (msg_); } break; @@ -317,7 +317,7 @@ int zmq::req_session_t::push_msg (msg_t *msg_) if (msg_->flags () == msg_t::more) return session_base_t::push_msg (msg_); if (msg_->flags () == 0) { - state = bottom; + _state = bottom; return session_base_t::push_msg (msg_); } break; @@ -329,5 +329,5 @@ int zmq::req_session_t::push_msg (msg_t *msg_) void zmq::req_session_t::reset () { session_base_t::reset (); - state = bottom; + _state = bottom; } diff --git a/src/req.hpp b/src/req.hpp index 226d42b7..8ea527a3 100644 --- a/src/req.hpp +++ b/src/req.hpp @@ -62,26 +62,26 @@ class req_t : public dealer_t private: // If true, request was already sent and reply wasn't received yet or // was received partially. - bool receiving_reply; + bool _receiving_reply; // If true, we are starting to send/recv a message. 
The first part // of the message must be empty message part (backtrace stack bottom). - bool message_begins; + bool _message_begins; // The pipe the request was sent to and where the reply is expected. - zmq::pipe_t *reply_pipe; + zmq::pipe_t *_reply_pipe; // Whether request id frames shall be sent and expected. - bool request_id_frames_enabled; + bool _request_id_frames_enabled; // The current request id. It is incremented every time before a new // request is sent. - uint32_t request_id; + uint32_t _request_id; // If false, send() will reset its internal state and terminate the // reply_pipe's connection instead of failing if a previous request is // still pending. - bool strict; + bool _strict; req_t (const req_t &); const req_t &operator= (const req_t &); @@ -107,7 +107,7 @@ class req_session_t : public session_base_t bottom, request_id, body - } state; + } _state; req_session_t (const req_session_t &); const req_session_t &operator= (const req_session_t &); diff --git a/src/router.cpp b/src/router.cpp index 53d868f5..918679aa 100644 --- a/src/router.cpp +++ b/src/router.cpp @@ -38,35 +38,35 @@ zmq::router_t::router_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - prefetched (false), - routing_id_sent (false), - current_in (NULL), - terminate_current_in (false), - more_in (false), - current_out (NULL), - more_out (false), - next_integral_routing_id (generate_random ()), - mandatory (false), + _prefetched (false), + _routing_id_sent (false), + _current_in (NULL), + _terminate_current_in (false), + _more_in (false), + _current_out (NULL), + _more_out (false), + _next_integral_routing_id (generate_random ()), + _mandatory (false), // raw_socket functionality in ROUTER is deprecated - raw_socket (false), - probe_router (false), - handover (false) + _raw_socket (false), + _probe_router (false), + _handover (false) { options.type = ZMQ_ROUTER; options.recv_routing_id = true; options.raw_socket = false; - prefetched_id.init 
(); - prefetched_msg.init (); + _prefetched_id.init (); + _prefetched_msg.init (); } zmq::router_t::~router_t () { - zmq_assert (anonymous_pipes.empty ()); + zmq_assert (_anonymous_pipes.empty ()); ; - zmq_assert (outpipes.empty ()); - prefetched_id.close (); - prefetched_msg.close (); + zmq_assert (_out_pipes.empty ()); + _prefetched_id.close (); + _prefetched_msg.close (); } void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) @@ -75,7 +75,7 @@ void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) zmq_assert (pipe_); - if (probe_router) { + if (_probe_router) { msg_t probe_msg; int rc = probe_msg.init (); errno_assert (rc == 0); @@ -90,9 +90,9 @@ void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) bool routing_id_ok = identify_peer (pipe_); if (routing_id_ok) - fq.attach (pipe_); + _fq.attach (pipe_); else - anonymous_pipes.insert (pipe_); + _anonymous_pipes.insert (pipe_); } int zmq::router_t::xsetsockopt (int option_, @@ -116,8 +116,8 @@ int zmq::router_t::xsetsockopt (int option_, case ZMQ_ROUTER_RAW: if (is_int && value >= 0) { - raw_socket = (value != 0); - if (raw_socket) { + _raw_socket = (value != 0); + if (_raw_socket) { options.recv_routing_id = false; options.raw_socket = true; } @@ -127,21 +127,21 @@ int zmq::router_t::xsetsockopt (int option_, case ZMQ_ROUTER_MANDATORY: if (is_int && value >= 0) { - mandatory = (value != 0); + _mandatory = (value != 0); return 0; } break; case ZMQ_PROBE_ROUTER: if (is_int && value >= 0) { - probe_router = (value != 0); + _probe_router = (value != 0); return 0; } break; case ZMQ_ROUTER_HANDOVER: if (is_int && value >= 0) { - handover = (value != 0); + _handover = (value != 0); return 0; } break; @@ -156,30 +156,30 @@ int zmq::router_t::xsetsockopt (int option_, void zmq::router_t::xpipe_terminated (pipe_t *pipe_) { - std::set::iterator it = anonymous_pipes.find (pipe_); - if (it != anonymous_pipes.end ()) - anonymous_pipes.erase (it); + std::set::iterator it 
= _anonymous_pipes.find (pipe_); + if (it != _anonymous_pipes.end ()) + _anonymous_pipes.erase (it); else { - outpipes_t::iterator iter = outpipes.find (pipe_->get_routing_id ()); - zmq_assert (iter != outpipes.end ()); - outpipes.erase (iter); - fq.pipe_terminated (pipe_); + outpipes_t::iterator iter = _out_pipes.find (pipe_->get_routing_id ()); + zmq_assert (iter != _out_pipes.end ()); + _out_pipes.erase (iter); + _fq.pipe_terminated (pipe_); pipe_->rollback (); - if (pipe_ == current_out) - current_out = NULL; + if (pipe_ == _current_out) + _current_out = NULL; } } void zmq::router_t::xread_activated (pipe_t *pipe_) { - std::set::iterator it = anonymous_pipes.find (pipe_); - if (it == anonymous_pipes.end ()) - fq.activated (pipe_); + std::set::iterator it = _anonymous_pipes.find (pipe_); + if (it == _anonymous_pipes.end ()) + _fq.activated (pipe_); else { bool routing_id_ok = identify_peer (pipe_); if (routing_id_ok) { - anonymous_pipes.erase (it); - fq.attach (pipe_); + _anonymous_pipes.erase (it); + _fq.attach (pipe_); } } } @@ -187,11 +187,11 @@ void zmq::router_t::xread_activated (pipe_t *pipe_) void zmq::router_t::xwrite_activated (pipe_t *pipe_) { outpipes_t::iterator it; - for (it = outpipes.begin (); it != outpipes.end (); ++it) + for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it) if (it->second.pipe == pipe_) break; - zmq_assert (it != outpipes.end ()); + zmq_assert (it != _out_pipes.end ()); zmq_assert (!it->second.active); it->second.active = true; } @@ -200,34 +200,34 @@ int zmq::router_t::xsend (msg_t *msg_) { // If this is the first part of the message it's the ID of the // peer to send the message to. - if (!more_out) { - zmq_assert (!current_out); + if (!_more_out) { + zmq_assert (!_current_out); // If we have malformed message (prefix with no subsequent message) // then just silently ignore it. // TODO: The connections should be killed instead. 
if (msg_->flags () & msg_t::more) { - more_out = true; + _more_out = true; // Find the pipe associated with the routing id stored in the prefix. // If there's no such pipe just silently ignore the message, unless // router_mandatory is set. blob_t routing_id (static_cast (msg_->data ()), msg_->size (), zmq::reference_tag_t ()); - outpipes_t::iterator it = outpipes.find (routing_id); + outpipes_t::iterator it = _out_pipes.find (routing_id); - if (it != outpipes.end ()) { - current_out = it->second.pipe; + if (it != _out_pipes.end ()) { + _current_out = it->second.pipe; // Check whether pipe is closed or not - if (!current_out->check_write ()) { + if (!_current_out->check_write ()) { // Check whether pipe is full or not - bool pipe_full = !current_out->check_hwm (); + bool pipe_full = !_current_out->check_hwm (); it->second.active = false; - current_out = NULL; + _current_out = NULL; - if (mandatory) { - more_out = false; + if (_mandatory) { + _more_out = false; if (pipe_full) errno = EAGAIN; else @@ -235,8 +235,8 @@ int zmq::router_t::xsend (msg_t *msg_) return -1; } } - } else if (mandatory) { - more_out = false; + } else if (_mandatory) { + _more_out = false; errno = EHOSTUNREACH; return -1; } @@ -254,36 +254,36 @@ int zmq::router_t::xsend (msg_t *msg_) msg_->reset_flags (msg_t::more); // Check whether this is the last part of the message. - more_out = (msg_->flags () & msg_t::more) != 0; + _more_out = (msg_->flags () & msg_t::more) != 0; // Push the message into the pipe. If there's no out pipe, just drop it. - if (current_out) { + if (_current_out) { // Close the remote connection if user has asked to do so // by sending zero length message. 
// Pending messages in the pipe will be dropped (on receiving term- ack) - if (raw_socket && msg_->size () == 0) { - current_out->terminate (false); + if (_raw_socket && msg_->size () == 0) { + _current_out->terminate (false); int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); - current_out = NULL; + _current_out = NULL; return 0; } - bool ok = current_out->write (msg_); + bool ok = _current_out->write (msg_); if (unlikely (!ok)) { // Message failed to send - we must close it ourselves. int rc = msg_->close (); errno_assert (rc == 0); // HWM was checked before, so the pipe must be gone. Roll back // messages that were piped, for example REP labels. - current_out->rollback (); - current_out = NULL; + _current_out->rollback (); + _current_out = NULL; } else { - if (!more_out) { - current_out->flush (); - current_out = NULL; + if (!_more_out) { + _current_out->flush (); + _current_out = NULL; } } } else { @@ -300,36 +300,36 @@ int zmq::router_t::xsend (msg_t *msg_) int zmq::router_t::xrecv (msg_t *msg_) { - if (prefetched) { - if (!routing_id_sent) { - int rc = msg_->move (prefetched_id); + if (_prefetched) { + if (!_routing_id_sent) { + int rc = msg_->move (_prefetched_id); errno_assert (rc == 0); - routing_id_sent = true; + _routing_id_sent = true; } else { - int rc = msg_->move (prefetched_msg); + int rc = msg_->move (_prefetched_msg); errno_assert (rc == 0); - prefetched = false; + _prefetched = false; } - more_in = (msg_->flags () & msg_t::more) != 0; + _more_in = (msg_->flags () & msg_t::more) != 0; - if (!more_in) { - if (terminate_current_in) { - current_in->terminate (true); - terminate_current_in = false; + if (!_more_in) { + if (_terminate_current_in) { + _current_in->terminate (true); + _terminate_current_in = false; } - current_in = NULL; + _current_in = NULL; } return 0; } pipe_t *pipe = NULL; - int rc = fq.recvpipe (msg_, &pipe); + int rc = _fq.recvpipe (msg_, &pipe); // It's possible that we receive peer's 
routing id. That happens // after reconnection. The current implementation assumes that // the peer always uses the same routing id. while (rc == 0 && msg_->is_routing_id ()) - rc = fq.recvpipe (msg_, &pipe); + rc = _fq.recvpipe (msg_, &pipe); if (rc != 0) return -1; @@ -337,33 +337,33 @@ int zmq::router_t::xrecv (msg_t *msg_) zmq_assert (pipe != NULL); // If we are in the middle of reading a message, just return the next part. - if (more_in) { - more_in = (msg_->flags () & msg_t::more) != 0; + if (_more_in) { + _more_in = (msg_->flags () & msg_t::more) != 0; - if (!more_in) { - if (terminate_current_in) { - current_in->terminate (true); - terminate_current_in = false; + if (!_more_in) { + if (_terminate_current_in) { + _current_in->terminate (true); + _terminate_current_in = false; } - current_in = NULL; + _current_in = NULL; } } else { // We are at the beginning of a message. // Keep the message part we have in the prefetch buffer // and return the ID of the peer instead. - rc = prefetched_msg.move (*msg_); + rc = _prefetched_msg.move (*msg_); errno_assert (rc == 0); - prefetched = true; - current_in = pipe; + _prefetched = true; + _current_in = pipe; const blob_t &routing_id = pipe->get_routing_id (); rc = msg_->init_size (routing_id.size ()); errno_assert (rc == 0); memcpy (msg_->data (), routing_id.data (), routing_id.size ()); msg_->set_flags (msg_t::more); - if (prefetched_msg.metadata ()) - msg_->set_metadata (prefetched_msg.metadata ()); - routing_id_sent = true; + if (_prefetched_msg.metadata ()) + msg_->set_metadata (_prefetched_msg.metadata ()); + _routing_id_sent = true; } return 0; @@ -371,10 +371,10 @@ int zmq::router_t::xrecv (msg_t *msg_) int zmq::router_t::rollback () { - if (current_out) { - current_out->rollback (); - current_out = NULL; - more_out = false; + if (_current_out) { + _current_out->rollback (); + _current_out = NULL; + _more_out = false; } return 0; } @@ -383,24 +383,24 @@ bool zmq::router_t::xhas_in () { // If we are in the middle 
of reading the messages, there are // definitely more parts available. - if (more_in) + if (_more_in) return true; // We may already have a message pre-fetched. - if (prefetched) + if (_prefetched) return true; // Try to read the next message. // The message, if read, is kept in the pre-fetch buffer. pipe_t *pipe = NULL; - int rc = fq.recvpipe (&prefetched_msg, &pipe); + int rc = _fq.recvpipe (&_prefetched_msg, &pipe); // It's possible that we receive peer's routing id. That happens // after reconnection. The current implementation assumes that // the peer always uses the same routing id. // TODO: handle the situation when the peer changes its routing id. - while (rc == 0 && prefetched_msg.is_routing_id ()) - rc = fq.recvpipe (&prefetched_msg, &pipe); + while (rc == 0 && _prefetched_msg.is_routing_id ()) + rc = _fq.recvpipe (&_prefetched_msg, &pipe); if (rc != 0) return false; @@ -408,14 +408,14 @@ bool zmq::router_t::xhas_in () zmq_assert (pipe != NULL); const blob_t &routing_id = pipe->get_routing_id (); - rc = prefetched_id.init_size (routing_id.size ()); + rc = _prefetched_id.init_size (routing_id.size ()); errno_assert (rc == 0); - memcpy (prefetched_id.data (), routing_id.data (), routing_id.size ()); - prefetched_id.set_flags (msg_t::more); + memcpy (_prefetched_id.data (), routing_id.data (), routing_id.size ()); + _prefetched_id.set_flags (msg_t::more); - prefetched = true; - routing_id_sent = false; - current_in = pipe; + _prefetched = true; + _routing_id_sent = false; + _current_in = pipe; return true; } @@ -426,12 +426,12 @@ bool zmq::router_t::xhas_out () // MANDATORY is set). Whether actual attempt to write succeeds depends // on whitch pipe the message is going to be routed to. 
- if (!mandatory) + if (!_mandatory) return true; bool has_out = false; outpipes_t::iterator it; - for (it = outpipes.begin (); it != outpipes.end (); ++it) + for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it) has_out |= it->second.pipe->check_hwm (); return has_out; @@ -439,7 +439,7 @@ bool zmq::router_t::xhas_out () const zmq::blob_t &zmq::router_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } int zmq::router_t::get_peer_state (const void *routing_id_, @@ -448,13 +448,13 @@ int zmq::router_t::get_peer_state (const void *routing_id_, int res = 0; blob_t routing_id_blob ((unsigned char *) routing_id_, routing_id_size_); - outpipes_t::const_iterator it = outpipes.find (routing_id_blob); - if (it == outpipes.end ()) { + outpipes_t::const_iterator it = _out_pipes.find (routing_id_blob); + if (it == _out_pipes.end ()) { errno = EHOSTUNREACH; return -1; } - const outpipe_t &outpipe = it->second; + const out_pipe_t &outpipe = it->second; if (outpipe.pipe->check_hwm ()) res |= ZMQ_POLLOUT; @@ -473,15 +473,15 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_) routing_id.set ((unsigned char *) connect_routing_id.c_str (), connect_routing_id.length ()); connect_routing_id.clear (); - outpipes_t::iterator it = outpipes.find (routing_id); - if (it != outpipes.end ()) + outpipes_t::iterator it = _out_pipes.find (routing_id); + if (it != _out_pipes.end ()) zmq_assert (false); // Not allowed to duplicate an existing rid } else if ( options .raw_socket) { // Always assign an integral routing id for raw-socket unsigned char buf[5]; buf[0] = 0; - put_uint32 (buf + 1, next_integral_routing_id++); + put_uint32 (buf + 1, _next_integral_routing_id++); routing_id.set (buf, sizeof buf); } else if (!options.raw_socket) { // Pick up handshake cases and also case where next integral routing id is set @@ -494,17 +494,17 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_) // Fall back on the auto-generation unsigned char buf[5]; 
buf[0] = 0; - put_uint32 (buf + 1, next_integral_routing_id++); + put_uint32 (buf + 1, _next_integral_routing_id++); routing_id.set (buf, sizeof buf); msg.close (); } else { routing_id.set (static_cast (msg.data ()), msg.size ()); - outpipes_t::iterator it = outpipes.find (routing_id); + outpipes_t::iterator it = _out_pipes.find (routing_id); msg.close (); - if (it != outpipes.end ()) { - if (!handover) + if (it != _out_pipes.end ()) { + if (!_handover) // Ignore peers with duplicate ID return false; @@ -513,14 +513,14 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_) // existing pipe so we can terminate it asynchronously. unsigned char buf[5]; buf[0] = 0; - put_uint32 (buf + 1, next_integral_routing_id++); + put_uint32 (buf + 1, _next_integral_routing_id++); blob_t new_routing_id (buf, sizeof buf); it->second.pipe->set_router_socket_routing_id (new_routing_id); - outpipe_t existing_outpipe = {it->second.pipe, - it->second.active}; + out_pipe_t existing_outpipe = {it->second.pipe, + it->second.active}; - ok = outpipes + ok = _out_pipes .ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (new_routing_id), existing_outpipe) .second; @@ -528,10 +528,10 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_) // Remove the existing routing id entry to allow the new // connection to take the routing id. 
- outpipes.erase (it); + _out_pipes.erase (it); - if (existing_outpipe.pipe == current_in) - terminate_current_in = true; + if (existing_outpipe.pipe == _current_in) + _terminate_current_in = true; else existing_outpipe.pipe->terminate (true); } @@ -540,8 +540,8 @@ bool zmq::router_t::identify_peer (pipe_t *pipe_) pipe_->set_router_socket_routing_id (routing_id); // Add the record into output pipes lookup table - outpipe_t outpipe = {pipe_, true}; - ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe) + out_pipe_t outpipe = {pipe_, true}; + ok = _out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe) .second; zmq_assert (ok); diff --git a/src/router.cpp~RF40cad05.TMP b/src/router.cpp~RF40cad05.TMP new file mode 100644 index 00000000..53d868f5 --- /dev/null +++ b/src/router.cpp~RF40cad05.TMP @@ -0,0 +1,549 @@ +/* + Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file + + This file is part of libzmq, the ZeroMQ core engine in C++. + + libzmq is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License (LGPL) as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + As a special exception, the Contributors give you permission to link + this library with independent modules to produce an executable, + regardless of the license terms of these independent modules, and to + copy and distribute the resulting executable under terms of your choice, + provided that you also meet, for each linked independent module, the + terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. + If you modify this library, you must extend this exception to your + version of the library. 
+ + libzmq is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this program. If not, see . +*/ + +#include "precompiled.hpp" +#include "macros.hpp" +#include "router.hpp" +#include "pipe.hpp" +#include "wire.hpp" +#include "random.hpp" +#include "likely.hpp" +#include "err.hpp" + +zmq::router_t::router_t (class ctx_t *parent_, uint32_t tid_, int sid_) : + socket_base_t (parent_, tid_, sid_), + prefetched (false), + routing_id_sent (false), + current_in (NULL), + terminate_current_in (false), + more_in (false), + current_out (NULL), + more_out (false), + next_integral_routing_id (generate_random ()), + mandatory (false), + // raw_socket functionality in ROUTER is deprecated + raw_socket (false), + probe_router (false), + handover (false) +{ + options.type = ZMQ_ROUTER; + options.recv_routing_id = true; + options.raw_socket = false; + + prefetched_id.init (); + prefetched_msg.init (); +} + +zmq::router_t::~router_t () +{ + zmq_assert (anonymous_pipes.empty ()); + ; + zmq_assert (outpipes.empty ()); + prefetched_id.close (); + prefetched_msg.close (); +} + +void zmq::router_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) +{ + LIBZMQ_UNUSED (subscribe_to_all_); + + zmq_assert (pipe_); + + if (probe_router) { + msg_t probe_msg; + int rc = probe_msg.init (); + errno_assert (rc == 0); + + rc = pipe_->write (&probe_msg); + // zmq_assert (rc) is not applicable here, since it is not a bug. 
+ pipe_->flush (); + + rc = probe_msg.close (); + errno_assert (rc == 0); + } + + bool routing_id_ok = identify_peer (pipe_); + if (routing_id_ok) + fq.attach (pipe_); + else + anonymous_pipes.insert (pipe_); +} + +int zmq::router_t::xsetsockopt (int option_, + const void *optval_, + size_t optvallen_) +{ + bool is_int = (optvallen_ == sizeof (int)); + int value = 0; + if (is_int) + memcpy (&value, optval_, sizeof (int)); + + switch (option_) { + case ZMQ_CONNECT_ROUTING_ID: + // TODO why isn't it possible to set an empty connect_routing_id + // (which is the default value) + if (optval_ && optvallen_) { + connect_routing_id.assign ((char *) optval_, optvallen_); + return 0; + } + break; + + case ZMQ_ROUTER_RAW: + if (is_int && value >= 0) { + raw_socket = (value != 0); + if (raw_socket) { + options.recv_routing_id = false; + options.raw_socket = true; + } + return 0; + } + break; + + case ZMQ_ROUTER_MANDATORY: + if (is_int && value >= 0) { + mandatory = (value != 0); + return 0; + } + break; + + case ZMQ_PROBE_ROUTER: + if (is_int && value >= 0) { + probe_router = (value != 0); + return 0; + } + break; + + case ZMQ_ROUTER_HANDOVER: + if (is_int && value >= 0) { + handover = (value != 0); + return 0; + } + break; + + default: + break; + } + errno = EINVAL; + return -1; +} + + +void zmq::router_t::xpipe_terminated (pipe_t *pipe_) +{ + std::set::iterator it = anonymous_pipes.find (pipe_); + if (it != anonymous_pipes.end ()) + anonymous_pipes.erase (it); + else { + outpipes_t::iterator iter = outpipes.find (pipe_->get_routing_id ()); + zmq_assert (iter != outpipes.end ()); + outpipes.erase (iter); + fq.pipe_terminated (pipe_); + pipe_->rollback (); + if (pipe_ == current_out) + current_out = NULL; + } +} + +void zmq::router_t::xread_activated (pipe_t *pipe_) +{ + std::set::iterator it = anonymous_pipes.find (pipe_); + if (it == anonymous_pipes.end ()) + fq.activated (pipe_); + else { + bool routing_id_ok = identify_peer (pipe_); + if (routing_id_ok) { + 
anonymous_pipes.erase (it); + fq.attach (pipe_); + } + } +} + +void zmq::router_t::xwrite_activated (pipe_t *pipe_) +{ + outpipes_t::iterator it; + for (it = outpipes.begin (); it != outpipes.end (); ++it) + if (it->second.pipe == pipe_) + break; + + zmq_assert (it != outpipes.end ()); + zmq_assert (!it->second.active); + it->second.active = true; +} + +int zmq::router_t::xsend (msg_t *msg_) +{ + // If this is the first part of the message it's the ID of the + // peer to send the message to. + if (!more_out) { + zmq_assert (!current_out); + + // If we have malformed message (prefix with no subsequent message) + // then just silently ignore it. + // TODO: The connections should be killed instead. + if (msg_->flags () & msg_t::more) { + more_out = true; + + // Find the pipe associated with the routing id stored in the prefix. + // If there's no such pipe just silently ignore the message, unless + // router_mandatory is set. + blob_t routing_id (static_cast (msg_->data ()), + msg_->size (), zmq::reference_tag_t ()); + outpipes_t::iterator it = outpipes.find (routing_id); + + if (it != outpipes.end ()) { + current_out = it->second.pipe; + + // Check whether pipe is closed or not + if (!current_out->check_write ()) { + // Check whether pipe is full or not + bool pipe_full = !current_out->check_hwm (); + it->second.active = false; + current_out = NULL; + + if (mandatory) { + more_out = false; + if (pipe_full) + errno = EAGAIN; + else + errno = EHOSTUNREACH; + return -1; + } + } + } else if (mandatory) { + more_out = false; + errno = EHOSTUNREACH; + return -1; + } + } + + int rc = msg_->close (); + errno_assert (rc == 0); + rc = msg_->init (); + errno_assert (rc == 0); + return 0; + } + + // Ignore the MORE flag for raw-sock or assert? + if (options.raw_socket) + msg_->reset_flags (msg_t::more); + + // Check whether this is the last part of the message. + more_out = (msg_->flags () & msg_t::more) != 0; + + // Push the message into the pipe. 
If there's no out pipe, just drop it. + if (current_out) { + // Close the remote connection if user has asked to do so + // by sending zero length message. + // Pending messages in the pipe will be dropped (on receiving term- ack) + if (raw_socket && msg_->size () == 0) { + current_out->terminate (false); + int rc = msg_->close (); + errno_assert (rc == 0); + rc = msg_->init (); + errno_assert (rc == 0); + current_out = NULL; + return 0; + } + + bool ok = current_out->write (msg_); + if (unlikely (!ok)) { + // Message failed to send - we must close it ourselves. + int rc = msg_->close (); + errno_assert (rc == 0); + // HWM was checked before, so the pipe must be gone. Roll back + // messages that were piped, for example REP labels. + current_out->rollback (); + current_out = NULL; + } else { + if (!more_out) { + current_out->flush (); + current_out = NULL; + } + } + } else { + int rc = msg_->close (); + errno_assert (rc == 0); + } + + // Detach the message from the data buffer. + int rc = msg_->init (); + errno_assert (rc == 0); + + return 0; +} + +int zmq::router_t::xrecv (msg_t *msg_) +{ + if (prefetched) { + if (!routing_id_sent) { + int rc = msg_->move (prefetched_id); + errno_assert (rc == 0); + routing_id_sent = true; + } else { + int rc = msg_->move (prefetched_msg); + errno_assert (rc == 0); + prefetched = false; + } + more_in = (msg_->flags () & msg_t::more) != 0; + + if (!more_in) { + if (terminate_current_in) { + current_in->terminate (true); + terminate_current_in = false; + } + current_in = NULL; + } + return 0; + } + + pipe_t *pipe = NULL; + int rc = fq.recvpipe (msg_, &pipe); + + // It's possible that we receive peer's routing id. That happens + // after reconnection. The current implementation assumes that + // the peer always uses the same routing id. 
+ while (rc == 0 && msg_->is_routing_id ()) + rc = fq.recvpipe (msg_, &pipe); + + if (rc != 0) + return -1; + + zmq_assert (pipe != NULL); + + // If we are in the middle of reading a message, just return the next part. + if (more_in) { + more_in = (msg_->flags () & msg_t::more) != 0; + + if (!more_in) { + if (terminate_current_in) { + current_in->terminate (true); + terminate_current_in = false; + } + current_in = NULL; + } + } else { + // We are at the beginning of a message. + // Keep the message part we have in the prefetch buffer + // and return the ID of the peer instead. + rc = prefetched_msg.move (*msg_); + errno_assert (rc == 0); + prefetched = true; + current_in = pipe; + + const blob_t &routing_id = pipe->get_routing_id (); + rc = msg_->init_size (routing_id.size ()); + errno_assert (rc == 0); + memcpy (msg_->data (), routing_id.data (), routing_id.size ()); + msg_->set_flags (msg_t::more); + if (prefetched_msg.metadata ()) + msg_->set_metadata (prefetched_msg.metadata ()); + routing_id_sent = true; + } + + return 0; +} + +int zmq::router_t::rollback () +{ + if (current_out) { + current_out->rollback (); + current_out = NULL; + more_out = false; + } + return 0; +} + +bool zmq::router_t::xhas_in () +{ + // If we are in the middle of reading the messages, there are + // definitely more parts available. + if (more_in) + return true; + + // We may already have a message pre-fetched. + if (prefetched) + return true; + + // Try to read the next message. + // The message, if read, is kept in the pre-fetch buffer. + pipe_t *pipe = NULL; + int rc = fq.recvpipe (&prefetched_msg, &pipe); + + // It's possible that we receive peer's routing id. That happens + // after reconnection. The current implementation assumes that + // the peer always uses the same routing id. + // TODO: handle the situation when the peer changes its routing id. 
+ while (rc == 0 && prefetched_msg.is_routing_id ()) + rc = fq.recvpipe (&prefetched_msg, &pipe); + + if (rc != 0) + return false; + + zmq_assert (pipe != NULL); + + const blob_t &routing_id = pipe->get_routing_id (); + rc = prefetched_id.init_size (routing_id.size ()); + errno_assert (rc == 0); + memcpy (prefetched_id.data (), routing_id.data (), routing_id.size ()); + prefetched_id.set_flags (msg_t::more); + + prefetched = true; + routing_id_sent = false; + current_in = pipe; + + return true; +} + +bool zmq::router_t::xhas_out () +{ + // In theory, ROUTER socket is always ready for writing (except when + // MANDATORY is set). Whether actual attempt to write succeeds depends + // on whitch pipe the message is going to be routed to. + + if (!mandatory) + return true; + + bool has_out = false; + outpipes_t::iterator it; + for (it = outpipes.begin (); it != outpipes.end (); ++it) + has_out |= it->second.pipe->check_hwm (); + + return has_out; +} + +const zmq::blob_t &zmq::router_t::get_credential () const +{ + return fq.get_credential (); +} + +int zmq::router_t::get_peer_state (const void *routing_id_, + size_t routing_id_size_) const +{ + int res = 0; + + blob_t routing_id_blob ((unsigned char *) routing_id_, routing_id_size_); + outpipes_t::const_iterator it = outpipes.find (routing_id_blob); + if (it == outpipes.end ()) { + errno = EHOSTUNREACH; + return -1; + } + + const outpipe_t &outpipe = it->second; + if (outpipe.pipe->check_hwm ()) + res |= ZMQ_POLLOUT; + + /** \todo does it make any sense to check the inpipe as well? 
*/ + + return res; +} + +bool zmq::router_t::identify_peer (pipe_t *pipe_) +{ + msg_t msg; + bool ok; + blob_t routing_id; + + if (connect_routing_id.length ()) { + routing_id.set ((unsigned char *) connect_routing_id.c_str (), + connect_routing_id.length ()); + connect_routing_id.clear (); + outpipes_t::iterator it = outpipes.find (routing_id); + if (it != outpipes.end ()) + zmq_assert (false); // Not allowed to duplicate an existing rid + } else if ( + options + .raw_socket) { // Always assign an integral routing id for raw-socket + unsigned char buf[5]; + buf[0] = 0; + put_uint32 (buf + 1, next_integral_routing_id++); + routing_id.set (buf, sizeof buf); + } else if (!options.raw_socket) { + // Pick up handshake cases and also case where next integral routing id is set + msg.init (); + ok = pipe_->read (&msg); + if (!ok) + return false; + + if (msg.size () == 0) { + // Fall back on the auto-generation + unsigned char buf[5]; + buf[0] = 0; + put_uint32 (buf + 1, next_integral_routing_id++); + routing_id.set (buf, sizeof buf); + msg.close (); + } else { + routing_id.set (static_cast (msg.data ()), + msg.size ()); + outpipes_t::iterator it = outpipes.find (routing_id); + msg.close (); + + if (it != outpipes.end ()) { + if (!handover) + // Ignore peers with duplicate ID + return false; + + // We will allow the new connection to take over this + // routing id. Temporarily assign a new routing id to the + // existing pipe so we can terminate it asynchronously. + unsigned char buf[5]; + buf[0] = 0; + put_uint32 (buf + 1, next_integral_routing_id++); + blob_t new_routing_id (buf, sizeof buf); + + it->second.pipe->set_router_socket_routing_id (new_routing_id); + outpipe_t existing_outpipe = {it->second.pipe, + it->second.active}; + + ok = outpipes + .ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (new_routing_id), + existing_outpipe) + .second; + zmq_assert (ok); + + // Remove the existing routing id entry to allow the new + // connection to take the routing id. 
+ outpipes.erase (it); + + if (existing_outpipe.pipe == current_in) + terminate_current_in = true; + else + existing_outpipe.pipe->terminate (true); + } + } + } + + pipe_->set_router_socket_routing_id (routing_id); + // Add the record into output pipes lookup table + outpipe_t outpipe = {pipe_, true}; + ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe) + .second; + zmq_assert (ok); + + return true; +} diff --git a/src/router.hpp b/src/router.hpp index f9301773..56157d24 100644 --- a/src/router.hpp +++ b/src/router.hpp @@ -73,65 +73,65 @@ class router_t : public socket_base_t bool identify_peer (pipe_t *pipe_); // Fair queueing object for inbound pipes. - fq_t fq; + fq_t _fq; // True iff there is a message held in the pre-fetch buffer. - bool prefetched; + bool _prefetched; // If true, the receiver got the message part with // the peer's identity. - bool routing_id_sent; + bool _routing_id_sent; // Holds the prefetched identity. - msg_t prefetched_id; + msg_t _prefetched_id; // Holds the prefetched message. - msg_t prefetched_msg; + msg_t _prefetched_msg; // The pipe we are currently reading from - zmq::pipe_t *current_in; + zmq::pipe_t *_current_in; // Should current_in should be terminate after all parts received? - bool terminate_current_in; + bool _terminate_current_in; // If true, more incoming message parts are expected. - bool more_in; + bool _more_in; - struct outpipe_t + struct out_pipe_t { zmq::pipe_t *pipe; bool active; }; // We keep a set of pipes that have not been identified yet. - std::set anonymous_pipes; + std::set _anonymous_pipes; // Outbound pipes indexed by the peer IDs. - typedef std::map outpipes_t; - outpipes_t outpipes; + typedef std::map outpipes_t; + outpipes_t _out_pipes; // The pipe we are currently writing to. - zmq::pipe_t *current_out; + zmq::pipe_t *_current_out; // If true, more outgoing message parts are expected. - bool more_out; + bool _more_out; // Routing IDs are generated. 
It's a simple increment and wrap-over // algorithm. This value is the next ID to use (if not used already). - uint32_t next_integral_routing_id; + uint32_t _next_integral_routing_id; // If true, report EAGAIN to the caller instead of silently dropping // the message targeting an unknown peer. - bool mandatory; - bool raw_socket; + bool _mandatory; + bool _raw_socket; // if true, send an empty message to every connected router peer - bool probe_router; + bool _probe_router; // If true, the router will reassign an identity upon encountering a // name collision. The new pipe will take the identity, the old pipe // will be terminated. - bool handover; + bool _handover; router_t (const router_t &); const router_t &operator= (const router_t &); diff --git a/src/scatter.cpp b/src/scatter.cpp index dde3a799..cb9507a5 100644 --- a/src/scatter.cpp +++ b/src/scatter.cpp @@ -53,17 +53,17 @@ void zmq::scatter_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) pipe_->set_nodelay (); zmq_assert (pipe_); - lb.attach (pipe_); + _lb.attach (pipe_); } void zmq::scatter_t::xwrite_activated (pipe_t *pipe_) { - lb.activated (pipe_); + _lb.activated (pipe_); } void zmq::scatter_t::xpipe_terminated (pipe_t *pipe_) { - lb.pipe_terminated (pipe_); + _lb.pipe_terminated (pipe_); } int zmq::scatter_t::xsend (msg_t *msg_) @@ -74,10 +74,10 @@ int zmq::scatter_t::xsend (msg_t *msg_) return -1; } - return lb.send (msg_); + return _lb.send (msg_); } bool zmq::scatter_t::xhas_out () { - return lb.has_out (); + return _lb.has_out (); } diff --git a/src/scatter.hpp b/src/scatter.hpp index 22279205..10b57ed0 100644 --- a/src/scatter.hpp +++ b/src/scatter.hpp @@ -57,7 +57,7 @@ class scatter_t : public socket_base_t private: // Load balancer managing the outbound pipes. 
- lb_t lb; + lb_t _lb; scatter_t (const scatter_t &); const scatter_t &operator= (const scatter_t &); diff --git a/src/select.cpp b/src/select.cpp index 156bf76c..7b4ce815 100644 --- a/src/select.cpp +++ b/src/select.cpp @@ -59,14 +59,14 @@ zmq::select_t::select_t (const zmq::thread_ctx_t &ctx_) : worker_poller_base_t (ctx_), #if defined ZMQ_HAVE_WINDOWS // Fine as long as map is not cleared. - current_family_entry_it (family_entries.end ()) + _current_family_entry_it (_family_entries.end ()) #else - maxfd (retired_fd) + _max_fd (retired_fd) #endif { #if defined ZMQ_HAVE_WINDOWS for (size_t i = 0; i < fd_family_cache_size; ++i) - fd_family_cache[i] = std::make_pair (retired_fd, 0); + _fd_family_cache[i] = std::make_pair (retired_fd, 0); #endif } @@ -87,14 +87,16 @@ zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_, i_poll_events *events_) #if defined ZMQ_HAVE_WINDOWS u_short family = get_fd_family (fd_); wsa_assert (family != AF_UNSPEC); - family_entry_t &family_entry = family_entries[family]; + family_entry_t &family_entry = _family_entries[family]; +#else + family_entry_t &family_entry = _family_entry; #endif family_entry.fd_entries.push_back (fd_entry); FD_SET (fd_, &family_entry.fds_set.error); #if !defined ZMQ_HAVE_WINDOWS - if (fd_ > maxfd) - maxfd = fd_; + if (fd_ > _max_fd) + _max_fd = fd_; #endif adjust_load (1); @@ -171,7 +173,7 @@ int zmq::select_t::try_retire_fd_entry ( fd_entry_t &fd_entry = *fd_entry_it; zmq_assert (fd_entry.fd != retired_fd); - if (family_entry_it_ != current_family_entry_it) { + if (family_entry_it_ != _current_family_entry_it) { // Family is not currently being iterated and can be safely // modified in-place. So later it can be skipped without // re-verifying its content. 
@@ -195,16 +197,16 @@ void zmq::select_t::rm_fd (handle_t handle_) u_short family = get_fd_family (handle_); if (family != AF_UNSPEC) { family_entries_t::iterator family_entry_it = - family_entries.find (family); + _family_entries.find (family); retired += try_retire_fd_entry (family_entry_it, handle_); } else { // get_fd_family may fail and return AF_UNSPEC if the socket was not // successfully connected. In that case, we need to look for the // socket in all family_entries. - family_entries_t::iterator end = family_entries.end (); + family_entries_t::iterator end = _family_entries.end (); for (family_entries_t::iterator family_entry_it = - family_entries.begin (); + _family_entries.begin (); family_entry_it != end; ++family_entry_it) { if (retired += try_retire_fd_entry (family_entry_it, handle_)) { break; @@ -213,24 +215,24 @@ void zmq::select_t::rm_fd (handle_t handle_) } #else fd_entries_t::iterator fd_entry_it = - find_fd_entry_by_handle (family_entry.fd_entries, handle_); - assert (fd_entry_it != family_entry.fd_entries.end ()); + find_fd_entry_by_handle (_family_entry.fd_entries, handle_); + assert (fd_entry_it != _family_entry.fd_entries.end ()); zmq_assert (fd_entry_it->fd != retired_fd); fd_entry_it->fd = retired_fd; - family_entry.fds_set.remove_fd (handle_); + _family_entry.fds_set.remove_fd (handle_); ++retired; - if (handle_ == maxfd) { - maxfd = retired_fd; - for (fd_entry_it = family_entry.fd_entries.begin (); - fd_entry_it != family_entry.fd_entries.end (); ++fd_entry_it) - if (fd_entry_it->fd > maxfd) - maxfd = fd_entry_it->fd; + if (handle_ == _max_fd) { + _max_fd = retired_fd; + for (fd_entry_it = _family_entry.fd_entries.begin (); + fd_entry_it != _family_entry.fd_entries.end (); ++fd_entry_it) + if (fd_entry_it->fd > _max_fd) + _max_fd = fd_entry_it->fd; } - family_entry.has_retired = true; + _family_entry.has_retired = true; #endif zmq_assert (retired == 1); adjust_load (-1); @@ -242,7 +244,9 @@ void zmq::select_t::set_pollin (handle_t 
handle_) #if defined ZMQ_HAVE_WINDOWS u_short family = get_fd_family (handle_); wsa_assert (family != AF_UNSPEC); - family_entry_t &family_entry = family_entries[family]; + family_entry_t &family_entry = _family_entries[family]; +#else + family_entry_t &family_entry = _family_entry; #endif FD_SET (handle_, &family_entry.fds_set.read); } @@ -253,7 +257,9 @@ void zmq::select_t::reset_pollin (handle_t handle_) #if defined ZMQ_HAVE_WINDOWS u_short family = get_fd_family (handle_); wsa_assert (family != AF_UNSPEC); - family_entry_t &family_entry = family_entries[family]; + family_entry_t &family_entry = _family_entries[family]; +#else + family_entry_t &family_entry = _family_entry; #endif FD_CLR (handle_, &family_entry.fds_set.read); } @@ -264,7 +270,9 @@ void zmq::select_t::set_pollout (handle_t handle_) #if defined ZMQ_HAVE_WINDOWS u_short family = get_fd_family (handle_); wsa_assert (family != AF_UNSPEC); - family_entry_t &family_entry = family_entries[family]; + family_entry_t &family_entry = _family_entries[family]; +#else + family_entry_t &family_entry = _family_entry; #endif FD_SET (handle_, &family_entry.fds_set.write); } @@ -275,7 +283,9 @@ void zmq::select_t::reset_pollout (handle_t handle_) #if defined ZMQ_HAVE_WINDOWS u_short family = get_fd_family (handle_); wsa_assert (family != AF_UNSPEC); - family_entry_t &family_entry = family_entries[family]; + family_entry_t &family_entry = _family_entries[family]; +#else + family_entry_t &family_entry = _family_entry; #endif FD_CLR (handle_, &family_entry.fds_set.write); } @@ -300,9 +310,9 @@ void zmq::select_t::loop () cleanup_retired (); #ifdef _WIN32 - if (family_entries.empty ()) { + if (_family_entries.empty ()) { #else - if (family_entry.fd_entries.empty ()) { + if (_family_entry.fd_entries.empty ()) { #endif zmq_assert (get_load () == 0); @@ -338,7 +348,7 @@ void zmq::select_t::loop () // If there is just one family, there is no reason to use WSA events. 
int rc = 0; - const bool use_wsa_events = family_entries.size () > 1; + const bool use_wsa_events = _family_entries.size () > 1; if (use_wsa_events) { // TODO: I don't really understand why we are doing this. If any of // the events was signaled, we will call select for each fd_family @@ -350,8 +360,8 @@ void zmq::select_t::loop () wsa_events_t wsa_events; for (family_entries_t::iterator family_entry_it = - family_entries.begin (); - family_entry_it != family_entries.end (); ++family_entry_it) { + _family_entries.begin (); + family_entry_it != _family_entries.end (); ++family_entry_it) { family_entry_t &family_entry = family_entry_it->second; for (fd_entries_t::iterator fd_entry_it = @@ -392,10 +402,10 @@ void zmq::select_t::loop () continue; } - for (current_family_entry_it = family_entries.begin (); - current_family_entry_it != family_entries.end (); - ++current_family_entry_it) { - family_entry_t &family_entry = current_family_entry_it->second; + for (_current_family_entry_it = _family_entries.begin (); + _current_family_entry_it != _family_entries.end (); + ++_current_family_entry_it) { + family_entry_t &family_entry = _current_family_entry_it->second; if (use_wsa_events) { @@ -408,7 +418,7 @@ void zmq::select_t::loop () } } #else - select_family_entry (family_entry, maxfd + 1, timeout > 0, tv); + select_family_entry (_family_entry, _max_fd + 1, timeout > 0, tv); #endif } } @@ -518,15 +528,15 @@ bool zmq::select_t::cleanup_retired (family_entry_t &family_entry_) void zmq::select_t::cleanup_retired () { #ifdef _WIN32 - for (family_entries_t::iterator it = family_entries.begin (); - it != family_entries.end ();) { + for (family_entries_t::iterator it = _family_entries.begin (); + it != _family_entries.end ();) { if (cleanup_retired (it->second)) - it = family_entries.erase (it); + it = _family_entries.erase (it); else ++it; } #else - cleanup_retired (family_entry); + cleanup_retired (_family_entry); #endif } @@ -547,7 +557,7 @@ u_short 
zmq::select_t::get_fd_family (fd_t fd_) // for the same sockets, and determine_fd_family is expensive size_t i; for (i = 0; i < fd_family_cache_size; ++i) { - const std::pair &entry = fd_family_cache[i]; + const std::pair &entry = _fd_family_cache[i]; if (entry.first == fd_) { return entry.second; } @@ -558,11 +568,11 @@ u_short zmq::select_t::get_fd_family (fd_t fd_) std::pair res = std::make_pair (fd_, determine_fd_family (fd_)); if (i < fd_family_cache_size) { - fd_family_cache[i] = res; + _fd_family_cache[i] = res; } else { // just overwrite a random entry // could be optimized by some LRU strategy - fd_family_cache[rand () % fd_family_cache_size] = res; + _fd_family_cache[rand () % fd_family_cache_size] = res; } return res.second; diff --git a/src/select.hpp b/src/select.hpp index 76130244..017d9fd5 100644 --- a/src/select.hpp +++ b/src/select.hpp @@ -131,15 +131,15 @@ class select_t : public worker_poller_base_t WSAEVENT events[4]; }; - family_entries_t family_entries; + family_entries_t _family_entries; // See loop for details. 
- family_entries_t::iterator current_family_entry_it; + family_entries_t::iterator _current_family_entry_it; int try_retire_fd_entry (family_entries_t::iterator family_entry_it_, zmq::fd_t &handle_); static const size_t fd_family_cache_size = 8; - std::pair fd_family_cache[fd_family_cache_size]; + std::pair _fd_family_cache[fd_family_cache_size]; u_short get_fd_family (fd_t fd_); @@ -147,8 +147,8 @@ class select_t : public worker_poller_base_t static u_short determine_fd_family (fd_t fd_); #else // on non-Windows, we can treat all fds as one family - family_entry_t family_entry; - fd_t maxfd; + family_entry_t _family_entry; + fd_t _max_fd; #endif void cleanup_retired (); diff --git a/src/server.cpp b/src/server.cpp index 319edd30..9da4b478 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -38,14 +38,14 @@ zmq::server_t::server_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_, true), - next_routing_id (generate_random ()) + _next_routing_id (generate_random ()) { options.type = ZMQ_SERVER; } zmq::server_t::~server_t () { - zmq_assert (outpipes.empty ()); + zmq_assert (_out_pipes.empty ()); } void zmq::server_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) @@ -54,41 +54,41 @@ void zmq::server_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) zmq_assert (pipe_); - uint32_t routing_id = next_routing_id++; + uint32_t routing_id = _next_routing_id++; if (!routing_id) - routing_id = next_routing_id++; // Never use Routing ID zero + routing_id = _next_routing_id++; // Never use Routing ID zero pipe_->set_server_socket_routing_id (routing_id); // Add the record into output pipes lookup table outpipe_t outpipe = {pipe_, true}; - bool ok = outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (routing_id, outpipe).second; + bool ok = _out_pipes.ZMQ_MAP_INSERT_OR_EMPLACE (routing_id, outpipe).second; zmq_assert (ok); - fq.attach (pipe_); + _fq.attach (pipe_); } void zmq::server_t::xpipe_terminated (pipe_t *pipe_) { - outpipes_t::iterator it = 
- outpipes.find (pipe_->get_server_socket_routing_id ()); - zmq_assert (it != outpipes.end ()); - outpipes.erase (it); - fq.pipe_terminated (pipe_); + out_pipes_t::iterator it = + _out_pipes.find (pipe_->get_server_socket_routing_id ()); + zmq_assert (it != _out_pipes.end ()); + _out_pipes.erase (it); + _fq.pipe_terminated (pipe_); } void zmq::server_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::server_t::xwrite_activated (pipe_t *pipe_) { - outpipes_t::iterator it; - for (it = outpipes.begin (); it != outpipes.end (); ++it) + out_pipes_t::iterator it; + for (it = _out_pipes.begin (); it != _out_pipes.end (); ++it) if (it->second.pipe == pipe_) break; - zmq_assert (it != outpipes.end ()); + zmq_assert (it != _out_pipes.end ()); zmq_assert (!it->second.active); it->second.active = true; } @@ -102,9 +102,9 @@ int zmq::server_t::xsend (msg_t *msg_) } // Find the pipe associated with the routing stored in the message. uint32_t routing_id = msg_->get_routing_id (); - outpipes_t::iterator it = outpipes.find (routing_id); + out_pipes_t::iterator it = _out_pipes.find (routing_id); - if (it != outpipes.end ()) { + if (it != _out_pipes.end ()) { if (!it->second.pipe->check_write ()) { it->second.active = false; errno = EAGAIN; @@ -137,19 +137,19 @@ int zmq::server_t::xsend (msg_t *msg_) int zmq::server_t::xrecv (msg_t *msg_) { pipe_t *pipe = NULL; - int rc = fq.recvpipe (msg_, &pipe); + int rc = _fq.recvpipe (msg_, &pipe); // Drop any messages with more flag while (rc == 0 && msg_->flags () & msg_t::more) { // drop all frames of the current multi-frame message - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); while (rc == 0 && msg_->flags () & msg_t::more) - rc = fq.recvpipe (msg_, NULL); + rc = _fq.recvpipe (msg_, NULL); // get the new message if (rc == 0) - rc = fq.recvpipe (msg_, &pipe); + rc = _fq.recvpipe (msg_, &pipe); } if (rc != 0) @@ -165,7 +165,7 @@ int zmq::server_t::xrecv (msg_t *msg_) bool 
zmq::server_t::xhas_in () { - return fq.has_in (); + return _fq.has_in (); } bool zmq::server_t::xhas_out () @@ -178,5 +178,5 @@ bool zmq::server_t::xhas_out () const zmq::blob_t &zmq::server_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } diff --git a/src/server.hpp b/src/server.hpp index 3e344c4f..f77677ed 100644 --- a/src/server.hpp +++ b/src/server.hpp @@ -66,7 +66,7 @@ class server_t : public socket_base_t private: // Fair queueing object for inbound pipes. - fq_t fq; + fq_t _fq; struct outpipe_t { @@ -75,12 +75,12 @@ class server_t : public socket_base_t }; // Outbound pipes indexed by the peer IDs. - typedef std::map outpipes_t; - outpipes_t outpipes; + typedef std::map out_pipes_t; + out_pipes_t _out_pipes; // Routing IDs are generated. It's a simple increment and wrap-over // algorithm. This value is the next ID to use (if not used already). - uint32_t next_routing_id; + uint32_t _next_routing_id; server_t (const server_t &); const server_t &operator= (const server_t &); diff --git a/src/session_base.cpp b/src/session_base.cpp index ecf3aa2c..8137dcb7 100644 --- a/src/session_base.cpp +++ b/src/session_base.cpp @@ -104,59 +104,59 @@ zmq::session_base_t::session_base_t (class io_thread_t *io_thread_, address_t *addr_) : own_t (io_thread_, options_), io_object_t (io_thread_), - active (active_), - pipe (NULL), - zap_pipe (NULL), - incomplete_in (false), - pending (false), - engine (NULL), - socket (socket_), - io_thread (io_thread_), - has_linger_timer (false), - addr (addr_) + _active (active_), + _pipe (NULL), + _zap_pipe (NULL), + _incomplete_in (false), + _pending (false), + _engine (NULL), + _socket (socket_), + _io_thread (io_thread_), + _has_linger_timer (false), + _addr (addr_) { } const char *zmq::session_base_t::get_endpoint () const { - return engine->get_endpoint (); + return _engine->get_endpoint (); } zmq::session_base_t::~session_base_t () { - zmq_assert (!pipe); - zmq_assert (!zap_pipe); + 
zmq_assert (!_pipe); + zmq_assert (!_zap_pipe); // If there's still a pending linger timer, remove it. - if (has_linger_timer) { + if (_has_linger_timer) { cancel_timer (linger_timer_id); - has_linger_timer = false; + _has_linger_timer = false; } // Close the engine. - if (engine) - engine->terminate (); + if (_engine) + _engine->terminate (); - LIBZMQ_DELETE (addr); + LIBZMQ_DELETE (_addr); } void zmq::session_base_t::attach_pipe (pipe_t *pipe_) { zmq_assert (!is_terminating ()); - zmq_assert (!pipe); + zmq_assert (!_pipe); zmq_assert (pipe_); - pipe = pipe_; - pipe->set_event_sink (this); + _pipe = pipe_; + _pipe->set_event_sink (this); } int zmq::session_base_t::pull_msg (msg_t *msg_) { - if (!pipe || !pipe->read (msg_)) { + if (!_pipe || !_pipe->read (msg_)) { errno = EAGAIN; return -1; } - incomplete_in = (msg_->flags () & msg_t::more) != 0; + _incomplete_in = (msg_->flags () & msg_t::more) != 0; return 0; } @@ -165,7 +165,7 @@ int zmq::session_base_t::push_msg (msg_t *msg_) { if (msg_->flags () & msg_t::command) return 0; - if (pipe && pipe->write (msg_)) { + if (_pipe && _pipe->write (msg_)) { int rc = msg_->init (); errno_assert (rc == 0); return 0; @@ -177,12 +177,12 @@ int zmq::session_base_t::push_msg (msg_t *msg_) int zmq::session_base_t::read_zap_msg (msg_t *msg_) { - if (zap_pipe == NULL) { + if (_zap_pipe == NULL) { errno = ENOTCONN; return -1; } - if (!zap_pipe->read (msg_)) { + if (!_zap_pipe->read (msg_)) { errno = EAGAIN; return -1; } @@ -192,13 +192,13 @@ int zmq::session_base_t::read_zap_msg (msg_t *msg_) int zmq::session_base_t::write_zap_msg (msg_t *msg_) { - if (zap_pipe == NULL || !zap_pipe->write (msg_)) { + if (_zap_pipe == NULL || !_zap_pipe->write (msg_)) { errno = ENOTCONN; return -1; } if ((msg_->flags () & msg_t::more) == 0) - zap_pipe->flush (); + _zap_pipe->flush (); const int rc = msg_->init (); errno_assert (rc == 0); @@ -211,21 +211,21 @@ void zmq::session_base_t::reset () void zmq::session_base_t::flush () { - if (pipe) - 
pipe->flush (); + if (_pipe) + _pipe->flush (); } void zmq::session_base_t::clean_pipes () { - zmq_assert (pipe != NULL); + zmq_assert (_pipe != NULL); // Get rid of half-processed messages in the out pipe. Flush any // unflushed messages upstream. - pipe->rollback (); - pipe->flush (); + _pipe->rollback (); + _pipe->flush (); // Remove any half-read message from the in pipe. - while (incomplete_in) { + while (_incomplete_in) { msg_t msg; int rc = msg.init (); errno_assert (rc == 0); @@ -239,26 +239,26 @@ void zmq::session_base_t::clean_pipes () void zmq::session_base_t::pipe_terminated (pipe_t *pipe_) { // Drop the reference to the deallocated pipe if required. - zmq_assert (pipe_ == pipe || pipe_ == zap_pipe - || terminating_pipes.count (pipe_) == 1); + zmq_assert (pipe_ == _pipe || pipe_ == _zap_pipe + || _terminating_pipes.count (pipe_) == 1); - if (pipe_ == pipe) { + if (pipe_ == _pipe) { // If this is our current pipe, remove it - pipe = NULL; - if (has_linger_timer) { + _pipe = NULL; + if (_has_linger_timer) { cancel_timer (linger_timer_id); - has_linger_timer = false; + _has_linger_timer = false; } - } else if (pipe_ == zap_pipe) - zap_pipe = NULL; + } else if (pipe_ == _zap_pipe) + _zap_pipe = NULL; else // Remove the pipe from the detached pipes set - terminating_pipes.erase (pipe_); + _terminating_pipes.erase (pipe_); if (!is_terminating () && options.raw_socket) { - if (engine) { - engine->terminate (); - engine = NULL; + if (_engine) { + _engine->terminate (); + _engine = NULL; } terminate (); } @@ -266,8 +266,8 @@ void zmq::session_base_t::pipe_terminated (pipe_t *pipe_) // If we are waiting for pending messages to be sent, at this point // we are sure that there will be no more messages and we can proceed // with termination safely. 
- if (pending && !pipe && !zap_pipe && terminating_pipes.empty ()) { - pending = false; + if (_pending && !_pipe && !_zap_pipe && _terminating_pipes.empty ()) { + _pending = false; own_t::process_term (0); } } @@ -275,34 +275,34 @@ void zmq::session_base_t::pipe_terminated (pipe_t *pipe_) void zmq::session_base_t::read_activated (pipe_t *pipe_) { // Skip activating if we're detaching this pipe - if (unlikely (pipe_ != pipe && pipe_ != zap_pipe)) { - zmq_assert (terminating_pipes.count (pipe_) == 1); + if (unlikely (pipe_ != _pipe && pipe_ != _zap_pipe)) { + zmq_assert (_terminating_pipes.count (pipe_) == 1); return; } - if (unlikely (engine == NULL)) { - pipe->check_read (); + if (unlikely (_engine == NULL)) { + _pipe->check_read (); return; } - if (likely (pipe_ == pipe)) - engine->restart_output (); + if (likely (pipe_ == _pipe)) + _engine->restart_output (); else { // i.e. pipe_ == zap_pipe - engine->zap_msg_available (); + _engine->zap_msg_available (); } } void zmq::session_base_t::write_activated (pipe_t *pipe_) { // Skip activating if we're detaching this pipe - if (pipe != pipe_) { - zmq_assert (terminating_pipes.count (pipe_) == 1); + if (_pipe != pipe_) { + zmq_assert (_terminating_pipes.count (pipe_) == 1); return; } - if (engine) - engine->restart_input (); + if (_engine) + _engine->restart_input (); } void zmq::session_base_t::hiccuped (pipe_t *) @@ -314,12 +314,12 @@ void zmq::session_base_t::hiccuped (pipe_t *) zmq::socket_base_t *zmq::session_base_t::get_socket () { - return socket; + return _socket; } void zmq::session_base_t::process_plug () { - if (active) + if (_active) start_connecting (false); } @@ -331,7 +331,7 @@ void zmq::session_base_t::process_plug () // security flaw. 
int zmq::session_base_t::zap_connect () { - if (zap_pipe != NULL) + if (_zap_pipe != NULL) return 0; endpoint_t peer = find_endpoint ("inproc://zeromq.zap.01"); @@ -352,9 +352,9 @@ int zmq::session_base_t::zap_connect () errno_assert (rc == 0); // Attach local end of the pipe to this socket object. - zap_pipe = new_pipes[0]; - zap_pipe->set_nodelay (); - zap_pipe->set_event_sink (this); + _zap_pipe = new_pipes[0]; + _zap_pipe->set_nodelay (); + _zap_pipe->set_event_sink (this); send_bind (peer.socket, new_pipes[1], false); @@ -364,9 +364,9 @@ int zmq::session_base_t::zap_connect () rc = id.init (); errno_assert (rc == 0); id.set_flags (msg_t::routing_id); - bool ok = zap_pipe->write (&id); + bool ok = _zap_pipe->write (&id); zmq_assert (ok); - zap_pipe->flush (); + _zap_pipe->flush (); } return 0; @@ -382,8 +382,8 @@ void zmq::session_base_t::process_attach (i_engine *engine_) zmq_assert (engine_ != NULL); // Create the pipe if it does not exist yet. - if (!pipe && !is_terminating ()) { - object_t *parents[2] = {this, socket}; + if (!_pipe && !is_terminating ()) { + object_t *parents[2] = {this, _socket}; pipe_t *pipes[2] = {NULL, NULL}; bool conflate = @@ -402,27 +402,27 @@ void zmq::session_base_t::process_attach (i_engine *engine_) pipes[0]->set_event_sink (this); // Remember the local end of the pipe. - zmq_assert (!pipe); - pipe = pipes[0]; + zmq_assert (!_pipe); + _pipe = pipes[0]; // Ask socket to plug into the remote end of the pipe. - send_bind (socket, pipes[1]); + send_bind (_socket, pipes[1]); } // Plug in the engine. - zmq_assert (!engine); - engine = engine_; - engine->plug (io_thread, this); + zmq_assert (!_engine); + _engine = engine_; + _engine->plug (_io_thread, this); } void zmq::session_base_t::engine_error ( zmq::stream_engine_t::error_reason_t reason_) { // Engine is dead. Let's forget about it. - engine = NULL; + _engine = NULL; // Remove any half-done messages from the pipes. 
- if (pipe) + if (_pipe) clean_pipes (); zmq_assert (reason_ == stream_engine_t::connection_error @@ -433,17 +433,17 @@ void zmq::session_base_t::engine_error ( case stream_engine_t::timeout_error: /* FALLTHROUGH */ case stream_engine_t::connection_error: - if (active) { + if (_active) { reconnect (); break; } /* FALLTHROUGH */ case stream_engine_t::protocol_error: - if (pending) { - if (pipe) - pipe->terminate (false); - if (zap_pipe) - zap_pipe->terminate (false); + if (_pending) { + if (_pipe) + _pipe->terminate (false); + if (_zap_pipe) + _zap_pipe->terminate (false); } else { terminate (); } @@ -451,50 +451,50 @@ void zmq::session_base_t::engine_error ( } // Just in case there's only a delimiter in the pipe. - if (pipe) - pipe->check_read (); + if (_pipe) + _pipe->check_read (); - if (zap_pipe) - zap_pipe->check_read (); + if (_zap_pipe) + _zap_pipe->check_read (); } void zmq::session_base_t::process_term (int linger_) { - zmq_assert (!pending); + zmq_assert (!_pending); // If the termination of the pipe happens before the term command is // delivered there's nothing much to do. We can proceed with the // standard termination immediately. - if (!pipe && !zap_pipe && terminating_pipes.empty ()) { + if (!_pipe && !_zap_pipe && _terminating_pipes.empty ()) { own_t::process_term (0); return; } - pending = true; + _pending = true; - if (pipe != NULL) { + if (_pipe != NULL) { // If there's finite linger value, delay the termination. // If linger is infinite (negative) we don't even have to set // the timer. if (linger_ > 0) { - zmq_assert (!has_linger_timer); + zmq_assert (!_has_linger_timer); add_timer (linger_, linger_timer_id); - has_linger_timer = true; + _has_linger_timer = true; } // Start pipe termination process. Delay the termination till all messages // are processed in case the linger time is non-zero. - pipe->terminate (linger_ != 0); + _pipe->terminate (linger_ != 0); // TODO: Should this go into pipe_t::terminate ? 
// In case there's no engine and there's only delimiter in the // pipe it wouldn't be ever read. Thus we check for it explicitly. - if (!engine) - pipe->check_read (); + if (!_engine) + _pipe->check_read (); } - if (zap_pipe != NULL) - zap_pipe->terminate (false); + if (_zap_pipe != NULL) + _zap_pipe->terminate (false); } void zmq::session_base_t::timer_event (int id_) @@ -502,28 +502,28 @@ void zmq::session_base_t::timer_event (int id_) // Linger period expired. We can proceed with termination even though // there are still pending messages to be sent. zmq_assert (id_ == linger_timer_id); - has_linger_timer = false; + _has_linger_timer = false; // Ask pipe to terminate even though there may be pending messages in it. - zmq_assert (pipe); - pipe->terminate (false); + zmq_assert (_pipe); + _pipe->terminate (false); } void zmq::session_base_t::reconnect () { // For delayed connect situations, terminate the pipe // and reestablish later on - if (pipe && options.immediate == 1 && addr->protocol != "pgm" - && addr->protocol != "epgm" && addr->protocol != "norm" - && addr->protocol != "udp") { - pipe->hiccup (); - pipe->terminate (false); - terminating_pipes.insert (pipe); - pipe = NULL; + if (_pipe && options.immediate == 1 && _addr->protocol != "pgm" + && _addr->protocol != "epgm" && _addr->protocol != "norm" + && _addr->protocol != "udp") { + _pipe->hiccup (); + _pipe->terminate (false); + _terminating_pipes.insert (_pipe); + _pipe = NULL; - if (has_linger_timer) { + if (_has_linger_timer) { cancel_timer (linger_timer_id); - has_linger_timer = false; + _has_linger_timer = false; } } @@ -534,21 +534,21 @@ void zmq::session_base_t::reconnect () start_connecting (true); else { std::string *ep = new (std::string); - addr->to_string (*ep); - send_term_endpoint (socket, ep); + _addr->to_string (*ep); + send_term_endpoint (_socket, ep); } // For subscriber sockets we hiccup the inbound pipe, which will cause // the socket object to resend all the subscriptions. 
- if (pipe + if (_pipe && (options.type == ZMQ_SUB || options.type == ZMQ_XSUB || options.type == ZMQ_DISH)) - pipe->hiccup (); + _pipe->hiccup (); } void zmq::session_base_t::start_connecting (bool wait_) { - zmq_assert (active); + zmq_assert (_active); // Choose I/O thread to run connecter in. Given that we are already // running in an I/O thread, there must be at least one available. @@ -557,19 +557,19 @@ void zmq::session_base_t::start_connecting (bool wait_) // Create the connecter object. - if (addr->protocol == "tcp") { + if (_addr->protocol == "tcp") { if (!options.socks_proxy_address.empty ()) { address_t *proxy_address = new (std::nothrow) address_t ("tcp", options.socks_proxy_address, this->get_ctx ()); alloc_assert (proxy_address); - socks_connecter_t *connecter = - new (std::nothrow) socks_connecter_t (io_thread, this, options, - addr, proxy_address, wait_); + socks_connecter_t *connecter = new (std::nothrow) + socks_connecter_t (io_thread, this, options, _addr, proxy_address, + wait_); alloc_assert (connecter); launch_child (connecter); } else { tcp_connecter_t *connecter = new (std::nothrow) - tcp_connecter_t (io_thread, this, options, addr, wait_); + tcp_connecter_t (io_thread, this, options, _addr, wait_); alloc_assert (connecter); launch_child (connecter); } @@ -578,25 +578,25 @@ void zmq::session_base_t::start_connecting (bool wait_) #if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS \ && !defined ZMQ_HAVE_VXWORKS - if (addr->protocol == "ipc") { + if (_addr->protocol == "ipc") { ipc_connecter_t *connecter = new (std::nothrow) - ipc_connecter_t (io_thread, this, options, addr, wait_); + ipc_connecter_t (io_thread, this, options, _addr, wait_); alloc_assert (connecter); launch_child (connecter); return; } #endif #if defined ZMQ_HAVE_TIPC - if (addr->protocol == "tipc") { + if (_addr->protocol == "tipc") { tipc_connecter_t *connecter = new (std::nothrow) - tipc_connecter_t (io_thread, this, options, addr, wait_); + tipc_connecter_t 
(io_thread, this, options, _addr, wait_); alloc_assert (connecter); launch_child (connecter); return; } #endif - if (addr->protocol == "udp") { + if (_addr->protocol == "udp") { zmq_assert (options.type == ZMQ_DISH || options.type == ZMQ_RADIO || options.type == ZMQ_DGRAM); @@ -617,7 +617,7 @@ void zmq::session_base_t::start_connecting (bool wait_) recv = true; } - int rc = engine->init (addr, send, recv); + int rc = engine->init (_addr, send, recv); errno_assert (rc == 0); send_attach (this, engine); @@ -628,12 +628,12 @@ void zmq::session_base_t::start_connecting (bool wait_) #ifdef ZMQ_HAVE_OPENPGM // Both PGM and EPGM transports are using the same infrastructure. - if (addr->protocol == "pgm" || addr->protocol == "epgm") { + if (_addr->protocol == "pgm" || _addr->protocol == "epgm") { zmq_assert (options.type == ZMQ_PUB || options.type == ZMQ_XPUB || options.type == ZMQ_SUB || options.type == ZMQ_XSUB); // For EPGM transport with UDP encapsulation of PGM is used. - bool const udp_encapsulation = addr->protocol == "epgm"; + bool const udp_encapsulation = _addr->protocol == "epgm"; // At this point we'll create message pipes to the session straight // away. 
There's no point in delaying it as no concept of 'connect' @@ -645,7 +645,7 @@ void zmq::session_base_t::start_connecting (bool wait_) alloc_assert (pgm_sender); int rc = - pgm_sender->init (udp_encapsulation, addr->address.c_str ()); + pgm_sender->init (udp_encapsulation, _addr->address.c_str ()); errno_assert (rc == 0); send_attach (this, pgm_sender); @@ -656,7 +656,7 @@ void zmq::session_base_t::start_connecting (bool wait_) alloc_assert (pgm_receiver); int rc = - pgm_receiver->init (udp_encapsulation, addr->address.c_str ()); + pgm_receiver->init (udp_encapsulation, _addr->address.c_str ()); errno_assert (rc == 0); send_attach (this, pgm_receiver); @@ -667,7 +667,7 @@ void zmq::session_base_t::start_connecting (bool wait_) #endif #ifdef ZMQ_HAVE_NORM - if (addr->protocol == "norm") { + if (_addr->protocol == "norm") { // At this point we'll create message pipes to the session straight // away. There's no point in delaying it as no concept of 'connect' // exists with NORM anyway. @@ -677,7 +677,7 @@ void zmq::session_base_t::start_connecting (bool wait_) new (std::nothrow) norm_engine_t (io_thread, options); alloc_assert (norm_sender); - int rc = norm_sender->init (addr->address.c_str (), true, false); + int rc = norm_sender->init (_addr->address.c_str (), true, false); errno_assert (rc == 0); send_attach (this, norm_sender); @@ -688,7 +688,7 @@ void zmq::session_base_t::start_connecting (bool wait_) new (std::nothrow) norm_engine_t (io_thread, options); alloc_assert (norm_receiver); - int rc = norm_receiver->init (addr->address.c_str (), false, true); + int rc = norm_receiver->init (_addr->address.c_str (), false, true); errno_assert (rc == 0); send_attach (this, norm_receiver); @@ -698,9 +698,9 @@ void zmq::session_base_t::start_connecting (bool wait_) #endif // ZMQ_HAVE_NORM #if defined ZMQ_HAVE_VMCI - if (addr->protocol == "vmci") { + if (_addr->protocol == "vmci") { vmci_connecter_t *connecter = new (std::nothrow) - vmci_connecter_t (io_thread, this, 
options, addr, wait_); + vmci_connecter_t (io_thread, this, options, _addr, wait_); alloc_assert (connecter); launch_child (connecter); return; diff --git a/src/session_base.cpp~RF4069b78.TMP b/src/session_base.cpp~RF4069b78.TMP new file mode 100644 index 00000000..ecf3aa2c --- /dev/null +++ b/src/session_base.cpp~RF4069b78.TMP @@ -0,0 +1,711 @@ +/* + Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file + + This file is part of libzmq, the ZeroMQ core engine in C++. + + libzmq is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License (LGPL) as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + As a special exception, the Contributors give you permission to link + this library with independent modules to produce an executable, + regardless of the license terms of these independent modules, and to + copy and distribute the resulting executable under terms of your choice, + provided that you also meet, for each linked independent module, the + terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. + If you modify this library, you must extend this exception to your + version of the library. + + libzmq is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this program. If not, see . 
+*/ + +#include "precompiled.hpp" +#include "macros.hpp" +#include "session_base.hpp" +#include "i_engine.hpp" +#include "err.hpp" +#include "pipe.hpp" +#include "likely.hpp" +#include "tcp_connecter.hpp" +#include "ipc_connecter.hpp" +#include "tipc_connecter.hpp" +#include "socks_connecter.hpp" +#include "vmci_connecter.hpp" +#include "pgm_sender.hpp" +#include "pgm_receiver.hpp" +#include "address.hpp" +#include "norm_engine.hpp" +#include "udp_engine.hpp" + +#include "ctx.hpp" +#include "req.hpp" +#include "radio.hpp" +#include "dish.hpp" + +zmq::session_base_t *zmq::session_base_t::create (class io_thread_t *io_thread_, + bool active_, + class socket_base_t *socket_, + const options_t &options_, + address_t *addr_) +{ + session_base_t *s = NULL; + switch (options_.type) { + case ZMQ_REQ: + s = new (std::nothrow) + req_session_t (io_thread_, active_, socket_, options_, addr_); + break; + case ZMQ_RADIO: + s = new (std::nothrow) + radio_session_t (io_thread_, active_, socket_, options_, addr_); + break; + case ZMQ_DISH: + s = new (std::nothrow) + dish_session_t (io_thread_, active_, socket_, options_, addr_); + break; + case ZMQ_DEALER: + case ZMQ_REP: + case ZMQ_ROUTER: + case ZMQ_PUB: + case ZMQ_XPUB: + case ZMQ_SUB: + case ZMQ_XSUB: + case ZMQ_PUSH: + case ZMQ_PULL: + case ZMQ_PAIR: + case ZMQ_STREAM: + case ZMQ_SERVER: + case ZMQ_CLIENT: + case ZMQ_GATHER: + case ZMQ_SCATTER: + case ZMQ_DGRAM: + s = new (std::nothrow) + session_base_t (io_thread_, active_, socket_, options_, addr_); + break; + default: + errno = EINVAL; + return NULL; + } + alloc_assert (s); + return s; +} + +zmq::session_base_t::session_base_t (class io_thread_t *io_thread_, + bool active_, + class socket_base_t *socket_, + const options_t &options_, + address_t *addr_) : + own_t (io_thread_, options_), + io_object_t (io_thread_), + active (active_), + pipe (NULL), + zap_pipe (NULL), + incomplete_in (false), + pending (false), + engine (NULL), + socket (socket_), + io_thread (io_thread_), + 
has_linger_timer (false), + addr (addr_) +{ +} + +const char *zmq::session_base_t::get_endpoint () const +{ + return engine->get_endpoint (); +} + +zmq::session_base_t::~session_base_t () +{ + zmq_assert (!pipe); + zmq_assert (!zap_pipe); + + // If there's still a pending linger timer, remove it. + if (has_linger_timer) { + cancel_timer (linger_timer_id); + has_linger_timer = false; + } + + // Close the engine. + if (engine) + engine->terminate (); + + LIBZMQ_DELETE (addr); +} + +void zmq::session_base_t::attach_pipe (pipe_t *pipe_) +{ + zmq_assert (!is_terminating ()); + zmq_assert (!pipe); + zmq_assert (pipe_); + pipe = pipe_; + pipe->set_event_sink (this); +} + +int zmq::session_base_t::pull_msg (msg_t *msg_) +{ + if (!pipe || !pipe->read (msg_)) { + errno = EAGAIN; + return -1; + } + + incomplete_in = (msg_->flags () & msg_t::more) != 0; + + return 0; +} + +int zmq::session_base_t::push_msg (msg_t *msg_) +{ + if (msg_->flags () & msg_t::command) + return 0; + if (pipe && pipe->write (msg_)) { + int rc = msg_->init (); + errno_assert (rc == 0); + return 0; + } + + errno = EAGAIN; + return -1; +} + +int zmq::session_base_t::read_zap_msg (msg_t *msg_) +{ + if (zap_pipe == NULL) { + errno = ENOTCONN; + return -1; + } + + if (!zap_pipe->read (msg_)) { + errno = EAGAIN; + return -1; + } + + return 0; +} + +int zmq::session_base_t::write_zap_msg (msg_t *msg_) +{ + if (zap_pipe == NULL || !zap_pipe->write (msg_)) { + errno = ENOTCONN; + return -1; + } + + if ((msg_->flags () & msg_t::more) == 0) + zap_pipe->flush (); + + const int rc = msg_->init (); + errno_assert (rc == 0); + return 0; +} + +void zmq::session_base_t::reset () +{ +} + +void zmq::session_base_t::flush () +{ + if (pipe) + pipe->flush (); +} + +void zmq::session_base_t::clean_pipes () +{ + zmq_assert (pipe != NULL); + + // Get rid of half-processed messages in the out pipe. Flush any + // unflushed messages upstream. 
+ pipe->rollback (); + pipe->flush (); + + // Remove any half-read message from the in pipe. + while (incomplete_in) { + msg_t msg; + int rc = msg.init (); + errno_assert (rc == 0); + rc = pull_msg (&msg); + errno_assert (rc == 0); + rc = msg.close (); + errno_assert (rc == 0); + } +} + +void zmq::session_base_t::pipe_terminated (pipe_t *pipe_) +{ + // Drop the reference to the deallocated pipe if required. + zmq_assert (pipe_ == pipe || pipe_ == zap_pipe + || terminating_pipes.count (pipe_) == 1); + + if (pipe_ == pipe) { + // If this is our current pipe, remove it + pipe = NULL; + if (has_linger_timer) { + cancel_timer (linger_timer_id); + has_linger_timer = false; + } + } else if (pipe_ == zap_pipe) + zap_pipe = NULL; + else + // Remove the pipe from the detached pipes set + terminating_pipes.erase (pipe_); + + if (!is_terminating () && options.raw_socket) { + if (engine) { + engine->terminate (); + engine = NULL; + } + terminate (); + } + + // If we are waiting for pending messages to be sent, at this point + // we are sure that there will be no more messages and we can proceed + // with termination safely. + if (pending && !pipe && !zap_pipe && terminating_pipes.empty ()) { + pending = false; + own_t::process_term (0); + } +} + +void zmq::session_base_t::read_activated (pipe_t *pipe_) +{ + // Skip activating if we're detaching this pipe + if (unlikely (pipe_ != pipe && pipe_ != zap_pipe)) { + zmq_assert (terminating_pipes.count (pipe_) == 1); + return; + } + + if (unlikely (engine == NULL)) { + pipe->check_read (); + return; + } + + if (likely (pipe_ == pipe)) + engine->restart_output (); + else { + // i.e. 
pipe_ == zap_pipe + engine->zap_msg_available (); + } +} + +void zmq::session_base_t::write_activated (pipe_t *pipe_) +{ + // Skip activating if we're detaching this pipe + if (pipe != pipe_) { + zmq_assert (terminating_pipes.count (pipe_) == 1); + return; + } + + if (engine) + engine->restart_input (); +} + +void zmq::session_base_t::hiccuped (pipe_t *) +{ + // Hiccups are always sent from session to socket, not the other + // way round. + zmq_assert (false); +} + +zmq::socket_base_t *zmq::session_base_t::get_socket () +{ + return socket; +} + +void zmq::session_base_t::process_plug () +{ + if (active) + start_connecting (false); +} + +// This functions can return 0 on success or -1 and errno=ECONNREFUSED if ZAP +// is not setup (IE: inproc://zeromq.zap.01 does not exist in the same context) +// or it aborts on any other error. In other words, either ZAP is not +// configured or if it is configured it MUST be configured correctly and it +// MUST work, otherwise authentication cannot be guaranteed and it would be a +// security flaw. +int zmq::session_base_t::zap_connect () +{ + if (zap_pipe != NULL) + return 0; + + endpoint_t peer = find_endpoint ("inproc://zeromq.zap.01"); + if (peer.socket == NULL) { + errno = ECONNREFUSED; + return -1; + } + zmq_assert (peer.options.type == ZMQ_REP || peer.options.type == ZMQ_ROUTER + || peer.options.type == ZMQ_SERVER); + + // Create a bi-directional pipe that will connect + // session with zap socket. + object_t *parents[2] = {this, peer.socket}; + pipe_t *new_pipes[2] = {NULL, NULL}; + int hwms[2] = {0, 0}; + bool conflates[2] = {false, false}; + int rc = pipepair (parents, new_pipes, hwms, conflates); + errno_assert (rc == 0); + + // Attach local end of the pipe to this socket object. + zap_pipe = new_pipes[0]; + zap_pipe->set_nodelay (); + zap_pipe->set_event_sink (this); + + send_bind (peer.socket, new_pipes[1], false); + + // Send empty routing id if required by the peer. 
+ if (peer.options.recv_routing_id) { + msg_t id; + rc = id.init (); + errno_assert (rc == 0); + id.set_flags (msg_t::routing_id); + bool ok = zap_pipe->write (&id); + zmq_assert (ok); + zap_pipe->flush (); + } + + return 0; +} + +bool zmq::session_base_t::zap_enabled () +{ + return (options.mechanism != ZMQ_NULL || !options.zap_domain.empty ()); +} + +void zmq::session_base_t::process_attach (i_engine *engine_) +{ + zmq_assert (engine_ != NULL); + + // Create the pipe if it does not exist yet. + if (!pipe && !is_terminating ()) { + object_t *parents[2] = {this, socket}; + pipe_t *pipes[2] = {NULL, NULL}; + + bool conflate = + options.conflate + && (options.type == ZMQ_DEALER || options.type == ZMQ_PULL + || options.type == ZMQ_PUSH || options.type == ZMQ_PUB + || options.type == ZMQ_SUB); + + int hwms[2] = {conflate ? -1 : options.rcvhwm, + conflate ? -1 : options.sndhwm}; + bool conflates[2] = {conflate, conflate}; + int rc = pipepair (parents, pipes, hwms, conflates); + errno_assert (rc == 0); + + // Plug the local end of the pipe. + pipes[0]->set_event_sink (this); + + // Remember the local end of the pipe. + zmq_assert (!pipe); + pipe = pipes[0]; + + // Ask socket to plug into the remote end of the pipe. + send_bind (socket, pipes[1]); + } + + // Plug in the engine. + zmq_assert (!engine); + engine = engine_; + engine->plug (io_thread, this); +} + +void zmq::session_base_t::engine_error ( + zmq::stream_engine_t::error_reason_t reason_) +{ + // Engine is dead. Let's forget about it. + engine = NULL; + + // Remove any half-done messages from the pipes. 
+ if (pipe) + clean_pipes (); + + zmq_assert (reason_ == stream_engine_t::connection_error + || reason_ == stream_engine_t::timeout_error + || reason_ == stream_engine_t::protocol_error); + + switch (reason_) { + case stream_engine_t::timeout_error: + /* FALLTHROUGH */ + case stream_engine_t::connection_error: + if (active) { + reconnect (); + break; + } + /* FALLTHROUGH */ + case stream_engine_t::protocol_error: + if (pending) { + if (pipe) + pipe->terminate (false); + if (zap_pipe) + zap_pipe->terminate (false); + } else { + terminate (); + } + break; + } + + // Just in case there's only a delimiter in the pipe. + if (pipe) + pipe->check_read (); + + if (zap_pipe) + zap_pipe->check_read (); +} + +void zmq::session_base_t::process_term (int linger_) +{ + zmq_assert (!pending); + + // If the termination of the pipe happens before the term command is + // delivered there's nothing much to do. We can proceed with the + // standard termination immediately. + if (!pipe && !zap_pipe && terminating_pipes.empty ()) { + own_t::process_term (0); + return; + } + + pending = true; + + if (pipe != NULL) { + // If there's finite linger value, delay the termination. + // If linger is infinite (negative) we don't even have to set + // the timer. + if (linger_ > 0) { + zmq_assert (!has_linger_timer); + add_timer (linger_, linger_timer_id); + has_linger_timer = true; + } + + // Start pipe termination process. Delay the termination till all messages + // are processed in case the linger time is non-zero. + pipe->terminate (linger_ != 0); + + // TODO: Should this go into pipe_t::terminate ? + // In case there's no engine and there's only delimiter in the + // pipe it wouldn't be ever read. Thus we check for it explicitly. + if (!engine) + pipe->check_read (); + } + + if (zap_pipe != NULL) + zap_pipe->terminate (false); +} + +void zmq::session_base_t::timer_event (int id_) +{ + // Linger period expired. 
We can proceed with termination even though + // there are still pending messages to be sent. + zmq_assert (id_ == linger_timer_id); + has_linger_timer = false; + + // Ask pipe to terminate even though there may be pending messages in it. + zmq_assert (pipe); + pipe->terminate (false); +} + +void zmq::session_base_t::reconnect () +{ + // For delayed connect situations, terminate the pipe + // and reestablish later on + if (pipe && options.immediate == 1 && addr->protocol != "pgm" + && addr->protocol != "epgm" && addr->protocol != "norm" + && addr->protocol != "udp") { + pipe->hiccup (); + pipe->terminate (false); + terminating_pipes.insert (pipe); + pipe = NULL; + + if (has_linger_timer) { + cancel_timer (linger_timer_id); + has_linger_timer = false; + } + } + + reset (); + + // Reconnect. + if (options.reconnect_ivl != -1) + start_connecting (true); + else { + std::string *ep = new (std::string); + addr->to_string (*ep); + send_term_endpoint (socket, ep); + } + + // For subscriber sockets we hiccup the inbound pipe, which will cause + // the socket object to resend all the subscriptions. + if (pipe + && (options.type == ZMQ_SUB || options.type == ZMQ_XSUB + || options.type == ZMQ_DISH)) + pipe->hiccup (); +} + +void zmq::session_base_t::start_connecting (bool wait_) +{ + zmq_assert (active); + + // Choose I/O thread to run connecter in. Given that we are already + // running in an I/O thread, there must be at least one available. + io_thread_t *io_thread = choose_io_thread (options.affinity); + zmq_assert (io_thread); + + // Create the connecter object. 
+ + if (addr->protocol == "tcp") { + if (!options.socks_proxy_address.empty ()) { + address_t *proxy_address = new (std::nothrow) + address_t ("tcp", options.socks_proxy_address, this->get_ctx ()); + alloc_assert (proxy_address); + socks_connecter_t *connecter = + new (std::nothrow) socks_connecter_t (io_thread, this, options, + addr, proxy_address, wait_); + alloc_assert (connecter); + launch_child (connecter); + } else { + tcp_connecter_t *connecter = new (std::nothrow) + tcp_connecter_t (io_thread, this, options, addr, wait_); + alloc_assert (connecter); + launch_child (connecter); + } + return; + } + +#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS \ + && !defined ZMQ_HAVE_VXWORKS + if (addr->protocol == "ipc") { + ipc_connecter_t *connecter = new (std::nothrow) + ipc_connecter_t (io_thread, this, options, addr, wait_); + alloc_assert (connecter); + launch_child (connecter); + return; + } +#endif +#if defined ZMQ_HAVE_TIPC + if (addr->protocol == "tipc") { + tipc_connecter_t *connecter = new (std::nothrow) + tipc_connecter_t (io_thread, this, options, addr, wait_); + alloc_assert (connecter); + launch_child (connecter); + return; + } +#endif + + if (addr->protocol == "udp") { + zmq_assert (options.type == ZMQ_DISH || options.type == ZMQ_RADIO + || options.type == ZMQ_DGRAM); + + udp_engine_t *engine = new (std::nothrow) udp_engine_t (options); + alloc_assert (engine); + + bool recv = false; + bool send = false; + + if (options.type == ZMQ_RADIO) { + send = true; + recv = false; + } else if (options.type == ZMQ_DISH) { + send = false; + recv = true; + } else if (options.type == ZMQ_DGRAM) { + send = true; + recv = true; + } + + int rc = engine->init (addr, send, recv); + errno_assert (rc == 0); + + send_attach (this, engine); + + return; + } + +#ifdef ZMQ_HAVE_OPENPGM + + // Both PGM and EPGM transports are using the same infrastructure. 
+ if (addr->protocol == "pgm" || addr->protocol == "epgm") { + zmq_assert (options.type == ZMQ_PUB || options.type == ZMQ_XPUB + || options.type == ZMQ_SUB || options.type == ZMQ_XSUB); + + // For EPGM transport with UDP encapsulation of PGM is used. + bool const udp_encapsulation = addr->protocol == "epgm"; + + // At this point we'll create message pipes to the session straight + // away. There's no point in delaying it as no concept of 'connect' + // exists with PGM anyway. + if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) { + // PGM sender. + pgm_sender_t *pgm_sender = + new (std::nothrow) pgm_sender_t (io_thread, options); + alloc_assert (pgm_sender); + + int rc = + pgm_sender->init (udp_encapsulation, addr->address.c_str ()); + errno_assert (rc == 0); + + send_attach (this, pgm_sender); + } else { + // PGM receiver. + pgm_receiver_t *pgm_receiver = + new (std::nothrow) pgm_receiver_t (io_thread, options); + alloc_assert (pgm_receiver); + + int rc = + pgm_receiver->init (udp_encapsulation, addr->address.c_str ()); + errno_assert (rc == 0); + + send_attach (this, pgm_receiver); + } + + return; + } +#endif + +#ifdef ZMQ_HAVE_NORM + if (addr->protocol == "norm") { + // At this point we'll create message pipes to the session straight + // away. There's no point in delaying it as no concept of 'connect' + // exists with NORM anyway. + if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) { + // NORM sender. + norm_engine_t *norm_sender = + new (std::nothrow) norm_engine_t (io_thread, options); + alloc_assert (norm_sender); + + int rc = norm_sender->init (addr->address.c_str (), true, false); + errno_assert (rc == 0); + + send_attach (this, norm_sender); + } else { // ZMQ_SUB or ZMQ_XSUB + + // NORM receiver. 
+ norm_engine_t *norm_receiver = + new (std::nothrow) norm_engine_t (io_thread, options); + alloc_assert (norm_receiver); + + int rc = norm_receiver->init (addr->address.c_str (), false, true); + errno_assert (rc == 0); + + send_attach (this, norm_receiver); + } + return; + } +#endif // ZMQ_HAVE_NORM + +#if defined ZMQ_HAVE_VMCI + if (addr->protocol == "vmci") { + vmci_connecter_t *connecter = new (std::nothrow) + vmci_connecter_t (io_thread, this, options, addr, wait_); + alloc_assert (connecter); + launch_child (connecter); + return; + } +#endif + + zmq_assert (false); +} diff --git a/src/session_base.hpp b/src/session_base.hpp index a487f70e..b503adc8 100644 --- a/src/session_base.hpp +++ b/src/session_base.hpp @@ -120,34 +120,34 @@ class session_base_t : public own_t, public io_object_t, public i_pipe_events // If true, this session (re)connects to the peer. Otherwise, it's // a transient session created by the listener. - const bool active; + const bool _active; // Pipe connecting the session to its socket. - zmq::pipe_t *pipe; + zmq::pipe_t *_pipe; // Pipe used to exchange messages with ZAP socket. - zmq::pipe_t *zap_pipe; + zmq::pipe_t *_zap_pipe; // This set is added to with pipes we are disconnecting, but haven't yet completed - std::set terminating_pipes; + std::set _terminating_pipes; // This flag is true if the remainder of the message being processed // is still in the in pipe. - bool incomplete_in; + bool _incomplete_in; // True if termination have been suspended to push the pending // messages to the network. - bool pending; + bool _pending; // The protocol I/O engine connected to the session. - zmq::i_engine *engine; + zmq::i_engine *_engine; // The socket the session belongs to. - zmq::socket_base_t *socket; + zmq::socket_base_t *_socket; // I/O thread the session is living in. It will be used to plug in // the engines into the same thread. 
- zmq::io_thread_t *io_thread; + zmq::io_thread_t *_io_thread; // ID of the linger timer enum @@ -156,10 +156,10 @@ class session_base_t : public own_t, public io_object_t, public i_pipe_events }; // True is linger timer is running. - bool has_linger_timer; + bool _has_linger_timer; // Protocol and address to use when connecting. - address_t *addr; + address_t *_addr; session_base_t (const session_base_t &); const session_base_t &operator= (const session_base_t &); diff --git a/src/signaler.cpp b/src/signaler.cpp index acc261d4..07ede6f0 100644 --- a/src/signaler.cpp +++ b/src/signaler.cpp @@ -121,9 +121,9 @@ static int close_wait_ms (int fd_, unsigned int max_ms_ = 2000) zmq::signaler_t::signaler_t () { // Create the socketpair for signaling. - if (make_fdpair (&r, &w) == 0) { - unblock_socket (w); - unblock_socket (r); + if (make_fdpair (&_r, &_w) == 0) { + unblock_socket (_w); + unblock_socket (_r); } #ifdef HAVE_FORK pid = getpid (); @@ -131,38 +131,38 @@ zmq::signaler_t::signaler_t () } // This might get run after some part of construction failed, leaving one or -// both of r and w retired_fd. +// both of _r and _w retired_fd. 
zmq::signaler_t::~signaler_t () { #if defined ZMQ_HAVE_EVENTFD - if (r == retired_fd) + if (_r == retired_fd) return; - int rc = close_wait_ms (r); + int rc = close_wait_ms (_r); errno_assert (rc == 0); #elif defined ZMQ_HAVE_WINDOWS - if (w != retired_fd) { + if (_w != retired_fd) { const struct linger so_linger = {1, 0}; - int rc = setsockopt (w, SOL_SOCKET, SO_LINGER, + int rc = setsockopt (_w, SOL_SOCKET, SO_LINGER, reinterpret_cast (&so_linger), sizeof so_linger); // Only check shutdown if WSASTARTUP was previously done if (rc == 0 || WSAGetLastError () != WSANOTINITIALISED) { wsa_assert (rc != SOCKET_ERROR); - rc = closesocket (w); + rc = closesocket (_w); wsa_assert (rc != SOCKET_ERROR); - if (r == retired_fd) + if (_r == retired_fd) return; - rc = closesocket (r); + rc = closesocket (_r); wsa_assert (rc != SOCKET_ERROR); } } #else - if (w != retired_fd) { - int rc = close_wait_ms (w); + if (_w != retired_fd) { + int rc = close_wait_ms (_w); errno_assert (rc == 0); } - if (r != retired_fd) { - int rc = close_wait_ms (r); + if (_r != retired_fd) { + int rc = close_wait_ms (_r); errno_assert (rc == 0); } #endif @@ -170,7 +170,7 @@ zmq::signaler_t::~signaler_t () zmq::fd_t zmq::signaler_t::get_fd () const { - return r; + return _r; } void zmq::signaler_t::send () @@ -183,13 +183,13 @@ void zmq::signaler_t::send () #endif #if defined ZMQ_HAVE_EVENTFD const uint64_t inc = 1; - ssize_t sz = write (w, &inc, sizeof (inc)); + ssize_t sz = write (_w, &inc, sizeof (inc)); errno_assert (sz == sizeof (inc)); #elif defined ZMQ_HAVE_WINDOWS unsigned char dummy = 0; while (true) { int nbytes = - ::send (w, reinterpret_cast (&dummy), sizeof (dummy), 0); + ::send (_w, reinterpret_cast (&dummy), sizeof (dummy), 0); wsa_assert (nbytes != SOCKET_ERROR); if (unlikely (nbytes == SOCKET_ERROR)) continue; @@ -199,7 +199,7 @@ void zmq::signaler_t::send () #elif defined ZMQ_HAVE_VXWORKS unsigned char dummy = 0; while (true) { - ssize_t nbytes = ::send (w, (char *) &dummy, sizeof 
(dummy), 0); + ssize_t nbytes = ::send (_w, (char *) &dummy, sizeof (dummy), 0); if (unlikely (nbytes == -1 && errno == EINTR)) continue; #if defined(HAVE_FORK) @@ -215,7 +215,7 @@ void zmq::signaler_t::send () #else unsigned char dummy = 0; while (true) { - ssize_t nbytes = ::send (w, &dummy, sizeof (dummy), 0); + ssize_t nbytes = ::send (_w, &dummy, sizeof (dummy), 0); if (unlikely (nbytes == -1 && errno == EINTR)) continue; #if defined(HAVE_FORK) @@ -245,7 +245,7 @@ int zmq::signaler_t::wait (int timeout_) #ifdef ZMQ_POLL_BASED_ON_POLL struct pollfd pfd; - pfd.fd = r; + pfd.fd = _r; pfd.events = POLLIN; int rc = poll (&pfd, 1, timeout_); if (unlikely (rc < 0)) { @@ -272,7 +272,7 @@ int zmq::signaler_t::wait (int timeout_) fd_set fds; FD_ZERO (&fds); - FD_SET (r, &fds); + FD_SET (_r, &fds); struct timeval timeout; if (timeout_ >= 0) { timeout.tv_sec = timeout_ / 1000; @@ -282,7 +282,7 @@ int zmq::signaler_t::wait (int timeout_) int rc = select (0, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL); wsa_assert (rc != SOCKET_ERROR); #else - int rc = select (r + 1, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL); + int rc = select (_r + 1, &fds, NULL, NULL, timeout_ >= 0 ? &timeout : NULL); if (unlikely (rc < 0)) { errno_assert (errno == EINTR); return -1; @@ -305,14 +305,14 @@ void zmq::signaler_t::recv () // Attempt to read a signal. #if defined ZMQ_HAVE_EVENTFD uint64_t dummy; - ssize_t sz = read (r, &dummy, sizeof (dummy)); + ssize_t sz = read (_r, &dummy, sizeof (dummy)); errno_assert (sz == sizeof (dummy)); // If we accidentally grabbed the next signal(s) along with the current // one, return it back to the eventfd object. 
if (unlikely (dummy > 1)) { const uint64_t inc = dummy - 1; - ssize_t sz2 = write (w, &inc, sizeof (inc)); + ssize_t sz2 = write (_w, &inc, sizeof (inc)); errno_assert (sz2 == sizeof (inc)); return; } @@ -322,13 +322,13 @@ void zmq::signaler_t::recv () unsigned char dummy; #if defined ZMQ_HAVE_WINDOWS int nbytes = - ::recv (r, reinterpret_cast (&dummy), sizeof (dummy), 0); + ::recv (_r, reinterpret_cast (&dummy), sizeof (dummy), 0); wsa_assert (nbytes != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS - ssize_t nbytes = ::recv (r, (char *) &dummy, sizeof (dummy), 0); + ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0); errno_assert (nbytes >= 0); #else - ssize_t nbytes = ::recv (r, &dummy, sizeof (dummy), 0); + ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0); errno_assert (nbytes >= 0); #endif zmq_assert (nbytes == sizeof (dummy)); @@ -341,7 +341,7 @@ int zmq::signaler_t::recv_failable () // Attempt to read a signal. #if defined ZMQ_HAVE_EVENTFD uint64_t dummy; - ssize_t sz = read (r, &dummy, sizeof (dummy)); + ssize_t sz = read (_r, &dummy, sizeof (dummy)); if (sz == -1) { errno_assert (errno == EAGAIN); return -1; @@ -352,7 +352,7 @@ int zmq::signaler_t::recv_failable () // one, return it back to the eventfd object. 
if (unlikely (dummy > 1)) { const uint64_t inc = dummy - 1; - ssize_t sz2 = write (w, &inc, sizeof (inc)); + ssize_t sz2 = write (_w, &inc, sizeof (inc)); errno_assert (sz2 == sizeof (inc)); return 0; } @@ -363,7 +363,7 @@ int zmq::signaler_t::recv_failable () unsigned char dummy; #if defined ZMQ_HAVE_WINDOWS int nbytes = - ::recv (r, reinterpret_cast (&dummy), sizeof (dummy), 0); + ::recv (_r, reinterpret_cast (&dummy), sizeof (dummy), 0); if (nbytes == SOCKET_ERROR) { const int last_error = WSAGetLastError (); if (last_error == WSAEWOULDBLOCK) { @@ -373,7 +373,7 @@ int zmq::signaler_t::recv_failable () wsa_assert (last_error == WSAEWOULDBLOCK); } #elif defined ZMQ_HAVE_VXWORKS - ssize_t nbytes = ::recv (r, (char *) &dummy, sizeof (dummy), 0); + ssize_t nbytes = ::recv (_r, (char *) &dummy, sizeof (dummy), 0); if (nbytes == -1) { if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { errno = EAGAIN; @@ -383,7 +383,7 @@ int zmq::signaler_t::recv_failable () || errno == EINTR); } #else - ssize_t nbytes = ::recv (r, &dummy, sizeof (dummy), 0); + ssize_t nbytes = ::recv (_r, &dummy, sizeof (dummy), 0); if (nbytes == -1) { if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) { errno = EAGAIN; @@ -401,15 +401,15 @@ int zmq::signaler_t::recv_failable () bool zmq::signaler_t::valid () const { - return w != retired_fd; + return _w != retired_fd; } #ifdef HAVE_FORK void zmq::signaler_t::forked () { // Close file descriptors created in the parent and create new pair - close (r); - close (w); - make_fdpair (&r, &w); + close (_r); + close (_w); + make_fdpair (&_r, &_w); } #endif diff --git a/src/signaler.hpp b/src/signaler.hpp index e8797c7f..51e1a936 100644 --- a/src/signaler.hpp +++ b/src/signaler.hpp @@ -69,8 +69,8 @@ class signaler_t // Underlying write & read file descriptor // Will be -1 if an error occurred during initialization, e.g. 
we // exceeded the number of available handles - fd_t w; - fd_t r; + fd_t _w; + fd_t _r; // Disable copying of signaler_t object. signaler_t (const signaler_t &); diff --git a/src/socket_base.cpp b/src/socket_base.cpp index a43d1fde..8e4a8d26 100644 --- a/src/socket_base.cpp +++ b/src/socket_base.cpp @@ -99,12 +99,12 @@ bool zmq::socket_base_t::check_tag () { - return tag == 0xbaddecaf; + return _tag == 0xbaddecaf; } bool zmq::socket_base_t::is_thread_safe () const { - return thread_safe; + return _thread_safe; } zmq::socket_base_t *zmq::socket_base_t::create (int type_, @@ -178,8 +178,8 @@ zmq::socket_base_t *zmq::socket_base_t::create (int type_, alloc_assert (s); - if (s->mailbox == NULL) { - s->destroyed = true; + if (s->_mailbox == NULL) { + s->_destroyed = true; LIBZMQ_DELETE (s); return NULL; } @@ -192,38 +192,38 @@ zmq::socket_base_t::socket_base_t (ctx_t *parent_, int sid_, bool thread_safe_) : own_t (parent_, tid_), - tag (0xbaddecaf), - ctx_terminated (false), - destroyed (false), - poller (NULL), - handle (static_cast (NULL)), - last_tsc (0), - ticks (0), - rcvmore (false), - monitor_socket (NULL), - monitor_events (0), - thread_safe (thread_safe_), - reaper_signaler (NULL), - sync (), - monitor_sync () + _tag (0xbaddecaf), + _ctx_terminated (false), + _destroyed (false), + _poller (NULL), + _handle (static_cast (NULL)), + _last_tsc (0), + _ticks (0), + _rcvmore (false), + _monitor_socket (NULL), + _monitor_events (0), + _thread_safe (thread_safe_), + _reaper_signaler (NULL), + _sync (), + _monitor_sync () { options.socket_id = sid_; options.ipv6 = (parent_->get (ZMQ_IPV6) != 0); options.linger.store (parent_->get (ZMQ_BLOCKY) ? 
-1 : 0); options.zero_copy = parent_->get (ZMQ_ZERO_COPY_RECV) != 0; - if (thread_safe) { - mailbox = new (std::nothrow) mailbox_safe_t (&sync); - zmq_assert (mailbox); + if (_thread_safe) { + _mailbox = new (std::nothrow) mailbox_safe_t (&_sync); + zmq_assert (_mailbox); } else { mailbox_t *m = new (std::nothrow) mailbox_t (); zmq_assert (m); if (m->get_fd () != retired_fd) - mailbox = m; + _mailbox = m; else { LIBZMQ_DELETE (m); - mailbox = NULL; + _mailbox = NULL; } } } @@ -241,21 +241,21 @@ int zmq::socket_base_t::get_peer_state (const void *routing_id_, zmq::socket_base_t::~socket_base_t () { - if (mailbox) - LIBZMQ_DELETE (mailbox); + if (_mailbox) + LIBZMQ_DELETE (_mailbox); - if (reaper_signaler) - LIBZMQ_DELETE (reaper_signaler); + if (_reaper_signaler) + LIBZMQ_DELETE (_reaper_signaler); - scoped_lock_t lock (monitor_sync); + scoped_lock_t lock (_monitor_sync); stop_monitor (); - zmq_assert (destroyed); + zmq_assert (_destroyed); } zmq::i_mailbox *zmq::socket_base_t::get_mailbox () { - return mailbox; + return _mailbox; } void zmq::socket_base_t::stop () @@ -345,7 +345,7 @@ void zmq::socket_base_t::attach_pipe (pipe_t *pipe_, bool subscribe_to_all_) { // First, register the pipe so that we can terminate it later on. pipe_->set_event_sink (this); - pipes.push_back (pipe_); + _pipes.push_back (pipe_); // Let the derived socket type know about new pipe. xattach_pipe (pipe_, subscribe_to_all_); @@ -362,14 +362,14 @@ int zmq::socket_base_t::setsockopt (int option_, const void *optval_, size_t optvallen_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); if (!options.is_valid (option_)) { errno = EINVAL; return -1; } - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -392,26 +392,27 @@ int zmq::socket_base_t::getsockopt (int option_, void *optval_, size_t *optvallen_) { - scoped_optional_lock_t sync_lock (thread_safe ? 
&sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } if (option_ == ZMQ_RCVMORE) { - return do_getsockopt (optval_, optvallen_, rcvmore ? 1 : 0); + return do_getsockopt (optval_, optvallen_, _rcvmore ? 1 : 0); } if (option_ == ZMQ_FD) { - if (thread_safe) { + if (_thread_safe) { // thread safe socket doesn't provide file descriptor errno = EINVAL; return -1; } return do_getsockopt ( - optval_, optvallen_, (static_cast (mailbox))->get_fd ()); + optval_, optvallen_, + (static_cast (_mailbox))->get_fd ()); } if (option_ == ZMQ_EVENTS) { @@ -427,11 +428,11 @@ int zmq::socket_base_t::getsockopt (int option_, } if (option_ == ZMQ_LAST_ENDPOINT) { - return do_getsockopt (optval_, optvallen_, last_endpoint); + return do_getsockopt (optval_, optvallen_, _last_endpoint); } if (option_ == ZMQ_THREAD_SAFE) { - return do_getsockopt (optval_, optvallen_, thread_safe ? 1 : 0); + return do_getsockopt (optval_, optvallen_, _thread_safe ? 1 : 0); } return options.getsockopt (option_, optval_, optvallen_); @@ -439,7 +440,7 @@ int zmq::socket_base_t::getsockopt (int option_, int zmq::socket_base_t::join (const char *group_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); int rc = xjoin (group_); @@ -449,7 +450,7 @@ int zmq::socket_base_t::join (const char *group_) int zmq::socket_base_t::leave (const char *group_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? 
&_sync : NULL); int rc = xleave (group_); @@ -459,25 +460,25 @@ int zmq::socket_base_t::leave (const char *group_) void zmq::socket_base_t::add_signaler (signaler_t *s_) { - zmq_assert (thread_safe); + zmq_assert (_thread_safe); - scoped_lock_t sync_lock (sync); - (static_cast (mailbox))->add_signaler (s_); + scoped_lock_t sync_lock (_sync); + (static_cast (_mailbox))->add_signaler (s_); } void zmq::socket_base_t::remove_signaler (signaler_t *s_) { - zmq_assert (thread_safe); + zmq_assert (_thread_safe); - scoped_lock_t sync_lock (sync); - (static_cast (mailbox))->remove_signaler (s_); + scoped_lock_t sync_lock (_sync); + (static_cast (_mailbox))->remove_signaler (s_); } int zmq::socket_base_t::bind (const char *addr_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -500,7 +501,7 @@ int zmq::socket_base_t::bind (const char *addr_) rc = register_endpoint (addr_, endpoint); if (rc == 0) { connect_pending (addr_, this); - last_endpoint.assign (addr_); + _last_endpoint.assign (addr_); options.connected = true; } return rc; @@ -564,7 +565,7 @@ int zmq::socket_base_t::bind (const char *addr_) session->attach_pipe (new_pipes[1]); // Save last endpoint URI - paddr->to_string (last_endpoint); + paddr->to_string (_last_endpoint); add_endpoint (addr_, (own_t *) session, newpipe); @@ -591,9 +592,9 @@ int zmq::socket_base_t::bind (const char *addr_) } // Save last endpoint URI - listener->get_address (last_endpoint); + listener->get_address (_last_endpoint); - add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL); + add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL); options.connected = true; return 0; } @@ -612,9 +613,9 @@ int zmq::socket_base_t::bind (const char *addr_) } // Save last endpoint URI - listener->get_address (last_endpoint); + listener->get_address 
(_last_endpoint); - add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL); + add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL); options.connected = true; return 0; } @@ -632,7 +633,7 @@ int zmq::socket_base_t::bind (const char *addr_) } // Save last endpoint URI - listener->get_address (last_endpoint); + listener->get_address (_last_endpoint); add_endpoint (addr_, (own_t *) listener, NULL); options.connected = true; @@ -651,9 +652,9 @@ int zmq::socket_base_t::bind (const char *addr_) return -1; } - listener->get_address (last_endpoint); + listener->get_address (_last_endpoint); - add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL); + add_endpoint (_last_endpoint.c_str (), (own_t *) listener, NULL); options.connected = true; return 0; } @@ -665,9 +666,9 @@ int zmq::socket_base_t::bind (const char *addr_) int zmq::socket_base_t::connect (const char *addr_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? 
&_sync : NULL); - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -780,10 +781,10 @@ int zmq::socket_base_t::connect (const char *addr_) attach_pipe (new_pipes[0]); // Save last endpoint URI - last_endpoint.assign (addr_); + _last_endpoint.assign (addr_); // remember inproc connections for disconnect - inprocs.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), new_pipes[0]); + _inprocs.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), new_pipes[0]); options.connected = true; return 0; @@ -792,8 +793,8 @@ int zmq::socket_base_t::connect (const char *addr_) (options.type == ZMQ_DEALER || options.type == ZMQ_SUB || options.type == ZMQ_PUB || options.type == ZMQ_REQ); if (unlikely (is_single_connect)) { - const endpoints_t::iterator it = endpoints.find (addr_); - if (it != endpoints.end ()) { + const endpoints_t::iterator it = _endpoints.find (addr_); + if (it != _endpoints.end ()) { // There is no valid use for multiple connects for SUB-PUB nor // DEALER-ROUTER nor REQ-REP. Multiple connects produces // nonsensical results. @@ -970,7 +971,7 @@ int zmq::socket_base_t::connect (const char *addr_) } // Save last endpoint URI - paddr->to_string (last_endpoint); + paddr->to_string (_last_endpoint); add_endpoint (addr_, (own_t *) session, newpipe); return 0; @@ -982,16 +983,16 @@ void zmq::socket_base_t::add_endpoint (const char *addr_, { // Activate the session. Make it a child of this socket. launch_child (endpoint_); - endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), - endpoint_pipe_t (endpoint_, pipe_)); + _endpoints.ZMQ_MAP_INSERT_OR_EMPLACE (std::string (addr_), + endpoint_pipe_t (endpoint_, pipe_)); } int zmq::socket_base_t::term_endpoint (const char *addr_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); // Check whether the library haven't been shut down yet. 
- if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -1024,7 +1025,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_) return 0; } std::pair range = - inprocs.equal_range (addr_str); + _inprocs.equal_range (addr_str); if (range.first == range.second) { errno = ENOENT; return -1; @@ -1032,7 +1033,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_) for (inprocs_t::iterator it = range.first; it != range.second; ++it) it->second->terminate (true); - inprocs.erase (range.first, range.second); + _inprocs.erase (range.first, range.second); return 0; } @@ -1044,14 +1045,14 @@ int zmq::socket_base_t::term_endpoint (const char *addr_) // resolve before giving up. Given at this stage we don't know whether a // socket is connected or bound, try with both. if (protocol == "tcp") { - if (endpoints.find (resolved_addr) == endpoints.end ()) { + if (_endpoints.find (resolved_addr) == _endpoints.end ()) { tcp_address_t *tcp_addr = new (std::nothrow) tcp_address_t (); alloc_assert (tcp_addr); rc = tcp_addr->resolve (address.c_str (), false, options.ipv6); if (rc == 0) { tcp_addr->to_string (resolved_addr); - if (endpoints.find (resolved_addr) == endpoints.end ()) { + if (_endpoints.find (resolved_addr) == _endpoints.end ()) { rc = tcp_addr->resolve (address.c_str (), true, options.ipv6); if (rc == 0) { @@ -1065,7 +1066,7 @@ int zmq::socket_base_t::term_endpoint (const char *addr_) // Find the endpoints range (if any) corresponding to the addr_ string. 
const std::pair range = - endpoints.equal_range (resolved_addr); + _endpoints.equal_range (resolved_addr); if (range.first == range.second) { errno = ENOENT; return -1; @@ -1077,16 +1078,16 @@ int zmq::socket_base_t::term_endpoint (const char *addr_) it->second.second->terminate (false); term_child (it->second.first); } - endpoints.erase (range.first, range.second); + _endpoints.erase (range.first, range.second); return 0; } int zmq::socket_base_t::send (msg_t *msg_, int flags_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); // Check whether the library haven't been shut down yet. - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -1130,7 +1131,7 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_) // Compute the time when the timeout should occur. // If the timeout is infinite, don't care. int timeout = options.sndtimeo; - uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout); + uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout); // Oops, we couldn't send the message. Wait for the next // command, process it and try to send the message again. @@ -1146,7 +1147,7 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_) return -1; } if (timeout > 0) { - timeout = static_cast (end - clock.now_ms ()); + timeout = static_cast (end - _clock.now_ms ()); if (timeout <= 0) { errno = EAGAIN; return -1; @@ -1159,10 +1160,10 @@ int zmq::socket_base_t::send (msg_t *msg_, int flags_) int zmq::socket_base_t::recv (msg_t *msg_, int flags_) { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); // Check whether the library haven't been shut down yet. 
- if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -1181,11 +1182,11 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_) // Note that 'recv' uses different command throttling algorithm (the one // described above) from the one used by 'send'. This is because counting // ticks is more efficient than doing RDTSC all the time. - if (++ticks == inbound_poll_rate) { + if (++_ticks == inbound_poll_rate) { if (unlikely (process_commands (0, false) != 0)) { return -1; } - ticks = 0; + _ticks = 0; } // Get the message. @@ -1208,7 +1209,7 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_) if (unlikely (process_commands (0, false) != 0)) { return -1; } - ticks = 0; + _ticks = 0; rc = xrecv (msg_); if (rc < 0) { @@ -1222,18 +1223,18 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_) // Compute the time when the timeout should occur. // If the timeout is infinite, don't care. int timeout = options.rcvtimeo; - uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout); + uint64_t end = timeout < 0 ? 0 : (_clock.now_ms () + timeout); // In blocking scenario, commands are processed over and over again until // we are able to fetch a message. - bool block = (ticks != 0); + bool block = (_ticks != 0); while (true) { if (unlikely (process_commands (block ? timeout : 0, false) != 0)) { return -1; } rc = xrecv (msg_); if (rc == 0) { - ticks = 0; + _ticks = 0; break; } if (unlikely (errno != EAGAIN)) { @@ -1241,7 +1242,7 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_) } block = true; if (timeout > 0) { - timeout = static_cast (end - clock.now_ms ()); + timeout = static_cast (end - _clock.now_ms ()); if (timeout <= 0) { errno = EAGAIN; return -1; @@ -1255,14 +1256,14 @@ int zmq::socket_base_t::recv (msg_t *msg_, int flags_) int zmq::socket_base_t::close () { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? 
&_sync : NULL); // Remove all existing signalers for thread safe sockets - if (thread_safe) - (static_cast (mailbox))->clear_signalers (); + if (_thread_safe) + (static_cast (_mailbox))->clear_signalers (); // Mark the socket as dead - tag = 0xdeadbeef; + _tag = 0xdeadbeef; // Transfer the ownership of the socket from this application thread @@ -1286,29 +1287,29 @@ bool zmq::socket_base_t::has_out () void zmq::socket_base_t::start_reaping (poller_t *poller_) { // Plug the socket to the reaper thread. - poller = poller_; + _poller = poller_; fd_t fd; - if (!thread_safe) - fd = (static_cast (mailbox))->get_fd (); + if (!_thread_safe) + fd = (static_cast (_mailbox))->get_fd (); else { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); - reaper_signaler = new (std::nothrow) signaler_t (); - zmq_assert (reaper_signaler); + _reaper_signaler = new (std::nothrow) signaler_t (); + zmq_assert (_reaper_signaler); // Add signaler to the safe mailbox - fd = reaper_signaler->get_fd (); - (static_cast (mailbox)) - ->add_signaler (reaper_signaler); + fd = _reaper_signaler->get_fd (); + (static_cast (_mailbox)) + ->add_signaler (_reaper_signaler); // Send a signal to make sure reaper handle existing commands - reaper_signaler->send (); + _reaper_signaler->send (); } - handle = poller->add_fd (fd, this); - poller->set_pollin (handle); + _handle = _poller->add_fd (fd, this); + _poller->set_pollin (_handle); // Initialise the termination and check whether it can be deallocated // immediately. @@ -1322,7 +1323,7 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_) command_t cmd; if (timeout_ != 0) { // If we are asked to wait, simply ask mailbox to wait. - rc = mailbox->recv (&cmd, timeout_); + rc = _mailbox->recv (&cmd, timeout_); } else { // If we are asked not to wait, check whether we haven't processed // commands recently, so that we can throttle the new commands. 
@@ -1340,19 +1341,19 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_) // Check whether TSC haven't jumped backwards (in case of migration // between CPU cores) and whether certain time have elapsed since // last command processing. If it didn't do nothing. - if (tsc >= last_tsc && tsc - last_tsc <= max_command_delay) + if (tsc >= _last_tsc && tsc - _last_tsc <= max_command_delay) return 0; - last_tsc = tsc; + _last_tsc = tsc; } // Check whether there are any commands pending for this thread. - rc = mailbox->recv (&cmd, 0); + rc = _mailbox->recv (&cmd, 0); } // Process all available commands. while (rc == 0) { cmd.destination->process_command (cmd); - rc = mailbox->recv (&cmd, 0); + rc = _mailbox->recv (&cmd, 0); } if (errno == EINTR) @@ -1360,7 +1361,7 @@ int zmq::socket_base_t::process_commands (int timeout_, bool throttle_) zmq_assert (errno == EAGAIN); - if (ctx_terminated) { + if (_ctx_terminated) { errno = ETERM; return -1; } @@ -1374,10 +1375,10 @@ void zmq::socket_base_t::process_stop () // We'll remember the fact so that any blocking call is interrupted and any // further attempt to use the socket will return ETERM. The user is still // responsible for calling zmq_close on the socket though! - scoped_lock_t lock (monitor_sync); + scoped_lock_t lock (_monitor_sync); stop_monitor (); - ctx_terminated = true; + _ctx_terminated = true; } void zmq::socket_base_t::process_bind (pipe_t *pipe_) @@ -1393,9 +1394,9 @@ void zmq::socket_base_t::process_term (int linger_) unregister_endpoints (this); // Ask all attached pipes to terminate. - for (pipes_t::size_type i = 0; i != pipes.size (); ++i) - pipes[i]->terminate (false); - register_term_acks (static_cast (pipes.size ())); + for (pipes_t::size_type i = 0; i != _pipes.size (); ++i) + _pipes[i]->terminate (false); + register_term_acks (static_cast (_pipes.size ())); // Continue the termination process immediately. 
own_t::process_term (linger_); @@ -1410,16 +1411,16 @@ void zmq::socket_base_t::process_term_endpoint (std::string *endpoint_) void zmq::socket_base_t::update_pipe_options (int option_) { if (option_ == ZMQ_SNDHWM || option_ == ZMQ_RCVHWM) { - for (pipes_t::size_type i = 0; i != pipes.size (); ++i) { - pipes[i]->set_hwms (options.rcvhwm, options.sndhwm); - pipes[i]->send_hwms_to_peer (options.sndhwm, options.rcvhwm); + for (pipes_t::size_type i = 0; i != _pipes.size (); ++i) { + _pipes[i]->set_hwms (options.rcvhwm, options.sndhwm); + _pipes[i]->send_hwms_to_peer (options.sndhwm, options.rcvhwm); } } } void zmq::socket_base_t::process_destroy () { - destroyed = true; + _destroyed = true; } int zmq::socket_base_t::xsetsockopt (int, const void *, size_t) @@ -1492,11 +1493,11 @@ void zmq::socket_base_t::in_event () // that may be available at the moment. Ultimately, the socket will // be destroyed. { - scoped_optional_lock_t sync_lock (thread_safe ? &sync : NULL); + scoped_optional_lock_t sync_lock (_thread_safe ? &_sync : NULL); // If the socket is thread safe we need to unsignal the reaper signaler - if (thread_safe) - reaper_signaler->recv (); + if (_thread_safe) + _reaper_signaler->recv (); process_commands (0, false); } @@ -1516,9 +1517,9 @@ void zmq::socket_base_t::timer_event (int) void zmq::socket_base_t::check_destroy () { // If the object was already marked as destroyed, finish the deallocation. - if (destroyed) { + if (_destroyed) { // Remove the socket from the reaper's poller. - poller->rm_fd (handle); + _poller->rm_fd (_handle); // Remove the socket from the context. 
destroy_socket (this); @@ -1556,15 +1557,16 @@ void zmq::socket_base_t::pipe_terminated (pipe_t *pipe_) xpipe_terminated (pipe_); // Remove pipe from inproc pipes - for (inprocs_t::iterator it = inprocs.begin (); it != inprocs.end (); ++it) + for (inprocs_t::iterator it = _inprocs.begin (); it != _inprocs.end (); + ++it) if (it->second == pipe_) { - inprocs.erase (it); + _inprocs.erase (it); break; } // Remove the pipe from the list of attached pipes and confirm its // termination if we are already shutting down. - pipes.erase (pipe_); + _pipes.erase (pipe_); if (is_terminating ()) unregister_term_ack (); } @@ -1576,14 +1578,14 @@ void zmq::socket_base_t::extract_flags (msg_t *msg_) zmq_assert (options.recv_routing_id); // Remove MORE flag. - rcvmore = (msg_->flags () & msg_t::more) != 0; + _rcvmore = (msg_->flags () & msg_t::more) != 0; } int zmq::socket_base_t::monitor (const char *addr_, int events_) { - scoped_lock_t lock (monitor_sync); + scoped_lock_t lock (_monitor_sync); - if (unlikely (ctx_terminated)) { + if (unlikely (_ctx_terminated)) { errno = ETERM; return -1; } @@ -1605,24 +1607,24 @@ int zmq::socket_base_t::monitor (const char *addr_, int events_) return -1; } // already monitoring. Stop previous monitor before starting new one. 
- if (monitor_socket != NULL) { + if (_monitor_socket != NULL) { stop_monitor (true); } // Register events to monitor - monitor_events = events_; - monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR); - if (monitor_socket == NULL) + _monitor_events = events_; + _monitor_socket = zmq_socket (get_ctx (), ZMQ_PAIR); + if (_monitor_socket == NULL) return -1; // Never block context termination on pending event messages int linger = 0; int rc = - zmq_setsockopt (monitor_socket, ZMQ_LINGER, &linger, sizeof (linger)); + zmq_setsockopt (_monitor_socket, ZMQ_LINGER, &linger, sizeof (linger)); if (rc == -1) stop_monitor (false); // Spawn the monitor socket endpoint - rc = zmq_bind (monitor_socket, addr_); + rc = zmq_bind (_monitor_socket, addr_); if (rc == -1) stop_monitor (false); return rc; @@ -1713,8 +1715,8 @@ void zmq::socket_base_t::event (const std::string &addr_, intptr_t value_, int type_) { - scoped_lock_t lock (monitor_sync); - if (monitor_events & type_) { + scoped_lock_t lock (_monitor_sync); + if (_monitor_events & type_) { monitor_event (type_, value_, addr_); } } @@ -1727,7 +1729,7 @@ void zmq::socket_base_t::monitor_event (int event_, // this is a private method which is only called from // contexts where the mutex has been locked before - if (monitor_socket) { + if (_monitor_socket) { // Send event in first frame zmq_msg_t msg; zmq_msg_init_size (&msg, 6); @@ -1737,12 +1739,12 @@ void zmq::socket_base_t::monitor_event (int event_, uint32_t value = static_cast (value_); memcpy (data + 0, &event, sizeof (event)); memcpy (data + 2, &value, sizeof (value)); - zmq_sendmsg (monitor_socket, &msg, ZMQ_SNDMORE); + zmq_sendmsg (_monitor_socket, &msg, ZMQ_SNDMORE); // Send address in second frame zmq_msg_init_size (&msg, addr_.size ()); memcpy (zmq_msg_data (&msg), addr_.c_str (), addr_.size ()); - zmq_sendmsg (monitor_socket, &msg, 0); + zmq_sendmsg (_monitor_socket, &msg, 0); } } @@ -1751,12 +1753,12 @@ void zmq::socket_base_t::stop_monitor (bool 
send_monitor_stopped_event_) // this is a private method which is only called from // contexts where the mutex has been locked before - if (monitor_socket) { - if ((monitor_events & ZMQ_EVENT_MONITOR_STOPPED) + if (_monitor_socket) { + if ((_monitor_events & ZMQ_EVENT_MONITOR_STOPPED) && send_monitor_stopped_event_) monitor_event (ZMQ_EVENT_MONITOR_STOPPED, 0, ""); - zmq_close (monitor_socket); - monitor_socket = NULL; - monitor_events = 0; + zmq_close (_monitor_socket); + _monitor_socket = NULL; + _monitor_events = 0; } } diff --git a/src/socket_base.hpp b/src/socket_base.hpp index 9ab44c9a..dbc6f85d 100644 --- a/src/socket_base.hpp +++ b/src/socket_base.hpp @@ -203,11 +203,11 @@ class socket_base_t : public own_t, // Map of open endpoints. typedef std::pair endpoint_pipe_t; typedef std::multimap endpoints_t; - endpoints_t endpoints; + endpoints_t _endpoints; // Map of open inproc endpoints. typedef std::multimap inprocs_t; - inprocs_t inprocs; + inprocs_t _inprocs; // To be called after processing commands or invoking any command // handlers explicitly. If required, it will deallocate the socket. @@ -218,15 +218,15 @@ class socket_base_t : public own_t, void extract_flags (msg_t *msg_); // Used to check whether the object is a socket. - uint32_t tag; + uint32_t _tag; // If true, associated context was already terminated. - bool ctx_terminated; + bool _ctx_terminated; // If true, object should have been already destroyed. However, // destruction is delayed while we unwind the stack to the point // where it doesn't intersect the object being destroyed. - bool destroyed; + bool _destroyed; // Parse URI string. int @@ -254,48 +254,48 @@ class socket_base_t : public own_t, void update_pipe_options (int option_); // Socket's mailbox object. - i_mailbox *mailbox; + i_mailbox *_mailbox; // List of attached pipes. typedef array_t pipes_t; - pipes_t pipes; + pipes_t _pipes; // Reaper's poller and handle of this socket within it. 
- poller_t *poller; - poller_t::handle_t handle; + poller_t *_poller; + poller_t::handle_t _handle; // Timestamp of when commands were processed the last time. - uint64_t last_tsc; + uint64_t _last_tsc; // Number of messages received since last command processing. - int ticks; + int _ticks; // True if the last message received had MORE flag set. - bool rcvmore; + bool _rcvmore; // Improves efficiency of time measurement. - clock_t clock; + clock_t _clock; // Monitor socket; - void *monitor_socket; + void *_monitor_socket; // Bitmask of events being monitored - int monitor_events; + int _monitor_events; // Last socket endpoint resolved URI - std::string last_endpoint; + std::string _last_endpoint; // Indicate if the socket is thread safe - const bool thread_safe; + const bool _thread_safe; // Signaler to be used in the reaping stage - signaler_t *reaper_signaler; + signaler_t *_reaper_signaler; // Mutex for synchronize access to the socket in thread safe mode - mutex_t sync; + mutex_t _sync; // Mutex to synchronize access to the monitor Pair socket - mutex_t monitor_sync; + mutex_t _monitor_sync; socket_base_t (const socket_base_t &); const socket_base_t &operator= (const socket_base_t &); diff --git a/src/socket_poller.cpp b/src/socket_poller.cpp index d30ba611..9eb41c7c 100644 --- a/src/socket_poller.cpp +++ b/src/socket_poller.cpp @@ -40,17 +40,17 @@ static bool is_thread_safe (zmq::socket_base_t &socket_) } zmq::socket_poller_t::socket_poller_t () : - tag (0xCAFEBABE), - signaler (NULL), - need_rebuild (true), - use_signaler (false), - poll_size (0) + _tag (0xCAFEBABE), + _signaler (NULL), + _need_rebuild (true), + _use_signaler (false), + _pollset_size (0) #if defined ZMQ_POLL_BASED_ON_POLL , - pollfds (NULL) + _pollfds (NULL) #elif defined ZMQ_POLL_BASED_ON_SELECT , - maxfd (0) + _max_fd (0) #endif { #if defined ZMQ_POLL_BASED_ON_SELECT @@ -58,13 +58,13 @@ zmq::socket_poller_t::socket_poller_t () : // On Windows fd_set contains array of SOCKETs, each 4 bytes. 
// For large fd_sets memset() could be expensive and it is unnecessary. // It is enough to set fd_count to 0, exactly what FD_ZERO() macro does. - FD_ZERO (&pollset_in); - FD_ZERO (&pollset_out); - FD_ZERO (&pollset_err); + FD_ZERO (&_pollset_in); + FD_ZERO (&_pollset_out); + FD_ZERO (&_pollset_err); #else - memset (&pollset_in, 0, sizeof (pollset_in)); - memset (&pollset_out, 0, sizeof (pollset_out)); - memset (&pollset_err, 0, sizeof (pollset_err)); + memset (&_pollset_in, 0, sizeof (_pollset_in)); + memset (&_pollset_out, 0, sizeof (_pollset_out)); + memset (&_pollset_err, 0, sizeof (_pollset_err)); #endif #endif } @@ -72,39 +72,39 @@ zmq::socket_poller_t::socket_poller_t () : zmq::socket_poller_t::~socket_poller_t () { // Mark the socket_poller as dead - tag = 0xdeadbeef; + _tag = 0xdeadbeef; - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { // TODO shouldn't this zmq_assert (it->socket->check_tag ()) instead? 
if (it->socket && it->socket->check_tag () && is_thread_safe (*it->socket)) { - it->socket->remove_signaler (signaler); + it->socket->remove_signaler (_signaler); } } - if (signaler != NULL) { - delete signaler; - signaler = NULL; + if (_signaler != NULL) { + delete _signaler; + _signaler = NULL; } #if defined ZMQ_POLL_BASED_ON_POLL - if (pollfds) { - free (pollfds); - pollfds = NULL; + if (_pollfds) { + free (_pollfds); + _pollfds = NULL; } #endif } bool zmq::socket_poller_t::check_tag () { - return tag == 0xCAFEBABE; + return _tag == 0xCAFEBABE; } int zmq::socket_poller_t::add (socket_base_t *socket_, void *user_data_, short events_) { - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (it->socket == socket_) { errno = EINVAL; return -1; @@ -112,21 +112,21 @@ int zmq::socket_poller_t::add (socket_base_t *socket_, } if (is_thread_safe (*socket_)) { - if (signaler == NULL) { - signaler = new (std::nothrow) signaler_t (); - if (!signaler) { + if (_signaler == NULL) { + _signaler = new (std::nothrow) signaler_t (); + if (!_signaler) { errno = ENOMEM; return -1; } - if (!signaler->valid ()) { - delete signaler; - signaler = NULL; + if (!_signaler->valid ()) { + delete _signaler; + _signaler = NULL; errno = EMFILE; return -1; } } - socket_->add_signaler (signaler); + socket_->add_signaler (_signaler); } item_t item = { @@ -140,20 +140,20 @@ int zmq::socket_poller_t::add (socket_base_t *socket_, #endif }; try { - items.push_back (item); + _items.push_back (item); } catch (const std::bad_alloc &) { errno = ENOMEM; return -1; } - need_rebuild = true; + _need_rebuild = true; return 0; } int zmq::socket_poller_t::add_fd (fd_t fd_, void *user_data_, short events_) { - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (!it->socket && it->fd == fd_) { errno = EINVAL; return -1; 
@@ -171,13 +171,13 @@ int zmq::socket_poller_t::add_fd (fd_t fd_, void *user_data_, short events_) #endif }; try { - items.push_back (item); + _items.push_back (item); } catch (const std::bad_alloc &) { errno = ENOMEM; return -1; } - need_rebuild = true; + _need_rebuild = true; return 0; } @@ -186,18 +186,18 @@ int zmq::socket_poller_t::modify (socket_base_t *socket_, short events_) { items_t::iterator it; - for (it = items.begin (); it != items.end (); ++it) { + for (it = _items.begin (); it != _items.end (); ++it) { if (it->socket == socket_) break; } - if (it == items.end ()) { + if (it == _items.end ()) { errno = EINVAL; return -1; } it->events = events_; - need_rebuild = true; + _need_rebuild = true; return 0; } @@ -207,18 +207,18 @@ int zmq::socket_poller_t::modify_fd (fd_t fd_, short events_) { items_t::iterator it; - for (it = items.begin (); it != items.end (); ++it) { + for (it = _items.begin (); it != _items.end (); ++it) { if (!it->socket && it->fd == fd_) break; } - if (it == items.end ()) { + if (it == _items.end ()) { errno = EINVAL; return -1; } it->events = events_; - need_rebuild = true; + _need_rebuild = true; return 0; } @@ -228,21 +228,21 @@ int zmq::socket_poller_t::remove (socket_base_t *socket_) { items_t::iterator it; - for (it = items.begin (); it != items.end (); ++it) { + for (it = _items.begin (); it != _items.end (); ++it) { if (it->socket == socket_) break; } - if (it == items.end ()) { + if (it == _items.end ()) { errno = EINVAL; return -1; } - items.erase (it); - need_rebuild = true; + _items.erase (it); + _need_rebuild = true; if (is_thread_safe (*socket_)) { - socket_->remove_signaler (signaler); + socket_->remove_signaler (_signaler); } return 0; @@ -252,18 +252,18 @@ int zmq::socket_poller_t::remove_fd (fd_t fd_) { items_t::iterator it; - for (it = items.begin (); it != items.end (); ++it) { + for (it = _items.begin (); it != _items.end (); ++it) { if (!it->socket && it->fd == fd_) break; } - if (it == items.end ()) { + if (it 
== _items.end ()) { errno = EINVAL; return -1; } - items.erase (it); - need_rebuild = true; + _items.erase (it); + _need_rebuild = true; return 0; } @@ -272,56 +272,56 @@ void zmq::socket_poller_t::rebuild () { #if defined ZMQ_POLL_BASED_ON_POLL - if (pollfds) { - free (pollfds); - pollfds = NULL; + if (_pollfds) { + free (_pollfds); + _pollfds = NULL; } - use_signaler = false; + _use_signaler = false; - poll_size = 0; + _pollset_size = 0; - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (it->events) { if (it->socket && is_thread_safe (*it->socket)) { - if (!use_signaler) { - use_signaler = true; - poll_size++; + if (!_use_signaler) { + _use_signaler = true; + _pollset_size++; } } else - poll_size++; + _pollset_size++; } } - if (poll_size == 0) + if (_pollset_size == 0) return; - pollfds = (pollfd *) malloc (poll_size * sizeof (pollfd)); - alloc_assert (pollfds); + _pollfds = (pollfd *) malloc (_pollset_size * sizeof (pollfd)); + alloc_assert (_pollfds); int item_nbr = 0; - if (use_signaler) { + if (_use_signaler) { item_nbr = 1; - pollfds[0].fd = signaler->get_fd (); - pollfds[0].events = POLLIN; + _pollfds[0].fd = _signaler->get_fd (); + _pollfds[0].events = POLLIN; } - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (it->events) { if (it->socket) { if (!is_thread_safe (*it->socket)) { size_t fd_size = sizeof (zmq::fd_t); int rc = it->socket->getsockopt ( - ZMQ_FD, &pollfds[item_nbr].fd, &fd_size); + ZMQ_FD, &_pollfds[item_nbr].fd, &fd_size); zmq_assert (rc == 0); - pollfds[item_nbr].events = POLLIN; + _pollfds[item_nbr].events = POLLIN; item_nbr++; } } else { - pollfds[item_nbr].fd = it->fd; - pollfds[item_nbr].events = + _pollfds[item_nbr].fd = it->fd; + _pollfds[item_nbr].events = (it->events & ZMQ_POLLIN ? POLLIN : 0) | (it->events & ZMQ_POLLOUT ? 
POLLOUT : 0) | (it->events & ZMQ_POLLPRI ? POLLPRI : 0); @@ -333,31 +333,31 @@ void zmq::socket_poller_t::rebuild () #elif defined ZMQ_POLL_BASED_ON_SELECT - FD_ZERO (&pollset_in); - FD_ZERO (&pollset_out); - FD_ZERO (&pollset_err); + FD_ZERO (&_pollset_in); + FD_ZERO (&_pollset_out); + FD_ZERO (&_pollset_err); // Ensure we do not attempt to select () on more than FD_SETSIZE // file descriptors. - zmq_assert (items.size () <= FD_SETSIZE); + zmq_assert (_items.size () <= FD_SETSIZE); - poll_size = 0; + _pollset_size = 0; - use_signaler = false; + _use_signaler = false; - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (it->socket && is_thread_safe (*it->socket) && it->events) { - use_signaler = true; - FD_SET (signaler->get_fd (), &pollset_in); - poll_size = 1; + _use_signaler = true; + FD_SET (_signaler->get_fd (), &_pollset_in); + _pollset_size = 1; break; } } - maxfd = 0; + _max_fd = 0; // Build the fd_sets for passing to select (). - for (items_t::iterator it = items.begin (); it != items.end (); ++it) { + for (items_t::iterator it = _items.begin (); it != _items.end (); ++it) { if (it->events) { // If the poll item is a 0MQ socket we are interested in input on the // notification file descriptor retrieved by the ZMQ_FD socket option. @@ -369,33 +369,33 @@ void zmq::socket_poller_t::rebuild () it->socket->getsockopt (ZMQ_FD, ¬ify_fd, &fd_size); zmq_assert (rc == 0); - FD_SET (notify_fd, &pollset_in); - if (maxfd < notify_fd) - maxfd = notify_fd; + FD_SET (notify_fd, &_pollset_in); + if (_max_fd < notify_fd) + _max_fd = notify_fd; - poll_size++; + _pollset_size++; } } // Else, the poll item is a raw file descriptor. Convert the poll item // events to the appropriate fd_sets. 
else { if (it->events & ZMQ_POLLIN) - FD_SET (it->fd, &pollset_in); + FD_SET (it->fd, &_pollset_in); if (it->events & ZMQ_POLLOUT) - FD_SET (it->fd, &pollset_out); + FD_SET (it->fd, &_pollset_out); if (it->events & ZMQ_POLLERR) - FD_SET (it->fd, &pollset_err); - if (maxfd < it->fd) - maxfd = it->fd; + FD_SET (it->fd, &_pollset_err); + if (_max_fd < it->fd) + _max_fd = it->fd; - poll_size++; + _pollset_size++; } } } #endif - need_rebuild = false; + _need_rebuild = false; } void zmq::socket_poller_t::zero_trail_events ( @@ -421,8 +421,8 @@ int zmq::socket_poller_t::check_events (zmq::socket_poller_t::event_t *events_, #endif { int found = 0; - for (items_t::iterator it = items.begin (); - it != items.end () && found < n_events_; ++it) { + for (items_t::iterator it = _items.begin (); + it != _items.end () && found < n_events_; ++it) { // The poll item is a 0MQ socket. Retrieve pending events // using the ZMQ_EVENTS socket option. if (it->socket) { @@ -445,7 +445,7 @@ int zmq::socket_poller_t::check_events (zmq::socket_poller_t::event_t *events_, else { #if defined ZMQ_POLL_BASED_ON_POLL - short revents = pollfds[it->pollfd_index].revents; + short revents = _pollfds[it->pollfd_index].revents; short events = 0; if (revents & POLLIN) @@ -524,15 +524,15 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, int n_events_, long timeout_) { - if (items.empty () && timeout_ < 0) { + if (_items.empty () && timeout_ < 0) { errno = EFAULT; return -1; } - if (need_rebuild) + if (_need_rebuild) rebuild (); - if (unlikely (poll_size == 0)) { + if (unlikely (_pollset_size == 0)) { // We'll report an error (timed out) as if the list was non-empty and // no event occurred within the specified timeout. Otherwise the caller // needs to check the return value AND the event to avoid using the @@ -582,7 +582,7 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, // Wait for events. 
while (true) { - int rc = poll (pollfds, poll_size, timeout); + int rc = poll (_pollfds, _pollset_size, timeout); if (rc == -1 && errno == EINTR) { return -1; } @@ -591,8 +591,8 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, } // Receive the signal from pollfd - if (use_signaler && pollfds[0].revents & POLLIN) - signaler->recv (); + if (_use_signaler && _pollfds[0].revents & POLLIN) + _signaler->recv (); // Check for the events. int found = check_events (events_, n_events_); @@ -642,18 +642,18 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, // SOCKETS are continuous from the beginning of fd_array in fd_set. // We just need to copy fd_count elements of fd_array. // We gain huge memcpy() improvement if number of used SOCKETs is much lower than FD_SETSIZE. - memcpy (&inset, &pollset_in, - reinterpret_cast (pollset_in.fd_array - + pollset_in.fd_count) - - reinterpret_cast (&pollset_in)); - memcpy (&outset, &pollset_out, - reinterpret_cast (pollset_out.fd_array - + pollset_out.fd_count) - - reinterpret_cast (&pollset_out)); - memcpy (&errset, &pollset_err, - reinterpret_cast (pollset_err.fd_array - + pollset_err.fd_count) - - reinterpret_cast (&pollset_err)); + memcpy (&inset, &_pollset_in, + reinterpret_cast (_pollset_in.fd_array + + _pollset_in.fd_count) + - reinterpret_cast (&_pollset_in)); + memcpy (&outset, &_pollset_out, + reinterpret_cast (_pollset_out.fd_array + + _pollset_out.fd_count) + - reinterpret_cast (&_pollset_out)); + memcpy (&errset, &_pollset_err, + reinterpret_cast (_pollset_err.fd_array + + _pollset_err.fd_count) + - reinterpret_cast (&_pollset_err)); int rc = select (0, &inset, &outset, &errset, ptimeout); if (unlikely (rc == SOCKET_ERROR)) { errno = zmq::wsa_error_to_errno (WSAGetLastError ()); @@ -661,10 +661,10 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, return -1; } #else - memcpy (&inset, &pollset_in, sizeof (fd_set)); - memcpy (&outset, &pollset_out, sizeof 
(fd_set)); - memcpy (&errset, &pollset_err, sizeof (fd_set)); - int rc = select (maxfd + 1, &inset, &outset, &errset, ptimeout); + memcpy (&inset, &_pollset_in, sizeof (fd_set)); + memcpy (&outset, &_pollset_out, sizeof (fd_set)); + memcpy (&errset, &_pollset_err, sizeof (fd_set)); + int rc = select (_max_fd + 1, &inset, &outset, &errset, ptimeout); if (unlikely (rc == -1)) { errno_assert (errno == EINTR || errno == EBADF); return -1; @@ -673,8 +673,8 @@ int zmq::socket_poller_t::wait (zmq::socket_poller_t::event_t *events_, break; } - if (use_signaler && FD_ISSET (signaler->get_fd (), &inset)) - signaler->recv (); + if (_use_signaler && FD_ISSET (_signaler->get_fd (), &inset)) + _signaler->recv (); // Check for the events. int found = check_events (events_, n_events_, inset, outset, errset); diff --git a/src/socket_poller.hpp b/src/socket_poller.hpp index ba40911f..e989b090 100644 --- a/src/socket_poller.hpp +++ b/src/socket_poller.hpp @@ -77,7 +77,7 @@ class socket_poller_t int wait (event_t *event_, int n_events_, long timeout_); - inline int size () { return static_cast (items.size ()); }; + inline int size () { return static_cast (_items.size ()); }; // Return false if object is not a socket. bool check_tag (); @@ -103,10 +103,10 @@ class socket_poller_t void rebuild (); // Used to check whether the object is a socket_poller. - uint32_t tag; + uint32_t _tag; // Signaler used for thread safe sockets polling - signaler_t *signaler; + signaler_t *_signaler; typedef struct item_t { @@ -121,24 +121,24 @@ class socket_poller_t // List of sockets typedef std::vector items_t; - items_t items; + items_t _items; // Does the pollset needs rebuilding? - bool need_rebuild; + bool _need_rebuild; // Should the signaler be used for the thread safe polling? 
- bool use_signaler; + bool _use_signaler; // Size of the pollset - int poll_size; + int _pollset_size; #if defined ZMQ_POLL_BASED_ON_POLL - pollfd *pollfds; + pollfd *_pollfds; #elif defined ZMQ_POLL_BASED_ON_SELECT - fd_set pollset_in; - fd_set pollset_out; - fd_set pollset_err; - zmq::fd_t maxfd; + fd_set _pollset_in; + fd_set _pollset_out; + fd_set _pollset_err; + zmq::fd_t _max_fd; #endif socket_poller_t (const socket_poller_t &); diff --git a/src/socks.cpp b/src/socks.cpp index 0ec2dc84..93ac63bd 100644 --- a/src/socks.cpp +++ b/src/socks.cpp @@ -54,58 +54,58 @@ zmq::socks_greeting_t::socks_greeting_t (uint8_t *methods_, } zmq::socks_greeting_encoder_t::socks_greeting_encoder_t () : - bytes_encoded (0), - bytes_written (0) + _bytes_encoded (0), + _bytes_written (0) { } void zmq::socks_greeting_encoder_t::encode (const socks_greeting_t &greeting_) { - uint8_t *ptr = buf; + uint8_t *ptr = _buf; *ptr++ = 0x05; *ptr++ = static_cast (greeting_.num_methods); for (uint8_t i = 0; i < greeting_.num_methods; i++) *ptr++ = greeting_.methods[i]; - bytes_encoded = 2 + greeting_.num_methods; - bytes_written = 0; + _bytes_encoded = 2 + greeting_.num_methods; + _bytes_written = 0; } int zmq::socks_greeting_encoder_t::output (fd_t fd_) { const int rc = - tcp_write (fd_, buf + bytes_written, bytes_encoded - bytes_written); + tcp_write (fd_, _buf + _bytes_written, _bytes_encoded - _bytes_written); if (rc > 0) - bytes_written += static_cast (rc); + _bytes_written += static_cast (rc); return rc; } bool zmq::socks_greeting_encoder_t::has_pending_data () const { - return bytes_written < bytes_encoded; + return _bytes_written < _bytes_encoded; } void zmq::socks_greeting_encoder_t::reset () { - bytes_encoded = bytes_written = 0; + _bytes_encoded = _bytes_written = 0; } zmq::socks_choice_t::socks_choice_t (unsigned char method_) : method (method_) { } -zmq::socks_choice_decoder_t::socks_choice_decoder_t () : bytes_read (0) +zmq::socks_choice_decoder_t::socks_choice_decoder_t () : 
_bytes_read (0) { } int zmq::socks_choice_decoder_t::input (fd_t fd_) { - zmq_assert (bytes_read < 2); - const int rc = tcp_read (fd_, buf + bytes_read, 2 - bytes_read); + zmq_assert (_bytes_read < 2); + const int rc = tcp_read (fd_, _buf + _bytes_read, 2 - _bytes_read); if (rc > 0) { - bytes_read += static_cast (rc); - if (buf[0] != 0x05) + _bytes_read += static_cast (rc); + if (_buf[0] != 0x05) return -1; } return rc; @@ -113,18 +113,18 @@ int zmq::socks_choice_decoder_t::input (fd_t fd_) bool zmq::socks_choice_decoder_t::message_ready () const { - return bytes_read == 2; + return _bytes_read == 2; } zmq::socks_choice_t zmq::socks_choice_decoder_t::decode () { zmq_assert (message_ready ()); - return socks_choice_t (buf[1]); + return socks_choice_t (_buf[1]); } void zmq::socks_choice_decoder_t::reset () { - bytes_read = 0; + _bytes_read = 0; } @@ -139,8 +139,8 @@ zmq::socks_request_t::socks_request_t (uint8_t command_, } zmq::socks_request_encoder_t::socks_request_encoder_t () : - bytes_encoded (0), - bytes_written (0) + _bytes_encoded (0), + _bytes_written (0) { } @@ -148,7 +148,7 @@ void zmq::socks_request_encoder_t::encode (const socks_request_t &req_) { zmq_assert (req_.hostname.size () <= UINT8_MAX); - unsigned char *ptr = buf; + unsigned char *ptr = _buf; *ptr++ = 0x05; *ptr++ = req_.command; *ptr++ = 0x00; @@ -190,27 +190,27 @@ void zmq::socks_request_encoder_t::encode (const socks_request_t &req_) *ptr++ = req_.port / 256; *ptr++ = req_.port % 256; - bytes_encoded = ptr - buf; - bytes_written = 0; + _bytes_encoded = ptr - _buf; + _bytes_written = 0; } int zmq::socks_request_encoder_t::output (fd_t fd_) { const int rc = - tcp_write (fd_, buf + bytes_written, bytes_encoded - bytes_written); + tcp_write (fd_, _buf + _bytes_written, _bytes_encoded - _bytes_written); if (rc > 0) - bytes_written += static_cast (rc); + _bytes_written += static_cast (rc); return rc; } bool zmq::socks_request_encoder_t::has_pending_data () const { - return bytes_written < 
bytes_encoded; + return _bytes_written < _bytes_encoded; } void zmq::socks_request_encoder_t::reset () { - bytes_encoded = bytes_written = 0; + _bytes_encoded = _bytes_written = 0; } zmq::socks_response_t::socks_response_t (uint8_t response_code_, @@ -222,7 +222,7 @@ zmq::socks_response_t::socks_response_t (uint8_t response_code_, { } -zmq::socks_response_decoder_t::socks_response_decoder_t () : bytes_read (0) +zmq::socks_response_decoder_t::socks_response_decoder_t () : _bytes_read (0) { } @@ -230,31 +230,31 @@ int zmq::socks_response_decoder_t::input (fd_t fd_) { size_t n = 0; - if (bytes_read < 5) - n = 5 - bytes_read; + if (_bytes_read < 5) + n = 5 - _bytes_read; else { - const uint8_t atyp = buf[3]; + const uint8_t atyp = _buf[3]; zmq_assert (atyp == 0x01 || atyp == 0x03 || atyp == 0x04); if (atyp == 0x01) n = 3 + 2; else if (atyp == 0x03) - n = buf[4] + 2; + n = _buf[4] + 2; else if (atyp == 0x04) n = 15 + 2; } - const int rc = tcp_read (fd_, buf + bytes_read, n); + const int rc = tcp_read (fd_, _buf + _bytes_read, n); if (rc > 0) { - bytes_read += static_cast (rc); - if (buf[0] != 0x05) + _bytes_read += static_cast (rc); + if (_buf[0] != 0x05) return -1; - if (bytes_read >= 2) - if (buf[1] > 0x08) + if (_bytes_read >= 2) + if (_buf[1] > 0x08) return -1; - if (bytes_read >= 3) - if (buf[2] != 0x00) + if (_bytes_read >= 3) + if (_buf[2] != 0x00) return -1; - if (bytes_read >= 4) { - const uint8_t atyp = buf[3]; + if (_bytes_read >= 4) { + const uint8_t atyp = _buf[3]; if (atyp != 0x01 && atyp != 0x03 && atyp != 0x04) return -1; } @@ -264,26 +264,26 @@ int zmq::socks_response_decoder_t::input (fd_t fd_) bool zmq::socks_response_decoder_t::message_ready () const { - if (bytes_read < 4) + if (_bytes_read < 4) return false; - const uint8_t atyp = buf[3]; + const uint8_t atyp = _buf[3]; zmq_assert (atyp == 0x01 || atyp == 0x03 || atyp == 0x04); if (atyp == 0x01) - return bytes_read == 10; + return _bytes_read == 10; if (atyp == 0x03) - return bytes_read > 4 && 
bytes_read == 4 + 1 + buf[4] + 2u; + return _bytes_read > 4 && _bytes_read == 4 + 1 + _buf[4] + 2u; else - return bytes_read == 22; + return _bytes_read == 22; } zmq::socks_response_t zmq::socks_response_decoder_t::decode () { zmq_assert (message_ready ()); - return socks_response_t (buf[1], "", 0); + return socks_response_t (_buf[1], "", 0); } void zmq::socks_response_decoder_t::reset () { - bytes_read = 0; + _bytes_read = 0; } diff --git a/src/socks.hpp b/src/socks.hpp index 98c2fe41..fa9fb608 100644 --- a/src/socks.hpp +++ b/src/socks.hpp @@ -55,9 +55,9 @@ class socks_greeting_encoder_t void reset (); private: - size_t bytes_encoded; - size_t bytes_written; - uint8_t buf[2 + UINT8_MAX]; + size_t _bytes_encoded; + size_t _bytes_written; + uint8_t _buf[2 + UINT8_MAX]; }; struct socks_choice_t @@ -77,8 +77,8 @@ class socks_choice_decoder_t void reset (); private: - unsigned char buf[2]; - size_t bytes_read; + unsigned char _buf[2]; + size_t _bytes_read; }; struct socks_request_t @@ -100,9 +100,9 @@ class socks_request_encoder_t void reset (); private: - size_t bytes_encoded; - size_t bytes_written; - uint8_t buf[4 + UINT8_MAX + 1 + 2]; + size_t _bytes_encoded; + size_t _bytes_written; + uint8_t _buf[4 + UINT8_MAX + 1 + 2]; }; struct socks_response_t @@ -125,8 +125,8 @@ class socks_response_decoder_t void reset (); private: - int8_t buf[4 + UINT8_MAX + 1 + 2]; - size_t bytes_read; + int8_t _buf[4 + UINT8_MAX + 1 + 2]; + size_t _bytes_read; }; } diff --git a/src/socks_connecter.cpp b/src/socks_connecter.cpp index 9c5ce110..91bcc423 100644 --- a/src/socks_connecter.cpp +++ b/src/socks_connecter.cpp @@ -60,32 +60,32 @@ zmq::socks_connecter_t::socks_connecter_t (class io_thread_t *io_thread_, bool delayed_start_) : own_t (io_thread_, options_), io_object_t (io_thread_), - addr (addr_), - proxy_addr (proxy_addr_), - status (unplugged), - s (retired_fd), - handle (static_cast (NULL)), - handle_valid (false), - delayed_start (delayed_start_), - timer_started (false), - 
session (session_), - current_reconnect_ivl (options.reconnect_ivl) + _addr (addr_), + _proxy_addr (proxy_addr_), + _status (unplugged), + _s (retired_fd), + _handle (static_cast (NULL)), + _handle_valid (false), + _delayed_start (delayed_start_), + _timer_started (false), + _session (session_), + _current_reconnect_ivl (options.reconnect_ivl) { - zmq_assert (addr); - zmq_assert (addr->protocol == "tcp"); - proxy_addr->to_string (endpoint); - socket = session->get_socket (); + zmq_assert (_addr); + zmq_assert (_addr->protocol == "tcp"); + _proxy_addr->to_string (_endpoint); + _socket = _session->get_socket (); } zmq::socks_connecter_t::~socks_connecter_t () { - zmq_assert (s == retired_fd); - LIBZMQ_DELETE (proxy_addr); + zmq_assert (_s == retired_fd); + LIBZMQ_DELETE (_proxy_addr); } void zmq::socks_connecter_t::process_plug () { - if (delayed_start) + if (_delayed_start) start_timer (); else initiate_connect (); @@ -93,7 +93,7 @@ void zmq::socks_connecter_t::process_plug () void zmq::socks_connecter_t::process_term (int linger_) { - switch (status) { + switch (_status) { case unplugged: break; case waiting_for_reconnect_time: @@ -104,8 +104,8 @@ void zmq::socks_connecter_t::process_term (int linger_) case waiting_for_choice: case sending_request: case waiting_for_response: - rm_fd (handle); - if (s != retired_fd) + rm_fd (_handle); + if (_s != retired_fd) close (); break; } @@ -115,54 +115,54 @@ void zmq::socks_connecter_t::process_term (int linger_) void zmq::socks_connecter_t::in_event () { - zmq_assert (status != unplugged && status != waiting_for_reconnect_time); + zmq_assert (_status != unplugged && _status != waiting_for_reconnect_time); - if (status == waiting_for_choice) { - int rc = choice_decoder.input (s); + if (_status == waiting_for_choice) { + int rc = _choice_decoder.input (_s); if (rc == 0 || rc == -1) error (); - else if (choice_decoder.message_ready ()) { - const socks_choice_t choice = choice_decoder.decode (); + else if 
(_choice_decoder.message_ready ()) { + const socks_choice_t choice = _choice_decoder.decode (); rc = process_server_response (choice); if (rc == -1) error (); else { std::string hostname = ""; uint16_t port = 0; - if (parse_address (addr->address, hostname, port) == -1) + if (parse_address (_addr->address, hostname, port) == -1) error (); else { - request_encoder.encode ( + _request_encoder.encode ( socks_request_t (1, hostname, port)); - reset_pollin (handle); - set_pollout (handle); - status = sending_request; + reset_pollin (_handle); + set_pollout (_handle); + _status = sending_request; } } } - } else if (status == waiting_for_response) { - int rc = response_decoder.input (s); + } else if (_status == waiting_for_response) { + int rc = _response_decoder.input (_s); if (rc == 0 || rc == -1) error (); - else if (response_decoder.message_ready ()) { - const socks_response_t response = response_decoder.decode (); + else if (_response_decoder.message_ready ()) { + const socks_response_t response = _response_decoder.decode (); rc = process_server_response (response); if (rc == -1) error (); else { // Create the engine object for this connection. stream_engine_t *engine = - new (std::nothrow) stream_engine_t (s, options, endpoint); + new (std::nothrow) stream_engine_t (_s, options, _endpoint); alloc_assert (engine); // Attach the engine to the corresponding session object. - send_attach (session, engine); + send_attach (_session, engine); - socket->event_connected (endpoint, s); + _socket->event_connected (_endpoint, _s); - rm_fd (handle); - s = -1; - status = unplugged; + rm_fd (_handle); + _s = -1; + _status = unplugged; // Shut the connecter down. 
terminate (); @@ -174,36 +174,37 @@ void zmq::socks_connecter_t::in_event () void zmq::socks_connecter_t::out_event () { - zmq_assert (status == waiting_for_proxy_connection - || status == sending_greeting || status == sending_request); + zmq_assert (_status == waiting_for_proxy_connection + || _status == sending_greeting || _status == sending_request); - if (status == waiting_for_proxy_connection) { + if (_status == waiting_for_proxy_connection) { const int rc = static_cast (check_proxy_connection ()); if (rc == -1) error (); else { - greeting_encoder.encode (socks_greeting_t (socks_no_auth_required)); - status = sending_greeting; + _greeting_encoder.encode ( + socks_greeting_t (socks_no_auth_required)); + _status = sending_greeting; } - } else if (status == sending_greeting) { - zmq_assert (greeting_encoder.has_pending_data ()); - const int rc = greeting_encoder.output (s); + } else if (_status == sending_greeting) { + zmq_assert (_greeting_encoder.has_pending_data ()); + const int rc = _greeting_encoder.output (_s); if (rc == -1 || rc == 0) error (); - else if (!greeting_encoder.has_pending_data ()) { - reset_pollout (handle); - set_pollin (handle); - status = waiting_for_choice; + else if (!_greeting_encoder.has_pending_data ()) { + reset_pollout (_handle); + set_pollin (_handle); + _status = waiting_for_choice; } } else { - zmq_assert (request_encoder.has_pending_data ()); - const int rc = request_encoder.output (s); + zmq_assert (_request_encoder.has_pending_data ()); + const int rc = _request_encoder.output (_s); if (rc == -1 || rc == 0) error (); - else if (!request_encoder.has_pending_data ()) { - reset_pollout (handle); - set_pollin (handle); - status = waiting_for_response; + else if (!_request_encoder.has_pending_data ()) { + reset_pollout (_handle); + set_pollin (_handle); + _status = waiting_for_response; } } } @@ -215,20 +216,20 @@ void zmq::socks_connecter_t::initiate_connect () // Connect may succeed in synchronous manner. 
if (rc == 0) { - handle = add_fd (s); - set_pollout (handle); - status = sending_greeting; + _handle = add_fd (_s); + set_pollout (_handle); + _status = sending_greeting; } // Connection establishment may be delayed. Poll for its completion. else if (errno == EINPROGRESS) { - handle = add_fd (s); - set_pollout (handle); - status = waiting_for_proxy_connection; - socket->event_connect_delayed (endpoint, zmq_errno ()); + _handle = add_fd (_s); + set_pollout (_handle); + _status = waiting_for_proxy_connection; + _socket->event_connect_delayed (_endpoint, zmq_errno ()); } // Handle any other error condition by eventual reconnect. else { - if (s != retired_fd) + if (_s != retired_fd) close (); start_timer (); } @@ -249,19 +250,19 @@ int zmq::socks_connecter_t::process_server_response ( void zmq::socks_connecter_t::timer_event (int id_) { - zmq_assert (status == waiting_for_reconnect_time); + zmq_assert (_status == waiting_for_reconnect_time); zmq_assert (id_ == reconnect_timer_id); initiate_connect (); } void zmq::socks_connecter_t::error () { - rm_fd (handle); + rm_fd (_handle); close (); - greeting_encoder.reset (); - choice_decoder.reset (); - request_encoder.reset (); - response_decoder.reset (); + _greeting_encoder.reset (); + _choice_decoder.reset (); + _request_encoder.reset (); + _response_decoder.reset (); start_timer (); } @@ -269,82 +270,82 @@ void zmq::socks_connecter_t::start_timer () { const int interval = get_new_reconnect_ivl (); add_timer (interval, reconnect_timer_id); - status = waiting_for_reconnect_time; - socket->event_connect_retried (endpoint, interval); + _status = waiting_for_reconnect_time; + _socket->event_connect_retried (_endpoint, interval); } int zmq::socks_connecter_t::get_new_reconnect_ivl () { // The new interval is the current interval + random value. 
const int interval = - current_reconnect_ivl + generate_random () % options.reconnect_ivl; + _current_reconnect_ivl + generate_random () % options.reconnect_ivl; // Only change the current reconnect interval if the maximum reconnect // interval was set and if it's larger than the reconnect interval. if (options.reconnect_ivl_max > 0 && options.reconnect_ivl_max > options.reconnect_ivl) // Calculate the next interval - current_reconnect_ivl = - std::min (current_reconnect_ivl * 2, options.reconnect_ivl_max); + _current_reconnect_ivl = + std::min (_current_reconnect_ivl * 2, options.reconnect_ivl_max); return interval; } int zmq::socks_connecter_t::connect_to_proxy () { - zmq_assert (s == retired_fd); + zmq_assert (_s == retired_fd); // Resolve the address - LIBZMQ_DELETE (proxy_addr->resolved.tcp_addr); - proxy_addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); - alloc_assert (proxy_addr->resolved.tcp_addr); + LIBZMQ_DELETE (_proxy_addr->resolved.tcp_addr); + _proxy_addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); + alloc_assert (_proxy_addr->resolved.tcp_addr); - int rc = proxy_addr->resolved.tcp_addr->resolve ( - proxy_addr->address.c_str (), false, options.ipv6); + int rc = _proxy_addr->resolved.tcp_addr->resolve ( + _proxy_addr->address.c_str (), false, options.ipv6); if (rc != 0) { - LIBZMQ_DELETE (proxy_addr->resolved.tcp_addr); + LIBZMQ_DELETE (_proxy_addr->resolved.tcp_addr); return -1; } - zmq_assert (proxy_addr->resolved.tcp_addr != NULL); - const tcp_address_t *tcp_addr = proxy_addr->resolved.tcp_addr; + zmq_assert (_proxy_addr->resolved.tcp_addr != NULL); + const tcp_address_t *tcp_addr = _proxy_addr->resolved.tcp_addr; // Create the socket. - s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP); - if (s == retired_fd) + _s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP); + if (_s == retired_fd) return -1; // On some systems, IPv4 mapping in IPv6 sockets is disabled by default. 
// Switch it on in such cases. if (tcp_addr->family () == AF_INET6) - enable_ipv4_mapping (s); + enable_ipv4_mapping (_s); // Set the IP Type-Of-Service priority for this socket if (options.tos != 0) - set_ip_type_of_service (s, options.tos); + set_ip_type_of_service (_s, options.tos); // Bind the socket to a device if applicable if (!options.bound_device.empty ()) - bind_to_device (s, options.bound_device); + bind_to_device (_s, options.bound_device); // Set the socket to non-blocking mode so that we get async connect(). - unblock_socket (s); + unblock_socket (_s); // Set the socket buffer limits for the underlying socket. if (options.sndbuf >= 0) - set_tcp_send_buffer (s, options.sndbuf); + set_tcp_send_buffer (_s, options.sndbuf); if (options.rcvbuf >= 0) - set_tcp_receive_buffer (s, options.rcvbuf); + set_tcp_receive_buffer (_s, options.rcvbuf); // Set the IP Type-Of-Service for the underlying socket if (options.tos != 0) - set_ip_type_of_service (s, options.tos); + set_ip_type_of_service (_s, options.tos); // Set a source address for conversations if (tcp_addr->has_src_addr ()) { #if defined ZMQ_HAVE_VXWORKS - rc = ::bind (s, (sockaddr *) tcp_addr->src_addr (), + rc = ::bind (_s, (sockaddr *) tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #else - rc = ::bind (s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); + rc = ::bind (_s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #endif if (rc == -1) { close (); @@ -354,9 +355,9 @@ int zmq::socks_connecter_t::connect_to_proxy () // Connect to the remote peer. #if defined ZMQ_HAVE_VXWORKS - rc = ::connect (s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); + rc = ::connect (_s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); #else - rc = ::connect (s, tcp_addr->addr (), tcp_addr->addrlen ()); + rc = ::connect (_s, tcp_addr->addr (), tcp_addr->addrlen ()); #endif // Connect was successful immediately. 
if (rc == 0) @@ -389,7 +390,7 @@ zmq::fd_t zmq::socks_connecter_t::check_proxy_connection () socklen_t len = sizeof err; #endif - int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, + int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast (&err), &len); // Assert if the error was caused by 0MQ bug. @@ -419,10 +420,10 @@ zmq::fd_t zmq::socks_connecter_t::check_proxy_connection () } #endif - rc = tune_tcp_socket (s); + rc = tune_tcp_socket (_s); rc = rc | tune_tcp_keepalives ( - s, options.tcp_keepalive, options.tcp_keepalive_cnt, + _s, options.tcp_keepalive, options.tcp_keepalive_cnt, options.tcp_keepalive_idle, options.tcp_keepalive_intvl); if (rc != 0) return -1; @@ -432,16 +433,16 @@ zmq::fd_t zmq::socks_connecter_t::check_proxy_connection () void zmq::socks_connecter_t::close () { - zmq_assert (s != retired_fd); + zmq_assert (_s != retired_fd); #ifdef ZMQ_HAVE_WINDOWS - const int rc = closesocket (s); + const int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else - const int rc = ::close (s); + const int rc = ::close (_s); errno_assert (rc == 0); #endif - socket->event_closed (endpoint, s); - s = retired_fd; + _socket->event_closed (_endpoint, _s); + _s = retired_fd; } int zmq::socks_connecter_t::parse_address (const std::string &address_, diff --git a/src/socks_connecter.hpp b/src/socks_connecter.hpp index 1838dd71..254ca7ee 100644 --- a/src/socks_connecter.hpp +++ b/src/socks_connecter.hpp @@ -122,46 +122,46 @@ class socks_connecter_t : public own_t, public io_object_t // retired_fd if the connection was unsuccessful. zmq::fd_t check_proxy_connection (); - socks_greeting_encoder_t greeting_encoder; - socks_choice_decoder_t choice_decoder; - socks_request_encoder_t request_encoder; - socks_response_decoder_t response_decoder; + socks_greeting_encoder_t _greeting_encoder; + socks_choice_decoder_t _choice_decoder; + socks_request_encoder_t _request_encoder; + socks_response_decoder_t _response_decoder; // Address to connect to. 
Owned by session_base_t. - address_t *addr; + address_t *_addr; // SOCKS address; owned by this connecter. - address_t *proxy_addr; + address_t *_proxy_addr; - int status; + int _status; // Underlying socket. - fd_t s; + fd_t _s; // Handle corresponding to the listening socket. - handle_t handle; + handle_t _handle; // If true file descriptor is registered with the poller and 'handle' // contains valid value. - bool handle_valid; + bool _handle_valid; // If true, connecter is waiting a while before trying to connect. - const bool delayed_start; + const bool _delayed_start; // True iff a timer has been started. - bool timer_started; + bool _timer_started; // Reference to the session we belong to. - zmq::session_base_t *session; + zmq::session_base_t *_session; // Current reconnect ivl, updated for backoff strategy - int current_reconnect_ivl; + int _current_reconnect_ivl; // String representation of endpoint to connect to - std::string endpoint; + std::string _endpoint; // Socket - zmq::socket_base_t *socket; + zmq::socket_base_t *_socket; socks_connecter_t (const socks_connecter_t &); const socks_connecter_t &operator= (const socks_connecter_t &); diff --git a/src/stream.cpp b/src/stream.cpp index 9c3c4128..c0f31605 100644 --- a/src/stream.cpp +++ b/src/stream.cpp @@ -38,24 +38,24 @@ zmq::stream_t::stream_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - prefetched (false), - routing_id_sent (false), - current_out (NULL), - more_out (false), - next_integral_routing_id (generate_random ()) + _prefetched (false), + _routing_id_sent (false), + _current_out (NULL), + _more_out (false), + _next_integral_routing_id (generate_random ()) { options.type = ZMQ_STREAM; options.raw_socket = true; - prefetched_routing_id.init (); - prefetched_msg.init (); + _prefetched_routing_id.init (); + _prefetched_msg.init (); } zmq::stream_t::~stream_t () { - zmq_assert (outpipes.empty ()); - prefetched_routing_id.close (); - prefetched_msg.close 
(); + zmq_assert (_outpipes.empty ()); + _prefetched_routing_id.close (); + _prefetched_msg.close (); } void zmq::stream_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) @@ -65,32 +65,32 @@ void zmq::stream_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) zmq_assert (pipe_); identify_peer (pipe_); - fq.attach (pipe_); + _fq.attach (pipe_); } void zmq::stream_t::xpipe_terminated (pipe_t *pipe_) { - outpipes_t::iterator it = outpipes.find (pipe_->get_routing_id ()); - zmq_assert (it != outpipes.end ()); - outpipes.erase (it); - fq.pipe_terminated (pipe_); - if (pipe_ == current_out) - current_out = NULL; + outpipes_t::iterator it = _outpipes.find (pipe_->get_routing_id ()); + zmq_assert (it != _outpipes.end ()); + _outpipes.erase (it); + _fq.pipe_terminated (pipe_); + if (pipe_ == _current_out) + _current_out = NULL; } void zmq::stream_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::stream_t::xwrite_activated (pipe_t *pipe_) { outpipes_t::iterator it; - for (it = outpipes.begin (); it != outpipes.end (); ++it) + for (it = _outpipes.begin (); it != _outpipes.end (); ++it) if (it->second.pipe == pipe_) break; - zmq_assert (it != outpipes.end ()); + zmq_assert (it != _outpipes.end ()); zmq_assert (!it->second.active); it->second.active = true; } @@ -99,8 +99,8 @@ int zmq::stream_t::xsend (msg_t *msg_) { // If this is the first part of the message it's the ID of the // peer to send the message to. - if (!more_out) { - zmq_assert (!current_out); + if (!_more_out) { + zmq_assert (!_current_out); // If we have malformed message (prefix with no subsequent message) // then just silently ignore it. 
@@ -110,13 +110,13 @@ int zmq::stream_t::xsend (msg_t *msg_) // If there's no such pipe return an error blob_t routing_id (static_cast (msg_->data ()), msg_->size ()); - outpipes_t::iterator it = outpipes.find (routing_id); + outpipes_t::iterator it = _outpipes.find (routing_id); - if (it != outpipes.end ()) { - current_out = it->second.pipe; - if (!current_out->check_write ()) { + if (it != _outpipes.end ()) { + _current_out = it->second.pipe; + if (!_current_out->check_write ()) { it->second.active = false; - current_out = NULL; + _current_out = NULL; errno = EAGAIN; return -1; } @@ -127,7 +127,7 @@ int zmq::stream_t::xsend (msg_t *msg_) } // Expect one more message frame. - more_out = true; + _more_out = true; int rc = msg_->close (); errno_assert (rc == 0); @@ -140,26 +140,26 @@ int zmq::stream_t::xsend (msg_t *msg_) msg_->reset_flags (msg_t::more); // This is the last part of the message. - more_out = false; + _more_out = false; // Push the message into the pipe. If there's no out pipe, just drop it. - if (current_out) { + if (_current_out) { // Close the remote connection if user has asked to do so // by sending zero length message. 
// Pending messages in the pipe will be dropped (on receiving term- ack) if (msg_->size () == 0) { - current_out->terminate (false); + _current_out->terminate (false); int rc = msg_->close (); errno_assert (rc == 0); rc = msg_->init (); errno_assert (rc == 0); - current_out = NULL; + _current_out = NULL; return 0; } - bool ok = current_out->write (msg_); + bool ok = _current_out->write (msg_); if (likely (ok)) - current_out->flush (); - current_out = NULL; + _current_out->flush (); + _current_out = NULL; } else { int rc = msg_->close (); errno_assert (rc == 0); @@ -200,26 +200,26 @@ int zmq::stream_t::xsetsockopt (int option_, int zmq::stream_t::xrecv (msg_t *msg_) { - if (prefetched) { - if (!routing_id_sent) { - int rc = msg_->move (prefetched_routing_id); + if (_prefetched) { + if (!_routing_id_sent) { + int rc = msg_->move (_prefetched_routing_id); errno_assert (rc == 0); - routing_id_sent = true; + _routing_id_sent = true; } else { - int rc = msg_->move (prefetched_msg); + int rc = msg_->move (_prefetched_msg); errno_assert (rc == 0); - prefetched = false; + _prefetched = false; } return 0; } pipe_t *pipe = NULL; - int rc = fq.recvpipe (&prefetched_msg, &pipe); + int rc = _fq.recvpipe (&_prefetched_msg, &pipe); if (rc != 0) return -1; zmq_assert (pipe != NULL); - zmq_assert ((prefetched_msg.flags () & msg_t::more) == 0); + zmq_assert ((_prefetched_msg.flags () & msg_t::more) == 0); // We have received a frame with TCP data. 
// Rather than sending this frame, we keep it in prefetched @@ -231,15 +231,15 @@ int zmq::stream_t::xrecv (msg_t *msg_) errno_assert (rc == 0); // forward metadata (if any) - metadata_t *metadata = prefetched_msg.metadata (); + metadata_t *metadata = _prefetched_msg.metadata (); if (metadata) msg_->set_metadata (metadata); memcpy (msg_->data (), routing_id.data (), routing_id.size ()); msg_->set_flags (msg_t::more); - prefetched = true; - routing_id_sent = true; + _prefetched = true; + _routing_id_sent = true; return 0; } @@ -247,34 +247,34 @@ int zmq::stream_t::xrecv (msg_t *msg_) bool zmq::stream_t::xhas_in () { // We may already have a message pre-fetched. - if (prefetched) + if (_prefetched) return true; // Try to read the next message. // The message, if read, is kept in the pre-fetch buffer. pipe_t *pipe = NULL; - int rc = fq.recvpipe (&prefetched_msg, &pipe); + int rc = _fq.recvpipe (&_prefetched_msg, &pipe); if (rc != 0) return false; zmq_assert (pipe != NULL); - zmq_assert ((prefetched_msg.flags () & msg_t::more) == 0); + zmq_assert ((_prefetched_msg.flags () & msg_t::more) == 0); const blob_t &routing_id = pipe->get_routing_id (); - rc = prefetched_routing_id.init_size (routing_id.size ()); + rc = _prefetched_routing_id.init_size (routing_id.size ()); errno_assert (rc == 0); // forward metadata (if any) - metadata_t *metadata = prefetched_msg.metadata (); + metadata_t *metadata = _prefetched_msg.metadata (); if (metadata) - prefetched_routing_id.set_metadata (metadata); + _prefetched_routing_id.set_metadata (metadata); - memcpy (prefetched_routing_id.data (), routing_id.data (), + memcpy (_prefetched_routing_id.data (), routing_id.data (), routing_id.size ()); - prefetched_routing_id.set_flags (msg_t::more); + _prefetched_routing_id.set_flags (msg_t::more); - prefetched = true; - routing_id_sent = false; + _prefetched = true; + _routing_id_sent = false; return true; } @@ -297,10 +297,10 @@ void zmq::stream_t::identify_peer (pipe_t *pipe_) routing_id.set 
((unsigned char *) connect_routing_id.c_str (), connect_routing_id.length ()); connect_routing_id.clear (); - outpipes_t::iterator it = outpipes.find (routing_id); - zmq_assert (it == outpipes.end ()); + outpipes_t::iterator it = _outpipes.find (routing_id); + zmq_assert (it == _outpipes.end ()); } else { - put_uint32 (buffer + 1, next_integral_routing_id++); + put_uint32 (buffer + 1, _next_integral_routing_id++); routing_id.set (buffer, sizeof buffer); memcpy (options.routing_id, routing_id.data (), routing_id.size ()); options.routing_id_size = @@ -310,7 +310,7 @@ void zmq::stream_t::identify_peer (pipe_t *pipe_) // Add the record into output pipes lookup table outpipe_t outpipe = {pipe_, true}; const bool ok = - outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe) + _outpipes.ZMQ_MAP_INSERT_OR_EMPLACE (ZMQ_MOVE (routing_id), outpipe) .second; zmq_assert (ok); } diff --git a/src/stream.hpp b/src/stream.hpp index 8c327522..58da1e32 100644 --- a/src/stream.hpp +++ b/src/stream.hpp @@ -61,20 +61,20 @@ class stream_t : public socket_base_t void identify_peer (pipe_t *pipe_); // Fair queueing object for inbound pipes. - fq_t fq; + fq_t _fq; // True iff there is a message held in the pre-fetch buffer. - bool prefetched; + bool _prefetched; // If true, the receiver got the message part with // the peer's identity. - bool routing_id_sent; + bool _routing_id_sent; // Holds the prefetched identity. - msg_t prefetched_routing_id; + msg_t _prefetched_routing_id; // Holds the prefetched message. - msg_t prefetched_msg; + msg_t _prefetched_msg; struct outpipe_t { @@ -84,17 +84,17 @@ class stream_t : public socket_base_t // Outbound pipes indexed by the peer IDs. typedef std::map outpipes_t; - outpipes_t outpipes; + outpipes_t _outpipes; // The pipe we are currently writing to. - zmq::pipe_t *current_out; + zmq::pipe_t *_current_out; // If true, more outgoing message parts are expected. - bool more_out; + bool _more_out; // Routing IDs are generated. 
It's a simple increment and wrap-over // algorithm. This value is the next ID to use (if not used already). - uint32_t next_integral_routing_id; + uint32_t _next_integral_routing_id; stream_t (const stream_t &); const stream_t &operator= (const stream_t &); diff --git a/src/stream_engine.cpp b/src/stream_engine.cpp index ed04bacf..f57da8dc 100644 --- a/src/stream_engine.cpp +++ b/src/stream_engine.cpp @@ -65,91 +65,91 @@ zmq::stream_engine_t::stream_engine_t (fd_t fd_, const options_t &options_, const std::string &endpoint_) : - s (fd_), - as_server (false), - handle (static_cast (NULL)), - inpos (NULL), - insize (0), - decoder (NULL), - outpos (NULL), - outsize (0), - encoder (NULL), - metadata (NULL), - handshaking (true), - greeting_size (v2_greeting_size), - greeting_bytes_read (0), - session (NULL), - options (options_), - endpoint (endpoint_), - plugged (false), - next_msg (&stream_engine_t::routing_id_msg), - process_msg (&stream_engine_t::process_routing_id_msg), - io_error (false), - subscription_required (false), - mechanism (NULL), - input_stopped (false), - output_stopped (false), - has_handshake_timer (false), - has_ttl_timer (false), - has_timeout_timer (false), - has_heartbeat_timer (false), - heartbeat_timeout (0), - socket (NULL) + _s (fd_), + _as_server (false), + _handle (static_cast (NULL)), + _inpos (NULL), + _insize (0), + _decoder (NULL), + _outpos (NULL), + _outsize (0), + _encoder (NULL), + _metadata (NULL), + _handshaking (true), + _greeting_size (v2_greeting_size), + _greeting_bytes_read (0), + _session (NULL), + _options (options_), + _endpoint (endpoint_), + _plugged (false), + _next_msg (&stream_engine_t::routing_id_msg), + _process_msg (&stream_engine_t::process_routing_id_msg), + _io_error (false), + _subscription_required (false), + _mechanism (NULL), + _input_stopped (false), + _output_stopped (false), + _has_handshake_timer (false), + _has_ttl_timer (false), + _has_timeout_timer (false), + _has_heartbeat_timer (false), + 
_heartbeat_timeout (0), + _socket (NULL) { - int rc = tx_msg.init (); + int rc = _tx_msg.init (); errno_assert (rc == 0); - rc = pong_msg.init (); + rc = _pong_msg.init (); errno_assert (rc == 0); // Put the socket into non-blocking mode. - unblock_socket (s); + unblock_socket (_s); - int family = get_peer_ip_address (s, peer_address); + int family = get_peer_ip_address (_s, _peer_address); if (family == 0) - peer_address.clear (); + _peer_address.clear (); #if defined ZMQ_HAVE_SO_PEERCRED else if (family == PF_UNIX) { struct ucred cred; socklen_t size = sizeof (cred); - if (!getsockopt (s, SOL_SOCKET, SO_PEERCRED, &cred, &size)) { + if (!getsockopt (_s, SOL_SOCKET, SO_PEERCRED, &cred, &size)) { std::ostringstream buf; buf << ":" << cred.uid << ":" << cred.gid << ":" << cred.pid; - peer_address += buf.str (); + _peer_address += buf.str (); } } #elif defined ZMQ_HAVE_LOCAL_PEERCRED else if (family == PF_UNIX) { struct xucred cred; socklen_t size = sizeof (cred); - if (!getsockopt (s, 0, LOCAL_PEERCRED, &cred, &size) + if (!getsockopt (_s, 0, LOCAL_PEERCRED, &cred, &size) && cred.cr_version == XUCRED_VERSION) { std::ostringstream buf; buf << ":" << cred.cr_uid << ":"; if (cred.cr_ngroups > 0) buf << cred.cr_groups[0]; buf << ":"; - peer_address += buf.str (); + _peer_address += buf.str (); } } #endif - if (options.heartbeat_interval > 0) { - heartbeat_timeout = options.heartbeat_timeout; - if (heartbeat_timeout == -1) - heartbeat_timeout = options.heartbeat_interval; + if (_options.heartbeat_interval > 0) { + _heartbeat_timeout = _options.heartbeat_timeout; + if (_heartbeat_timeout == -1) + _heartbeat_timeout = _options.heartbeat_interval; } } zmq::stream_engine_t::~stream_engine_t () { - zmq_assert (!plugged); + zmq_assert (!_plugged); - if (s != retired_fd) { + if (_s != retired_fd) { #ifdef ZMQ_HAVE_WINDOWS - int rc = closesocket (s); + int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else - int rc = close (s); + int rc = close (_s); #if 
defined(__FreeBSD_kernel__) || defined(__FreeBSD__) // FreeBSD may return ECONNRESET on close() under load but this is not // an error. @@ -158,72 +158,72 @@ zmq::stream_engine_t::~stream_engine_t () #endif errno_assert (rc == 0); #endif - s = retired_fd; + _s = retired_fd; } - int rc = tx_msg.close (); + int rc = _tx_msg.close (); errno_assert (rc == 0); // Drop reference to metadata and destroy it if we are // the only user. - if (metadata != NULL) { - if (metadata->drop_ref ()) { - LIBZMQ_DELETE (metadata); + if (_metadata != NULL) { + if (_metadata->drop_ref ()) { + LIBZMQ_DELETE (_metadata); } } - LIBZMQ_DELETE (encoder); - LIBZMQ_DELETE (decoder); - LIBZMQ_DELETE (mechanism); + LIBZMQ_DELETE (_encoder); + LIBZMQ_DELETE (_decoder); + LIBZMQ_DELETE (_mechanism); } void zmq::stream_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) { - zmq_assert (!plugged); - plugged = true; + zmq_assert (!_plugged); + _plugged = true; // Connect to session object. - zmq_assert (!session); + zmq_assert (!_session); zmq_assert (session_); - session = session_; - socket = session->get_socket (); + _session = session_; + _socket = _session->get_socket (); // Connect to I/O threads poller object. 
io_object_t::plug (io_thread_); - handle = add_fd (s); - io_error = false; + _handle = add_fd (_s); + _io_error = false; - if (options.raw_socket) { + if (_options.raw_socket) { // no handshaking for raw sock, instantiate raw encoder and decoders - encoder = new (std::nothrow) raw_encoder_t (out_batch_size); - alloc_assert (encoder); + _encoder = new (std::nothrow) raw_encoder_t (out_batch_size); + alloc_assert (_encoder); - decoder = new (std::nothrow) raw_decoder_t (in_batch_size); - alloc_assert (decoder); + _decoder = new (std::nothrow) raw_decoder_t (in_batch_size); + alloc_assert (_decoder); // disable handshaking for raw socket - handshaking = false; + _handshaking = false; - next_msg = &stream_engine_t::pull_msg_from_session; - process_msg = &stream_engine_t::push_raw_msg_to_session; + _next_msg = &stream_engine_t::pull_msg_from_session; + _process_msg = &stream_engine_t::push_raw_msg_to_session; properties_t properties; if (init_properties (properties)) { // Compile metadata. - zmq_assert (metadata == NULL); - metadata = new (std::nothrow) metadata_t (properties); - alloc_assert (metadata); + zmq_assert (_metadata == NULL); + _metadata = new (std::nothrow) metadata_t (properties); + alloc_assert (_metadata); } - if (options.raw_notify) { + if (_options.raw_notify) { // For raw sockets, send an initial 0-length message to the // application so that it knows a peer has connected. msg_t connector; connector.init (); push_raw_msg_to_session (&connector); connector.close (); - session->flush (); + _session->flush (); } } else { // start optional timer, to prevent handshake hanging on no input @@ -231,52 +231,52 @@ void zmq::stream_engine_t::plug (io_thread_t *io_thread_, // Send the 'length' and 'flags' fields of the routing id message. // The 'length' field is encoded in the long format. 
- outpos = greeting_send; - outpos[outsize++] = 0xff; - put_uint64 (&outpos[outsize], options.routing_id_size + 1); - outsize += 8; - outpos[outsize++] = 0x7f; + _outpos = _greeting_send; + _outpos[_outsize++] = 0xff; + put_uint64 (&_outpos[_outsize], _options.routing_id_size + 1); + _outsize += 8; + _outpos[_outsize++] = 0x7f; } - set_pollin (handle); - set_pollout (handle); + set_pollin (_handle); + set_pollout (_handle); // Flush all the data that may have been already received downstream. in_event (); } void zmq::stream_engine_t::unplug () { - zmq_assert (plugged); - plugged = false; + zmq_assert (_plugged); + _plugged = false; // Cancel all timers. - if (has_handshake_timer) { + if (_has_handshake_timer) { cancel_timer (handshake_timer_id); - has_handshake_timer = false; + _has_handshake_timer = false; } - if (has_ttl_timer) { + if (_has_ttl_timer) { cancel_timer (heartbeat_ttl_timer_id); - has_ttl_timer = false; + _has_ttl_timer = false; } - if (has_timeout_timer) { + if (_has_timeout_timer) { cancel_timer (heartbeat_timeout_timer_id); - has_timeout_timer = false; + _has_timeout_timer = false; } - if (has_heartbeat_timer) { + if (_has_heartbeat_timer) { cancel_timer (heartbeat_ivl_timer_id); - has_heartbeat_timer = false; + _has_heartbeat_timer = false; } // Cancel all fd subscriptions. - if (!io_error) - rm_fd (handle); + if (!_io_error) + rm_fd (_handle); // Disconnect from I/O threads poller object. io_object_t::unplug (); - session = NULL; + _session = NULL; } void zmq::stream_engine_t::terminate () @@ -287,32 +287,32 @@ void zmq::stream_engine_t::terminate () void zmq::stream_engine_t::in_event () { - zmq_assert (!io_error); + zmq_assert (!_io_error); // If still handshaking, receive and process the greeting message. - if (unlikely (handshaking)) + if (unlikely (_handshaking)) if (!handshake ()) return; - zmq_assert (decoder); + zmq_assert (_decoder); // If there has been an I/O error, stop polling. 
- if (input_stopped) { - rm_fd (handle); - io_error = true; + if (_input_stopped) { + rm_fd (_handle); + _io_error = true; return; } // If there's no data to process in the buffer... - if (!insize) { + if (!_insize) { // Retrieve the buffer and read as much data as possible. // Note that buffer can be arbitrarily large. However, we assume // the underlying TCP layer has fixed buffer size and thus the // number of bytes read will be always limited. size_t bufsize = 0; - decoder->get_buffer (&inpos, &bufsize); + _decoder->get_buffer (&_inpos, &bufsize); - const int rc = tcp_read (s, inpos, bufsize); + const int rc = tcp_read (_s, _inpos, bufsize); if (rc == 0) { // connection closed by peer @@ -327,22 +327,22 @@ void zmq::stream_engine_t::in_event () } // Adjust input size - insize = static_cast (rc); + _insize = static_cast (rc); // Adjust buffer size to received bytes - decoder->resize_buffer (insize); + _decoder->resize_buffer (_insize); } int rc = 0; size_t processed = 0; - while (insize > 0) { - rc = decoder->decode (inpos, insize, processed); - zmq_assert (processed <= insize); - inpos += processed; - insize -= processed; + while (_insize > 0) { + rc = _decoder->decode (_inpos, _insize, processed); + zmq_assert (processed <= _insize); + _inpos += processed; + _insize -= processed; if (rc == 0 || rc == -1) break; - rc = (this->*process_msg) (decoder->msg ()); + rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) break; } @@ -354,46 +354,46 @@ void zmq::stream_engine_t::in_event () error (protocol_error); return; } - input_stopped = true; - reset_pollin (handle); + _input_stopped = true; + reset_pollin (_handle); } - session->flush (); + _session->flush (); } void zmq::stream_engine_t::out_event () { - zmq_assert (!io_error); + zmq_assert (!_io_error); // If write buffer is empty, try to read new data from the encoder. 
- if (!outsize) { + if (!_outsize) { // Even when we stop polling as soon as there is no // data to send, the poller may invoke out_event one // more time due to 'speculative write' optimisation. - if (unlikely (encoder == NULL)) { - zmq_assert (handshaking); + if (unlikely (_encoder == NULL)) { + zmq_assert (_handshaking); return; } - outpos = NULL; - outsize = encoder->encode (&outpos, 0); + _outpos = NULL; + _outsize = _encoder->encode (&_outpos, 0); - while (outsize < static_cast (out_batch_size)) { - if ((this->*next_msg) (&tx_msg) == -1) + while (_outsize < static_cast (out_batch_size)) { + if ((this->*_next_msg) (&_tx_msg) == -1) break; - encoder->load_msg (&tx_msg); - unsigned char *bufptr = outpos + outsize; - size_t n = encoder->encode (&bufptr, out_batch_size - outsize); + _encoder->load_msg (&_tx_msg); + unsigned char *bufptr = _outpos + _outsize; + size_t n = _encoder->encode (&bufptr, out_batch_size - _outsize); zmq_assert (n > 0); - if (outpos == NULL) - outpos = bufptr; - outsize += n; + if (_outpos == NULL) + _outpos = bufptr; + _outsize += n; } // If there is no data to send, stop polling for output. - if (outsize == 0) { - output_stopped = true; - reset_pollout (handle); + if (_outsize == 0) { + _output_stopped = true; + reset_pollout (_handle); return; } } @@ -403,34 +403,34 @@ void zmq::stream_engine_t::out_event () // arbitrarily large. However, we assume that underlying TCP layer has // limited transmission buffer and thus the actual number of bytes // written should be reasonably modest. - const int nbytes = tcp_write (s, outpos, outsize); + const int nbytes = tcp_write (_s, _outpos, _outsize); // IO error has occurred. We stop waiting for output events. // The engine is not terminated until we detect input error; // this is necessary to prevent losing incoming messages. 
if (nbytes == -1) { - reset_pollout (handle); + reset_pollout (_handle); return; } - outpos += nbytes; - outsize -= nbytes; + _outpos += nbytes; + _outsize -= nbytes; // If we are still handshaking and there are no data // to send, stop polling for output. - if (unlikely (handshaking)) - if (outsize == 0) - reset_pollout (handle); + if (unlikely (_handshaking)) + if (_outsize == 0) + reset_pollout (_handle); } void zmq::stream_engine_t::restart_output () { - if (unlikely (io_error)) + if (unlikely (_io_error)) return; - if (likely (output_stopped)) { - set_pollout (handle); - output_stopped = false; + if (likely (_output_stopped)) { + set_pollout (_handle); + _output_stopped = false; } // Speculative write: The assumption is that at the moment new message @@ -442,42 +442,42 @@ void zmq::stream_engine_t::restart_output () void zmq::stream_engine_t::restart_input () { - zmq_assert (input_stopped); - zmq_assert (session != NULL); - zmq_assert (decoder != NULL); + zmq_assert (_input_stopped); + zmq_assert (_session != NULL); + zmq_assert (_decoder != NULL); - int rc = (this->*process_msg) (decoder->msg ()); + int rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) { if (errno == EAGAIN) - session->flush (); + _session->flush (); else error (protocol_error); return; } - while (insize > 0) { + while (_insize > 0) { size_t processed = 0; - rc = decoder->decode (inpos, insize, processed); - zmq_assert (processed <= insize); - inpos += processed; - insize -= processed; + rc = _decoder->decode (_inpos, _insize, processed); + zmq_assert (processed <= _insize); + _inpos += processed; + _insize -= processed; if (rc == 0 || rc == -1) break; - rc = (this->*process_msg) (decoder->msg ()); + rc = (this->*_process_msg) (_decoder->msg ()); if (rc == -1) break; } if (rc == -1 && errno == EAGAIN) - session->flush (); - else if (io_error) + _session->flush (); + else if (_io_error) error (connection_error); else if (rc == -1) error (protocol_error); else { - input_stopped = 
false; - set_pollin (handle); - session->flush (); + _input_stopped = false; + set_pollin (_handle); + _session->flush (); // Speculative read. in_event (); @@ -486,12 +486,12 @@ void zmq::stream_engine_t::restart_input () bool zmq::stream_engine_t::handshake () { - zmq_assert (handshaking); - zmq_assert (greeting_bytes_read < greeting_size); + zmq_assert (_handshaking); + zmq_assert (_greeting_bytes_read < _greeting_size); // Receive the greeting. - while (greeting_bytes_read < greeting_size) { - const int n = tcp_read (s, greeting_recv + greeting_bytes_read, - greeting_size - greeting_bytes_read); + while (_greeting_bytes_read < _greeting_size) { + const int n = tcp_read (_s, _greeting_recv + _greeting_bytes_read, + _greeting_size - _greeting_bytes_read); if (n == 0) { errno = EPIPE; error (connection_error); @@ -503,62 +503,62 @@ bool zmq::stream_engine_t::handshake () return false; } - greeting_bytes_read += n; + _greeting_bytes_read += n; // We have received at least one byte from the peer. // If the first byte is not 0xff, we know that the // peer is using unversioned protocol. - if (greeting_recv[0] != 0xff) + if (_greeting_recv[0] != 0xff) break; - if (greeting_bytes_read < signature_size) + if (_greeting_bytes_read < signature_size) continue; // Inspect the right-most bit of the 10th byte (which coincides // with the 'flags' field if a regular message was sent). // Zero indicates this is a header of a routing id message // (i.e. the peer is using the unversioned protocol). - if (!(greeting_recv[9] & 0x01)) + if (!(_greeting_recv[9] & 0x01)) break; // The peer is using versioned protocol. // Send the major version number. 
- if (outpos + outsize == greeting_send + signature_size) { - if (outsize == 0) - set_pollout (handle); - outpos[outsize++] = 3; // Major version number + if (_outpos + _outsize == _greeting_send + signature_size) { + if (_outsize == 0) + set_pollout (_handle); + _outpos[_outsize++] = 3; // Major version number } - if (greeting_bytes_read > signature_size) { - if (outpos + outsize == greeting_send + signature_size + 1) { - if (outsize == 0) - set_pollout (handle); + if (_greeting_bytes_read > signature_size) { + if (_outpos + _outsize == _greeting_send + signature_size + 1) { + if (_outsize == 0) + set_pollout (_handle); // Use ZMTP/2.0 to talk to older peers. - if (greeting_recv[10] == ZMTP_1_0 - || greeting_recv[10] == ZMTP_2_0) - outpos[outsize++] = options.type; + if (_greeting_recv[10] == ZMTP_1_0 + || _greeting_recv[10] == ZMTP_2_0) + _outpos[_outsize++] = _options.type; else { - outpos[outsize++] = 0; // Minor version number - memset (outpos + outsize, 0, 20); + _outpos[_outsize++] = 0; // Minor version number + memset (_outpos + _outsize, 0, 20); - zmq_assert (options.mechanism == ZMQ_NULL - || options.mechanism == ZMQ_PLAIN - || options.mechanism == ZMQ_CURVE - || options.mechanism == ZMQ_GSSAPI); + zmq_assert (_options.mechanism == ZMQ_NULL + || _options.mechanism == ZMQ_PLAIN + || _options.mechanism == ZMQ_CURVE + || _options.mechanism == ZMQ_GSSAPI); - if (options.mechanism == ZMQ_NULL) - memcpy (outpos + outsize, "NULL", 4); - else if (options.mechanism == ZMQ_PLAIN) - memcpy (outpos + outsize, "PLAIN", 5); - else if (options.mechanism == ZMQ_GSSAPI) - memcpy (outpos + outsize, "GSSAPI", 6); - else if (options.mechanism == ZMQ_CURVE) - memcpy (outpos + outsize, "CURVE", 5); - outsize += 20; - memset (outpos + outsize, 0, 32); - outsize += 32; - greeting_size = v3_greeting_size; + if (_options.mechanism == ZMQ_NULL) + memcpy (_outpos + _outsize, "NULL", 4); + else if (_options.mechanism == ZMQ_PLAIN) + memcpy (_outpos + _outsize, "PLAIN", 5); + else if 
(_options.mechanism == ZMQ_GSSAPI) + memcpy (_outpos + _outsize, "GSSAPI", 6); + else if (_options.mechanism == ZMQ_CURVE) + memcpy (_outpos + _outsize, "CURVE", 5); + _outsize += 20; + memset (_outpos + _outsize, 0, 32); + _outsize += 32; + _greeting_size = v3_greeting_size; } } } @@ -569,155 +569,155 @@ bool zmq::stream_engine_t::handshake () // Is the peer using ZMTP/1.0 with no revision number? // If so, we send and receive rest of routing id message - if (greeting_recv[0] != 0xff || !(greeting_recv[9] & 0x01)) { - if (session->zap_enabled ()) { + if (_greeting_recv[0] != 0xff || !(_greeting_recv[9] & 0x01)) { + if (_session->zap_enabled ()) { // reject ZMTP 1.0 connections if ZAP is enabled error (protocol_error); return false; } - encoder = new (std::nothrow) v1_encoder_t (out_batch_size); - alloc_assert (encoder); + _encoder = new (std::nothrow) v1_encoder_t (out_batch_size); + alloc_assert (_encoder); - decoder = - new (std::nothrow) v1_decoder_t (in_batch_size, options.maxmsgsize); - alloc_assert (decoder); + _decoder = + new (std::nothrow) v1_decoder_t (in_batch_size, _options.maxmsgsize); + alloc_assert (_decoder); // We have already sent the message header. // Since there is no way to tell the encoder to // skip the message header, we simply throw that // header data away. - const size_t header_size = options.routing_id_size + 1 >= 255 ? 10 : 2; + const size_t header_size = _options.routing_id_size + 1 >= 255 ? 10 : 2; unsigned char tmp[10], *bufferp = tmp; // Prepare the routing id message and load it into encoder. // Then consume bytes we have already sent to the peer. 
- const int rc = tx_msg.init_size (options.routing_id_size); + const int rc = _tx_msg.init_size (_options.routing_id_size); zmq_assert (rc == 0); - memcpy (tx_msg.data (), options.routing_id, options.routing_id_size); - encoder->load_msg (&tx_msg); - size_t buffer_size = encoder->encode (&bufferp, header_size); + memcpy (_tx_msg.data (), _options.routing_id, _options.routing_id_size); + _encoder->load_msg (&_tx_msg); + size_t buffer_size = _encoder->encode (&bufferp, header_size); zmq_assert (buffer_size == header_size); // Make sure the decoder sees the data we have already received. - inpos = greeting_recv; - insize = greeting_bytes_read; + _inpos = _greeting_recv; + _insize = _greeting_bytes_read; // To allow for interoperability with peers that do not forward // their subscriptions, we inject a phantom subscription message // message into the incoming message stream. - if (options.type == ZMQ_PUB || options.type == ZMQ_XPUB) - subscription_required = true; + if (_options.type == ZMQ_PUB || _options.type == ZMQ_XPUB) + _subscription_required = true; // We are sending our routing id now and the next message // will come from the socket. - next_msg = &stream_engine_t::pull_msg_from_session; + _next_msg = &stream_engine_t::pull_msg_from_session; // We are expecting routing id message. 
- process_msg = &stream_engine_t::process_routing_id_msg; - } else if (greeting_recv[revision_pos] == ZMTP_1_0) { - if (session->zap_enabled ()) { + _process_msg = &stream_engine_t::process_routing_id_msg; + } else if (_greeting_recv[revision_pos] == ZMTP_1_0) { + if (_session->zap_enabled ()) { // reject ZMTP 1.0 connections if ZAP is enabled error (protocol_error); return false; } - encoder = new (std::nothrow) v1_encoder_t (out_batch_size); - alloc_assert (encoder); + _encoder = new (std::nothrow) v1_encoder_t (out_batch_size); + alloc_assert (_encoder); - decoder = - new (std::nothrow) v1_decoder_t (in_batch_size, options.maxmsgsize); - alloc_assert (decoder); - } else if (greeting_recv[revision_pos] == ZMTP_2_0) { - if (session->zap_enabled ()) { + _decoder = + new (std::nothrow) v1_decoder_t (in_batch_size, _options.maxmsgsize); + alloc_assert (_decoder); + } else if (_greeting_recv[revision_pos] == ZMTP_2_0) { + if (_session->zap_enabled ()) { // reject ZMTP 2.0 connections if ZAP is enabled error (protocol_error); return false; } - encoder = new (std::nothrow) v2_encoder_t (out_batch_size); - alloc_assert (encoder); + _encoder = new (std::nothrow) v2_encoder_t (out_batch_size); + alloc_assert (_encoder); - decoder = new (std::nothrow) - v2_decoder_t (in_batch_size, options.maxmsgsize, options.zero_copy); - alloc_assert (decoder); + _decoder = new (std::nothrow) + v2_decoder_t (in_batch_size, _options.maxmsgsize, _options.zero_copy); + alloc_assert (_decoder); } else { - encoder = new (std::nothrow) v2_encoder_t (out_batch_size); - alloc_assert (encoder); + _encoder = new (std::nothrow) v2_encoder_t (out_batch_size); + alloc_assert (_encoder); - decoder = new (std::nothrow) - v2_decoder_t (in_batch_size, options.maxmsgsize, options.zero_copy); - alloc_assert (decoder); + _decoder = new (std::nothrow) + v2_decoder_t (in_batch_size, _options.maxmsgsize, _options.zero_copy); + alloc_assert (_decoder); - if (options.mechanism == ZMQ_NULL - && memcmp 
(greeting_recv + 12, + if (_options.mechanism == ZMQ_NULL + && memcmp (_greeting_recv + 12, "NULL\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { - mechanism = new (std::nothrow) - null_mechanism_t (session, peer_address, options); - alloc_assert (mechanism); - } else if (options.mechanism == ZMQ_PLAIN - && memcmp (greeting_recv + 12, + _mechanism = new (std::nothrow) + null_mechanism_t (_session, _peer_address, _options); + alloc_assert (_mechanism); + } else if (_options.mechanism == ZMQ_PLAIN + && memcmp (_greeting_recv + 12, "PLAIN\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { - if (options.as_server) - mechanism = new (std::nothrow) - plain_server_t (session, peer_address, options); + if (_options.as_server) + _mechanism = new (std::nothrow) + plain_server_t (_session, _peer_address, _options); else - mechanism = - new (std::nothrow) plain_client_t (session, options); - alloc_assert (mechanism); + _mechanism = + new (std::nothrow) plain_client_t (_session, _options); + alloc_assert (_mechanism); } #ifdef ZMQ_HAVE_CURVE - else if (options.mechanism == ZMQ_CURVE - && memcmp (greeting_recv + 12, + else if (_options.mechanism == ZMQ_CURVE + && memcmp (_greeting_recv + 12, "CURVE\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { - if (options.as_server) - mechanism = new (std::nothrow) - curve_server_t (session, peer_address, options); + if (_options.as_server) + _mechanism = new (std::nothrow) + curve_server_t (_session, _peer_address, _options); else - mechanism = - new (std::nothrow) curve_client_t (session, options); - alloc_assert (mechanism); + _mechanism = + new (std::nothrow) curve_client_t (_session, _options); + alloc_assert (_mechanism); } #endif #ifdef HAVE_LIBGSSAPI_KRB5 - else if (options.mechanism == ZMQ_GSSAPI - && memcmp (greeting_recv + 12, + else if (_options.mechanism == ZMQ_GSSAPI + && memcmp (_greeting_recv + 12, "GSSAPI\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 20) == 0) { - if (options.as_server) - mechanism = new (std::nothrow) - gssapi_server_t (session, 
peer_address, options); + if (_options.as_server) + _mechanism = new (std::nothrow) + gssapi_server_t (_session, _peer_address, _options); else - mechanism = - new (std::nothrow) gssapi_client_t (session, options); - alloc_assert (mechanism); + _mechanism = + new (std::nothrow) gssapi_client_t (_session, _options); + alloc_assert (_mechanism); } #endif else { - session->get_socket ()->event_handshake_failed_protocol ( - session->get_endpoint (), + _session->get_socket ()->event_handshake_failed_protocol ( + _session->get_endpoint (), ZMQ_PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH); error (protocol_error); return false; } - next_msg = &stream_engine_t::next_handshake_command; - process_msg = &stream_engine_t::process_handshake_command; + _next_msg = &stream_engine_t::next_handshake_command; + _process_msg = &stream_engine_t::process_handshake_command; } // Start polling for output if necessary. - if (outsize == 0) - set_pollout (handle); + if (_outsize == 0) + set_pollout (_handle); // Handshaking was successful. // Switch into the normal message flow. 
- handshaking = false; + _handshaking = false; - if (has_handshake_timer) { + if (_has_handshake_timer) { cancel_timer (handshake_timer_id); - has_handshake_timer = false; + _has_handshake_timer = false; } return true; @@ -725,19 +725,19 @@ bool zmq::stream_engine_t::handshake () int zmq::stream_engine_t::routing_id_msg (msg_t *msg_) { - int rc = msg_->init_size (options.routing_id_size); + int rc = msg_->init_size (_options.routing_id_size); errno_assert (rc == 0); - if (options.routing_id_size > 0) - memcpy (msg_->data (), options.routing_id, options.routing_id_size); - next_msg = &stream_engine_t::pull_msg_from_session; + if (_options.routing_id_size > 0) + memcpy (msg_->data (), _options.routing_id, _options.routing_id_size); + _next_msg = &stream_engine_t::pull_msg_from_session; return 0; } int zmq::stream_engine_t::process_routing_id_msg (msg_t *msg_) { - if (options.recv_routing_id) { + if (_options.recv_routing_id) { msg_->set_flags (msg_t::routing_id); - int rc = session->push_msg (msg_); + int rc = _session->push_msg (msg_); errno_assert (rc == 0); } else { int rc = msg_->close (); @@ -746,7 +746,7 @@ int zmq::stream_engine_t::process_routing_id_msg (msg_t *msg_) errno_assert (rc == 0); } - if (subscription_required) { + if (_subscription_required) { msg_t subscription; // Inject the subscription message, so that also @@ -754,28 +754,28 @@ int zmq::stream_engine_t::process_routing_id_msg (msg_t *msg_) int rc = subscription.init_size (1); errno_assert (rc == 0); *static_cast (subscription.data ()) = 1; - rc = session->push_msg (&subscription); + rc = _session->push_msg (&subscription); errno_assert (rc == 0); } - process_msg = &stream_engine_t::push_msg_to_session; + _process_msg = &stream_engine_t::push_msg_to_session; return 0; } int zmq::stream_engine_t::next_handshake_command (msg_t *msg_) { - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); - if (mechanism->status () == mechanism_t::ready) { + if (_mechanism->status () == 
mechanism_t::ready) { mechanism_ready (); return pull_and_encode (msg_); } - if (mechanism->status () == mechanism_t::error) { + if (_mechanism->status () == mechanism_t::error) { errno = EPROTO; return -1; } else { - const int rc = mechanism->next_handshake_command (msg_); + const int rc = _mechanism->next_handshake_command (msg_); if (rc == 0) msg_->set_flags (msg_t::command); @@ -786,16 +786,16 @@ int zmq::stream_engine_t::next_handshake_command (msg_t *msg_) int zmq::stream_engine_t::process_handshake_command (msg_t *msg_) { - zmq_assert (mechanism != NULL); - const int rc = mechanism->process_handshake_command (msg_); + zmq_assert (_mechanism != NULL); + const int rc = _mechanism->process_handshake_command (msg_); if (rc == 0) { - if (mechanism->status () == mechanism_t::ready) + if (_mechanism->status () == mechanism_t::ready) mechanism_ready (); - else if (mechanism->status () == mechanism_t::error) { + else if (_mechanism->status () == mechanism_t::error) { errno = EPROTO; return -1; } - if (output_stopped) + if (_output_stopped) restart_output (); } @@ -804,35 +804,35 @@ int zmq::stream_engine_t::process_handshake_command (msg_t *msg_) void zmq::stream_engine_t::zap_msg_available () { - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); - const int rc = mechanism->zap_msg_available (); + const int rc = _mechanism->zap_msg_available (); if (rc == -1) { error (protocol_error); return; } - if (input_stopped) + if (_input_stopped) restart_input (); - if (output_stopped) + if (_output_stopped) restart_output (); } const char *zmq::stream_engine_t::get_endpoint () const { - return endpoint.c_str (); + return _endpoint.c_str (); } void zmq::stream_engine_t::mechanism_ready () { - if (options.heartbeat_interval > 0) { - add_timer (options.heartbeat_interval, heartbeat_ivl_timer_id); - has_heartbeat_timer = true; + if (_options.heartbeat_interval > 0) { + add_timer (_options.heartbeat_interval, heartbeat_ivl_timer_id); + _has_heartbeat_timer = true; 
} - if (options.recv_routing_id) { + if (_options.recv_routing_id) { msg_t routing_id; - mechanism->peer_routing_id (&routing_id); - const int rc = session->push_msg (&routing_id); + _mechanism->peer_routing_id (&routing_id); + const int rc = _session->push_msg (&routing_id); if (rc == -1 && errno == EAGAIN) { // If the write is failing at this stage with // an EAGAIN the pipe must be being shut down, @@ -840,100 +840,100 @@ void zmq::stream_engine_t::mechanism_ready () return; } errno_assert (rc == 0); - session->flush (); + _session->flush (); } - next_msg = &stream_engine_t::pull_and_encode; - process_msg = &stream_engine_t::write_credential; + _next_msg = &stream_engine_t::pull_and_encode; + _process_msg = &stream_engine_t::write_credential; // Compile metadata. properties_t properties; init_properties (properties); // Add ZAP properties. - const properties_t &zap_properties = mechanism->get_zap_properties (); + const properties_t &zap_properties = _mechanism->get_zap_properties (); properties.insert (zap_properties.begin (), zap_properties.end ()); // Add ZMTP properties. 
- const properties_t &zmtp_properties = mechanism->get_zmtp_properties (); + const properties_t &zmtp_properties = _mechanism->get_zmtp_properties (); properties.insert (zmtp_properties.begin (), zmtp_properties.end ()); - zmq_assert (metadata == NULL); + zmq_assert (_metadata == NULL); if (!properties.empty ()) { - metadata = new (std::nothrow) metadata_t (properties); - alloc_assert (metadata); + _metadata = new (std::nothrow) metadata_t (properties); + alloc_assert (_metadata); } #ifdef ZMQ_BUILD_DRAFT_API - socket->event_handshake_succeeded (endpoint, 0); + _socket->event_handshake_succeeded (_endpoint, 0); #endif } int zmq::stream_engine_t::pull_msg_from_session (msg_t *msg_) { - return session->pull_msg (msg_); + return _session->pull_msg (msg_); } int zmq::stream_engine_t::push_msg_to_session (msg_t *msg_) { - return session->push_msg (msg_); + return _session->push_msg (msg_); } int zmq::stream_engine_t::push_raw_msg_to_session (msg_t *msg_) { - if (metadata && metadata != msg_->metadata ()) - msg_->set_metadata (metadata); + if (_metadata && _metadata != msg_->metadata ()) + msg_->set_metadata (_metadata); return push_msg_to_session (msg_); } int zmq::stream_engine_t::write_credential (msg_t *msg_) { - zmq_assert (mechanism != NULL); - zmq_assert (session != NULL); + zmq_assert (_mechanism != NULL); + zmq_assert (_session != NULL); - const blob_t &credential = mechanism->get_user_id (); + const blob_t &credential = _mechanism->get_user_id (); if (credential.size () > 0) { msg_t msg; int rc = msg.init_size (credential.size ()); zmq_assert (rc == 0); memcpy (msg.data (), credential.data (), credential.size ()); msg.set_flags (msg_t::credential); - rc = session->push_msg (&msg); + rc = _session->push_msg (&msg); if (rc == -1) { rc = msg.close (); errno_assert (rc == 0); return -1; } } - process_msg = &stream_engine_t::decode_and_push; + _process_msg = &stream_engine_t::decode_and_push; return decode_and_push (msg_); } int zmq::stream_engine_t::pull_and_encode 
(msg_t *msg_) { - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); - if (session->pull_msg (msg_) == -1) + if (_session->pull_msg (msg_) == -1) return -1; - if (mechanism->encode (msg_) == -1) + if (_mechanism->encode (msg_) == -1) return -1; return 0; } int zmq::stream_engine_t::decode_and_push (msg_t *msg_) { - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); - if (mechanism->decode (msg_) == -1) + if (_mechanism->decode (msg_) == -1) return -1; - if (has_timeout_timer) { - has_timeout_timer = false; + if (_has_timeout_timer) { + _has_timeout_timer = false; cancel_timer (heartbeat_timeout_timer_id); } - if (has_ttl_timer) { - has_ttl_timer = false; + if (_has_ttl_timer) { + _has_ttl_timer = false; cancel_timer (heartbeat_ttl_timer_id); } @@ -941,11 +941,11 @@ int zmq::stream_engine_t::decode_and_push (msg_t *msg_) process_command_message (msg_); } - if (metadata) - msg_->set_metadata (metadata); - if (session->push_msg (msg_) == -1) { + if (_metadata) + msg_->set_metadata (_metadata); + if (_session->push_msg (msg_) == -1) { if (errno == EAGAIN) - process_msg = &stream_engine_t::push_one_then_decode_and_push; + _process_msg = &stream_engine_t::push_one_then_decode_and_push; return -1; } return 0; @@ -953,59 +953,59 @@ int zmq::stream_engine_t::decode_and_push (msg_t *msg_) int zmq::stream_engine_t::push_one_then_decode_and_push (msg_t *msg_) { - const int rc = session->push_msg (msg_); + const int rc = _session->push_msg (msg_); if (rc == 0) - process_msg = &stream_engine_t::decode_and_push; + _process_msg = &stream_engine_t::decode_and_push; return rc; } void zmq::stream_engine_t::error (error_reason_t reason_) { - if (options.raw_socket && options.raw_notify) { + if (_options.raw_socket && _options.raw_notify) { // For raw sockets, send a final 0-length message to the application // so that it knows the peer has been disconnected. 
msg_t terminator; terminator.init (); - (this->*process_msg) (&terminator); + (this->*_process_msg) (&terminator); terminator.close (); } - zmq_assert (session); + zmq_assert (_session); #ifdef ZMQ_BUILD_DRAFT_API // protocol errors have been signaled already at the point where they occurred if (reason_ != protocol_error - && (mechanism == NULL - || mechanism->status () == mechanism_t::handshaking)) { + && (_mechanism == NULL + || _mechanism->status () == mechanism_t::handshaking)) { int err = errno; - socket->event_handshake_failed_no_detail (endpoint, err); + _socket->event_handshake_failed_no_detail (_endpoint, err); } #endif - socket->event_disconnected (endpoint, s); - session->flush (); - session->engine_error (reason_); + _socket->event_disconnected (_endpoint, _s); + _session->flush (); + _session->engine_error (reason_); unplug (); delete this; } void zmq::stream_engine_t::set_handshake_timer () { - zmq_assert (!has_handshake_timer); + zmq_assert (!_has_handshake_timer); - if (!options.raw_socket && options.handshake_ivl > 0) { - add_timer (options.handshake_ivl, handshake_timer_id); - has_handshake_timer = true; + if (!_options.raw_socket && _options.handshake_ivl > 0) { + add_timer (_options.handshake_ivl, handshake_timer_id); + _has_handshake_timer = true; } } bool zmq::stream_engine_t::init_properties (properties_t &properties_) { - if (peer_address.empty ()) + if (_peer_address.empty ()) return false; properties_.ZMQ_MAP_INSERT_OR_EMPLACE ( - std::string (ZMQ_MSG_PROPERTY_PEER_ADDRESS), peer_address); + std::string (ZMQ_MSG_PROPERTY_PEER_ADDRESS), _peer_address); // Private property to support deprecated SRCFD std::ostringstream stream; - stream << static_cast (s); + stream << static_cast (_s); std::string fd_string = stream.str (); properties_.ZMQ_MAP_INSERT_OR_EMPLACE (std::string ("__fd"), ZMQ_MOVE (fd_string)); @@ -1015,18 +1015,18 @@ bool zmq::stream_engine_t::init_properties (properties_t &properties_) void zmq::stream_engine_t::timer_event (int 
id_) { if (id_ == handshake_timer_id) { - has_handshake_timer = false; + _has_handshake_timer = false; // handshake timer expired before handshake completed, so engine fail error (timeout_error); } else if (id_ == heartbeat_ivl_timer_id) { - next_msg = &stream_engine_t::produce_ping_message; + _next_msg = &stream_engine_t::produce_ping_message; out_event (); - add_timer (options.heartbeat_interval, heartbeat_ivl_timer_id); + add_timer (_options.heartbeat_interval, heartbeat_ivl_timer_id); } else if (id_ == heartbeat_ttl_timer_id) { - has_ttl_timer = false; + _has_ttl_timer = false; error (timeout_error); } else if (id_ == heartbeat_timeout_timer_id) { - has_timeout_timer = false; + _has_timeout_timer = false; error (timeout_error); } else // There are no other valid timer ids! @@ -1036,7 +1036,7 @@ void zmq::stream_engine_t::timer_event (int id_) int zmq::stream_engine_t::produce_ping_message (msg_t *msg_) { int rc = 0; - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); // 16-bit TTL + \4PING == 7 rc = msg_->init_size (7); @@ -1045,15 +1045,15 @@ int zmq::stream_engine_t::produce_ping_message (msg_t *msg_) // Copy in the command message memcpy (msg_->data (), "\4PING", 5); - uint16_t ttl_val = htons (options.heartbeat_ttl); + uint16_t ttl_val = htons (_options.heartbeat_ttl); memcpy ((static_cast (msg_->data ())) + 5, &ttl_val, sizeof (ttl_val)); - rc = mechanism->encode (msg_); - next_msg = &stream_engine_t::pull_and_encode; - if (!has_timeout_timer && heartbeat_timeout > 0) { - add_timer (heartbeat_timeout, heartbeat_timeout_timer_id); - has_timeout_timer = true; + rc = _mechanism->encode (msg_); + _next_msg = &stream_engine_t::pull_and_encode; + if (!_has_timeout_timer && _heartbeat_timeout > 0) { + add_timer (_heartbeat_timeout, heartbeat_timeout_timer_id); + _has_timeout_timer = true; } return rc; } @@ -1061,13 +1061,13 @@ int zmq::stream_engine_t::produce_ping_message (msg_t *msg_) int zmq::stream_engine_t::produce_pong_message (msg_t *msg_) 
{ int rc = 0; - zmq_assert (mechanism != NULL); + zmq_assert (_mechanism != NULL); - rc = msg_->move (pong_msg); + rc = msg_->move (_pong_msg); errno_assert (rc == 0); - rc = mechanism->encode (msg_); - next_msg = &stream_engine_t::pull_and_encode; + rc = _mechanism->encode (msg_); + _next_msg = &stream_engine_t::pull_and_encode; return rc; } @@ -1083,9 +1083,9 @@ int zmq::stream_engine_t::process_heartbeat_message (msg_t *msg_) // so we multiply it by 100 to get the timer interval in ms. remote_heartbeat_ttl *= 100; - if (!has_ttl_timer && remote_heartbeat_ttl > 0) { + if (!_has_ttl_timer && remote_heartbeat_ttl > 0) { add_timer (remote_heartbeat_ttl, heartbeat_ttl_timer_id); - has_ttl_timer = true; + _has_ttl_timer = true; } // As per ZMTP 3.1 the PING command might contain an up to 16 bytes @@ -1094,15 +1094,15 @@ int zmq::stream_engine_t::process_heartbeat_message (msg_t *msg_) // Given the engine goes straight to out_event, sequential PINGs will // not be a problem. size_t context_len = msg_->size () - 7 > 16 ? 16 : msg_->size () - 7; - int rc = pong_msg.init_size (5 + context_len); + int rc = _pong_msg.init_size (5 + context_len); errno_assert (rc == 0); - pong_msg.set_flags (msg_t::command); - memcpy (pong_msg.data (), "\4PONG", 5); + _pong_msg.set_flags (msg_t::command); + memcpy (_pong_msg.data (), "\4PONG", 5); if (context_len > 0) - memcpy ((static_cast (pong_msg.data ())) + 5, + memcpy ((static_cast (_pong_msg.data ())) + 5, (static_cast (msg_->data ())) + 7, context_len); - next_msg = &stream_engine_t::produce_pong_message; + _next_msg = &stream_engine_t::produce_pong_message; out_event (); } diff --git a/src/stream_engine.hpp b/src/stream_engine.hpp index fdd489de..e714fbd9 100644 --- a/src/stream_engine.hpp +++ b/src/stream_engine.hpp @@ -133,32 +133,32 @@ class stream_engine_t : public io_object_t, public i_engine int produce_pong_message (msg_t *msg_); // Underlying socket. - fd_t s; + fd_t _s; // True iff this is server's engine. 
- bool as_server; + bool _as_server; - msg_t tx_msg; + msg_t _tx_msg; // Need to store PING payload for PONG - msg_t pong_msg; + msg_t _pong_msg; - handle_t handle; + handle_t _handle; - unsigned char *inpos; - size_t insize; - i_decoder *decoder; + unsigned char *_inpos; + size_t _insize; + i_decoder *_decoder; - unsigned char *outpos; - size_t outsize; - i_encoder *encoder; + unsigned char *_outpos; + size_t _outsize; + i_encoder *_encoder; // Metadata to be attached to received messages. May be NULL. - metadata_t *metadata; + metadata_t *_metadata; // When true, we are still trying to determine whether // the peer is using versioned protocol, and if so, which // version. When false, normal message flow has started. - bool handshaking; + bool _handshaking; static const size_t signature_size = 10; @@ -169,43 +169,43 @@ class stream_engine_t : public io_object_t, public i_engine static const size_t v3_greeting_size = 64; // Expected greeting size. - size_t greeting_size; + size_t _greeting_size; // Greeting received from, and sent to peer - unsigned char greeting_recv[v3_greeting_size]; - unsigned char greeting_send[v3_greeting_size]; + unsigned char _greeting_recv[v3_greeting_size]; + unsigned char _greeting_send[v3_greeting_size]; // Size of greeting received so far - unsigned int greeting_bytes_read; + unsigned int _greeting_bytes_read; // The session this engine is attached to. - zmq::session_base_t *session; + zmq::session_base_t *_session; - const options_t options; + const options_t _options; // String representation of endpoint - std::string endpoint; + std::string _endpoint; - bool plugged; + bool _plugged; - int (stream_engine_t::*next_msg) (msg_t *msg_); + int (stream_engine_t::*_next_msg) (msg_t *msg_); - int (stream_engine_t::*process_msg) (msg_t *msg_); + int (stream_engine_t::*_process_msg) (msg_t *msg_); - bool io_error; + bool _io_error; // Indicates whether the engine is to inject a phantom // subscription message into the incoming stream. 
// Needed to support old peers. - bool subscription_required; + bool _subscription_required; - mechanism_t *mechanism; + mechanism_t *_mechanism; // True iff the engine couldn't consume the last decoded message. - bool input_stopped; + bool _input_stopped; // True iff the engine doesn't have any message to encode. - bool output_stopped; + bool _output_stopped; // ID of the handshake timer enum @@ -214,7 +214,7 @@ class stream_engine_t : public io_object_t, public i_engine }; // True is linger timer is running. - bool has_handshake_timer; + bool _has_handshake_timer; // Heartbeat stuff enum @@ -223,15 +223,15 @@ class stream_engine_t : public io_object_t, public i_engine heartbeat_timeout_timer_id = 0x81, heartbeat_ttl_timer_id = 0x82 }; - bool has_ttl_timer; - bool has_timeout_timer; - bool has_heartbeat_timer; - int heartbeat_timeout; + bool _has_ttl_timer; + bool _has_timeout_timer; + bool _has_heartbeat_timer; + int _heartbeat_timeout; // Socket - zmq::socket_base_t *socket; + zmq::socket_base_t *_socket; - std::string peer_address; + std::string _peer_address; stream_engine_t (const stream_engine_t &); const stream_engine_t &operator= (const stream_engine_t &); diff --git a/src/tcp_address.cpp b/src/tcp_address.cpp index c4dd8f4a..9f6b3914 100644 --- a/src/tcp_address.cpp +++ b/src/tcp_address.cpp @@ -50,8 +50,8 @@ zmq::tcp_address_t::tcp_address_t () : _has_src_addr (false) { - memset (&address, 0, sizeof (address)); - memset (&source_address, 0, sizeof (source_address)); + memset (&_address, 0, sizeof (_address)); + memset (&_source_address, 0, sizeof (_source_address)); } zmq::tcp_address_t::tcp_address_t (const sockaddr *sa_, socklen_t sa_len_) : @@ -59,14 +59,14 @@ zmq::tcp_address_t::tcp_address_t (const sockaddr *sa_, socklen_t sa_len_) : { zmq_assert (sa_ && sa_len_ > 0); - memset (&address, 0, sizeof (address)); - memset (&source_address, 0, sizeof (source_address)); + memset (&_address, 0, sizeof (_address)); + memset (&_source_address, 0, sizeof 
(_source_address)); if (sa_->sa_family == AF_INET - && sa_len_ >= static_cast (sizeof (address.ipv4))) - memcpy (&address.ipv4, sa_, sizeof (address.ipv4)); + && sa_len_ >= static_cast (sizeof (_address.ipv4))) + memcpy (&_address.ipv4, sa_, sizeof (_address.ipv4)); else if (sa_->sa_family == AF_INET6 - && sa_len_ >= static_cast (sizeof (address.ipv6))) - memcpy (&address.ipv6, sa_, sizeof (address.ipv6)); + && sa_len_ >= static_cast (sizeof (_address.ipv6))) + memcpy (&_address.ipv6, sa_, sizeof (_address.ipv6)); } zmq::tcp_address_t::~tcp_address_t () @@ -95,7 +95,7 @@ int zmq::tcp_address_t::resolve (const char *name_, bool local_, bool ipv6_) ip_resolver_t src_resolver (src_resolver_opts); const int rc = - src_resolver.resolve (&source_address, src_name.c_str ()); + src_resolver.resolve (&_source_address, src_name.c_str ()); if (rc != 0) return -1; name_ = src_delimiter + 1; @@ -112,12 +112,12 @@ int zmq::tcp_address_t::resolve (const char *name_, bool local_, bool ipv6_) ip_resolver_t resolver (resolver_opts); - return resolver.resolve (&address, name_); + return resolver.resolve (&_address, name_); } int zmq::tcp_address_t::to_string (std::string &addr_) { - if (address.family () != AF_INET && address.family () != AF_INET6) { + if (_address.family () != AF_INET && _address.family () != AF_INET6) { addr_.clear (); return -1; } @@ -132,13 +132,13 @@ int zmq::tcp_address_t::to_string (std::string &addr_) return rc; } - if (address.family () == AF_INET6) { + if (_address.family () == AF_INET6) { std::stringstream s; - s << "tcp://[" << hbuf << "]:" << ntohs (address.ipv6.sin6_port); + s << "tcp://[" << hbuf << "]:" << ntohs (_address.ipv6.sin6_port); addr_ = s.str (); } else { std::stringstream s; - s << "tcp://" << hbuf << ":" << ntohs (address.ipv4.sin_port); + s << "tcp://" << hbuf << ":" << ntohs (_address.ipv4.sin_port); addr_ = s.str (); } return 0; @@ -146,28 +146,28 @@ int zmq::tcp_address_t::to_string (std::string &addr_) const sockaddr 
*zmq::tcp_address_t::addr () const { - return &address.generic; + return &_address.generic; } socklen_t zmq::tcp_address_t::addrlen () const { - if (address.generic.sa_family == AF_INET6) - return static_cast (sizeof (address.ipv6)); + if (_address.generic.sa_family == AF_INET6) + return static_cast (sizeof (_address.ipv6)); - return static_cast (sizeof (address.ipv4)); + return static_cast (sizeof (_address.ipv4)); } const sockaddr *zmq::tcp_address_t::src_addr () const { - return &source_address.generic; + return &_source_address.generic; } socklen_t zmq::tcp_address_t::src_addrlen () const { - if (address.family () == AF_INET6) - return static_cast (sizeof (source_address.ipv6)); + if (_address.family () == AF_INET6) + return static_cast (sizeof (_source_address.ipv6)); - return static_cast (sizeof (source_address.ipv4)); + return static_cast (sizeof (_source_address.ipv4)); } bool zmq::tcp_address_t::has_src_addr () const @@ -181,18 +181,18 @@ unsigned short zmq::tcp_address_t::family () const sa_family_t zmq::tcp_address_t::family () const #endif { - return address.family (); + return _address.family (); } zmq::tcp_address_mask_t::tcp_address_mask_t () : tcp_address_t (), - address_mask (-1) + _address_mask (-1) { } int zmq::tcp_address_mask_t::mask () const { - return address_mask; + return _address_mask; } int zmq::tcp_address_mask_t::resolve (const char *name_, bool ipv6_) @@ -222,26 +222,26 @@ int zmq::tcp_address_mask_t::resolve (const char *name_, bool ipv6_) ip_resolver_t resolver (resolver_opts); - const int rc = resolver.resolve (&address, addr_str.c_str ()); + const int rc = resolver.resolve (&_address, addr_str.c_str ()); if (rc != 0) return rc; // Parse the cidr mask number. 
if (mask_str.empty ()) { - if (address.family () == AF_INET6) - address_mask = 128; + if (_address.family () == AF_INET6) + _address_mask = 128; else - address_mask = 32; + _address_mask = 32; } else if (mask_str == "0") - address_mask = 0; + _address_mask = 0; else { const int mask = atoi (mask_str.c_str ()); - if ((mask < 1) || (address.family () == AF_INET6 && mask > 128) - || (address.family () != AF_INET6 && mask > 32)) { + if ((mask < 1) || (_address.family () == AF_INET6 && mask > 128) + || (_address.family () != AF_INET6 && mask > 32)) { errno = EINVAL; return -1; } - address_mask = mask; + _address_mask = mask; } return 0; @@ -249,11 +249,11 @@ int zmq::tcp_address_mask_t::resolve (const char *name_, bool ipv6_) int zmq::tcp_address_mask_t::to_string (std::string &addr_) { - if (address.family () != AF_INET && address.family () != AF_INET6) { + if (_address.family () != AF_INET && _address.family () != AF_INET6) { addr_.clear (); return -1; } - if (address_mask == -1) { + if (_address_mask == -1) { addr_.clear (); return -1; } @@ -266,13 +266,13 @@ int zmq::tcp_address_mask_t::to_string (std::string &addr_) return rc; } - if (address.family () == AF_INET6) { + if (_address.family () == AF_INET6) { std::stringstream s; - s << "[" << hbuf << "]/" << address_mask; + s << "[" << hbuf << "]/" << _address_mask; addr_ = s.str (); } else { std::stringstream s; - s << hbuf << "/" << address_mask; + s << hbuf << "/" << _address_mask; addr_ = s.str (); } return 0; @@ -281,13 +281,13 @@ int zmq::tcp_address_mask_t::to_string (std::string &addr_) bool zmq::tcp_address_mask_t::match_address (const struct sockaddr *ss_, const socklen_t ss_len_) const { - zmq_assert (address_mask != -1 && ss_ != NULL + zmq_assert (_address_mask != -1 && ss_ != NULL && ss_len_ >= (socklen_t) sizeof (struct sockaddr)); - if (ss_->sa_family != address.generic.sa_family) + if (ss_->sa_family != _address.generic.sa_family) return false; - if (address_mask > 0) { + if (_address_mask > 0) { int 
mask; const uint8_t *our_bytes, *their_bytes; if (ss_->sa_family == AF_INET6) { @@ -296,18 +296,18 @@ bool zmq::tcp_address_mask_t::match_address (const struct sockaddr *ss_, &((reinterpret_cast (ss_)) ->sin6_addr)); our_bytes = - reinterpret_cast (&address.ipv6.sin6_addr); + reinterpret_cast (&_address.ipv6.sin6_addr); mask = sizeof (struct in6_addr) * 8; } else { zmq_assert (ss_len_ == sizeof (struct sockaddr_in)); their_bytes = reinterpret_cast (&( (reinterpret_cast (ss_))->sin_addr)); our_bytes = - reinterpret_cast (&address.ipv4.sin_addr); + reinterpret_cast (&_address.ipv4.sin_addr); mask = sizeof (struct in_addr) * 8; } - if (address_mask < mask) - mask = address_mask; + if (_address_mask < mask) + mask = _address_mask; const size_t full_bytes = mask / 8; if (memcmp (our_bytes, their_bytes, full_bytes)) diff --git a/src/tcp_address.hpp b/src/tcp_address.hpp index b6f6aa21..44c47a2d 100644 --- a/src/tcp_address.hpp +++ b/src/tcp_address.hpp @@ -68,8 +68,8 @@ class tcp_address_t bool has_src_addr () const; protected: - ip_addr_t address; - ip_addr_t source_address; + ip_addr_t _address; + ip_addr_t _source_address; bool _has_src_addr; }; @@ -92,7 +92,7 @@ class tcp_address_mask_t : public tcp_address_t const socklen_t ss_len_) const; private: - int address_mask; + int _address_mask; }; } diff --git a/src/tcp_connecter.cpp b/src/tcp_connecter.cpp index 76e33836..8886a3d6 100644 --- a/src/tcp_connecter.cpp +++ b/src/tcp_connecter.cpp @@ -71,19 +71,19 @@ zmq::tcp_connecter_t::tcp_connecter_t (class io_thread_t *io_thread_, bool delayed_start_) : own_t (io_thread_, options_), io_object_t (io_thread_), - addr (addr_), - s (retired_fd), - handle (static_cast (NULL)), - delayed_start (delayed_start_), - connect_timer_started (false), - reconnect_timer_started (false), - session (session_), - current_reconnect_ivl (options.reconnect_ivl), - socket (session->get_socket ()) + _addr (addr_), + _s (retired_fd), + _handle (static_cast (NULL)), + _delayed_start 
(delayed_start_), + _connect_timer_started (false), + _reconnect_timer_started (false), + _session (session_), + _current_reconnect_ivl (options.reconnect_ivl), + _socket (_session->get_socket ()) { - zmq_assert (addr); - zmq_assert (addr->protocol == "tcp"); - addr->to_string (endpoint); + zmq_assert (_addr); + zmq_assert (_addr->protocol == "tcp"); + _addr->to_string (_endpoint); // TODO the return value is unused! what if it fails? if this is impossible // or does not matter, change such that endpoint in initialized using an // initializer, and make endpoint const @@ -91,15 +91,15 @@ zmq::tcp_connecter_t::tcp_connecter_t (class io_thread_t *io_thread_, zmq::tcp_connecter_t::~tcp_connecter_t () { - zmq_assert (!connect_timer_started); - zmq_assert (!reconnect_timer_started); - zmq_assert (!handle); - zmq_assert (s == retired_fd); + zmq_assert (!_connect_timer_started); + zmq_assert (!_reconnect_timer_started); + zmq_assert (!_handle); + zmq_assert (_s == retired_fd); } void zmq::tcp_connecter_t::process_plug () { - if (delayed_start) + if (_delayed_start) add_reconnect_timer (); else start_connecting (); @@ -107,21 +107,21 @@ void zmq::tcp_connecter_t::process_plug () void zmq::tcp_connecter_t::process_term (int linger_) { - if (connect_timer_started) { + if (_connect_timer_started) { cancel_timer (connect_timer_id); - connect_timer_started = false; + _connect_timer_started = false; } - if (reconnect_timer_started) { + if (_reconnect_timer_started) { cancel_timer (reconnect_timer_id); - reconnect_timer_started = false; + _reconnect_timer_started = false; } - if (handle) { + if (_handle) { rm_handle (); } - if (s != retired_fd) + if (_s != retired_fd) close (); own_t::process_term (linger_); @@ -137,9 +137,9 @@ void zmq::tcp_connecter_t::in_event () void zmq::tcp_connecter_t::out_event () { - if (connect_timer_started) { + if (_connect_timer_started) { cancel_timer (connect_timer_id); - connect_timer_started = false; + _connect_timer_started = false; } rm_handle 
(); @@ -155,34 +155,34 @@ void zmq::tcp_connecter_t::out_event () // Create the engine object for this connection. stream_engine_t *engine = - new (std::nothrow) stream_engine_t (fd, options, endpoint); + new (std::nothrow) stream_engine_t (fd, options, _endpoint); alloc_assert (engine); // Attach the engine to the corresponding session object. - send_attach (session, engine); + send_attach (_session, engine); // Shut the connecter down. terminate (); - socket->event_connected (endpoint, fd); + _socket->event_connected (_endpoint, fd); } void zmq::tcp_connecter_t::rm_handle () { - rm_fd (handle); - handle = static_cast (NULL); + rm_fd (_handle); + _handle = static_cast (NULL); } void zmq::tcp_connecter_t::timer_event (int id_) { zmq_assert (id_ == reconnect_timer_id || id_ == connect_timer_id); if (id_ == connect_timer_id) { - connect_timer_started = false; + _connect_timer_started = false; rm_handle (); close (); add_reconnect_timer (); } else if (id_ == reconnect_timer_id) { - reconnect_timer_started = false; + _reconnect_timer_started = false; start_connecting (); } } @@ -194,15 +194,15 @@ void zmq::tcp_connecter_t::start_connecting () // Connect may succeed in synchronous manner. if (rc == 0) { - handle = add_fd (s); + _handle = add_fd (_s); out_event (); } // Connection establishment may be delayed. Poll for its completion. else if (rc == -1 && errno == EINPROGRESS) { - handle = add_fd (s); - set_pollout (handle); - socket->event_connect_delayed (endpoint, zmq_errno ()); + _handle = add_fd (_s); + set_pollout (_handle); + _socket->event_connect_delayed (_endpoint, zmq_errno ()); // add userspace connect timeout add_connect_timer (); @@ -210,7 +210,7 @@ void zmq::tcp_connecter_t::start_connecting () // Handle any other error condition by eventual reconnect. 
else { - if (s != retired_fd) + if (_s != retired_fd) close (); add_reconnect_timer (); } @@ -220,7 +220,7 @@ void zmq::tcp_connecter_t::add_connect_timer () { if (options.connect_timeout > 0) { add_timer (options.connect_timeout, connect_timer_id); - connect_timer_started = true; + _connect_timer_started = true; } } @@ -228,94 +228,94 @@ void zmq::tcp_connecter_t::add_reconnect_timer () { const int interval = get_new_reconnect_ivl (); add_timer (interval, reconnect_timer_id); - socket->event_connect_retried (endpoint, interval); - reconnect_timer_started = true; + _socket->event_connect_retried (_endpoint, interval); + _reconnect_timer_started = true; } int zmq::tcp_connecter_t::get_new_reconnect_ivl () { // The new interval is the current interval + random value. const int interval = - current_reconnect_ivl + generate_random () % options.reconnect_ivl; + _current_reconnect_ivl + generate_random () % options.reconnect_ivl; // Only change the current reconnect interval if the maximum reconnect // interval was set and if it's larger than the reconnect interval. 
if (options.reconnect_ivl_max > 0 && options.reconnect_ivl_max > options.reconnect_ivl) // Calculate the next interval - current_reconnect_ivl = - std::min (current_reconnect_ivl * 2, options.reconnect_ivl_max); + _current_reconnect_ivl = + std::min (_current_reconnect_ivl * 2, options.reconnect_ivl_max); return interval; } int zmq::tcp_connecter_t::open () { - zmq_assert (s == retired_fd); + zmq_assert (_s == retired_fd); // Resolve the address - if (addr->resolved.tcp_addr != NULL) { - LIBZMQ_DELETE (addr->resolved.tcp_addr); + if (_addr->resolved.tcp_addr != NULL) { + LIBZMQ_DELETE (_addr->resolved.tcp_addr); } - addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); - alloc_assert (addr->resolved.tcp_addr); - int rc = addr->resolved.tcp_addr->resolve (addr->address.c_str (), false, - options.ipv6); + _addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); + alloc_assert (_addr->resolved.tcp_addr); + int rc = _addr->resolved.tcp_addr->resolve (_addr->address.c_str (), false, + options.ipv6); if (rc != 0) { - LIBZMQ_DELETE (addr->resolved.tcp_addr); + LIBZMQ_DELETE (_addr->resolved.tcp_addr); return -1; } - zmq_assert (addr->resolved.tcp_addr != NULL); - const tcp_address_t *const tcp_addr = addr->resolved.tcp_addr; + zmq_assert (_addr->resolved.tcp_addr != NULL); + const tcp_address_t *const tcp_addr = _addr->resolved.tcp_addr; // Create the socket. - s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP); + _s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP); // IPv6 address family not supported, try automatic downgrade to IPv4. 
- if (s == zmq::retired_fd && tcp_addr->family () == AF_INET6 + if (_s == zmq::retired_fd && tcp_addr->family () == AF_INET6 && errno == EAFNOSUPPORT && options.ipv6) { - rc = addr->resolved.tcp_addr->resolve (addr->address.c_str (), false, - false); + rc = _addr->resolved.tcp_addr->resolve (_addr->address.c_str (), false, + false); if (rc != 0) { - LIBZMQ_DELETE (addr->resolved.tcp_addr); + LIBZMQ_DELETE (_addr->resolved.tcp_addr); return -1; } - s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); + _s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); } - if (s == retired_fd) { + if (_s == retired_fd) { return -1; } // On some systems, IPv4 mapping in IPv6 sockets is disabled by default. // Switch it on in such cases. if (tcp_addr->family () == AF_INET6) - enable_ipv4_mapping (s); + enable_ipv4_mapping (_s); // Set the IP Type-Of-Service priority for this socket if (options.tos != 0) - set_ip_type_of_service (s, options.tos); + set_ip_type_of_service (_s, options.tos); // Bind the socket to a device if applicable if (!options.bound_device.empty ()) - bind_to_device (s, options.bound_device); + bind_to_device (_s, options.bound_device); // Set the socket to non-blocking mode so that we get async connect(). - unblock_socket (s); + unblock_socket (_s); // Set the socket to loopback fastpath if configured. if (options.loopback_fastpath) - tcp_tune_loopback_fast_path (s); + tcp_tune_loopback_fast_path (_s); // Set the socket buffer limits for the underlying socket. 
if (options.sndbuf >= 0) - set_tcp_send_buffer (s, options.sndbuf); + set_tcp_send_buffer (_s, options.sndbuf); if (options.rcvbuf >= 0) - set_tcp_receive_buffer (s, options.rcvbuf); + set_tcp_receive_buffer (_s, options.rcvbuf); // Set the IP Type-Of-Service for the underlying socket if (options.tos != 0) - set_ip_type_of_service (s, options.tos); + set_ip_type_of_service (_s, options.tos); // Set a source address for conversations if (tcp_addr->has_src_addr ()) { @@ -323,23 +323,23 @@ int zmq::tcp_connecter_t::open () // using the same source port on the client. int flag = 1; #ifdef ZMQ_HAVE_WINDOWS - rc = setsockopt (s, SOL_SOCKET, SO_REUSEADDR, + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast (&flag), sizeof (int)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS - rc = setsockopt (s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); errno_assert (rc == 0); #else - rc = setsockopt (s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); errno_assert (rc == 0); #endif #if defined ZMQ_HAVE_VXWORKS - rc = ::bind (s, (sockaddr *) tcp_addr->src_addr (), + rc = ::bind (_s, (sockaddr *) tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #else - rc = ::bind (s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); + rc = ::bind (_s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); #endif if (rc == -1) return -1; @@ -347,9 +347,9 @@ int zmq::tcp_connecter_t::open () // Connect to the remote peer. #if defined ZMQ_HAVE_VXWORKS - rc = ::connect (s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); + rc = ::connect (_s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); #else - rc = ::connect (s, tcp_addr->addr (), tcp_addr->addrlen ()); + rc = ::connect (_s, tcp_addr->addr (), tcp_addr->addrlen ()); #endif // Connect was successful immediately. 
if (rc == 0) { @@ -381,7 +381,7 @@ zmq::fd_t zmq::tcp_connecter_t::connect () socklen_t len = sizeof err; #endif - const int rc = getsockopt (s, SOL_SOCKET, SO_ERROR, + const int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, reinterpret_cast (&err), &len); // Assert if the error was caused by 0MQ bug. @@ -414,8 +414,8 @@ zmq::fd_t zmq::tcp_connecter_t::connect () #endif // Return the newly connected socket. - const fd_t result = s; - s = retired_fd; + const fd_t result = _s; + _s = retired_fd; return result; } @@ -431,14 +431,14 @@ bool zmq::tcp_connecter_t::tune_socket (const fd_t fd_) void zmq::tcp_connecter_t::close () { - zmq_assert (s != retired_fd); + zmq_assert (_s != retired_fd); #ifdef ZMQ_HAVE_WINDOWS - const int rc = closesocket (s); + const int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else - const int rc = ::close (s); + const int rc = ::close (_s); errno_assert (rc == 0); #endif - socket->event_closed (endpoint, s); - s = retired_fd; + _socket->event_closed (_endpoint, _s); + _s = retired_fd; } diff --git a/src/tcp_connecter.cpp.orig b/src/tcp_connecter.cpp.orig new file mode 100644 index 00000000..5135bdbe --- /dev/null +++ b/src/tcp_connecter.cpp.orig @@ -0,0 +1,444 @@ +/* + Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file + + This file is part of libzmq, the ZeroMQ core engine in C++. + + libzmq is free software; you can redistribute it and/or modify it under + the terms of the GNU Lesser General Public License (LGPL) as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. 
+ + As a special exception, the Contributors give you permission to link + this library with independent modules to produce an executable, + regardless of the license terms of these independent modules, and to + copy and distribute the resulting executable under terms of your choice, + provided that you also meet, for each linked independent module, the + terms and conditions of the license of that module. An independent + module is a module which is not derived from or based on this library. + If you modify this library, you must extend this exception to your + version of the library. + + libzmq is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this program. If not, see . +*/ + +#include "precompiled.hpp" +#include +#include + +#include "macros.hpp" +#include "tcp_connecter.hpp" +#include "stream_engine.hpp" +#include "io_thread.hpp" +#include "random.hpp" +#include "err.hpp" +#include "ip.hpp" +#include "tcp.hpp" +#include "address.hpp" +#include "tcp_address.hpp" +#include "session_base.hpp" + +#if !defined ZMQ_HAVE_WINDOWS +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef ZMQ_HAVE_VXWORKS +#include +#endif +#ifdef ZMQ_HAVE_OPENVMS +#include +#endif +#endif + +#ifdef __APPLE__ +#include +#endif + +zmq::tcp_connecter_t::tcp_connecter_t (class io_thread_t *io_thread_, + class session_base_t *session_, + const options_t &options_, + address_t *addr_, + bool delayed_start_) : + own_t (io_thread_, options_), + io_object_t (io_thread_), + _addr (addr_), + _s (retired_fd), + _handle (static_cast (NULL)), + _delayed_start (delayed_start_), + _connect_timer_started (false), + _reconnect_timer_started (false), + _session (session_), + _current_reconnect_ivl 
(options.reconnect_ivl), + _socket (_session->get_socket ()) +{ + zmq_assert (_addr); + zmq_assert (_addr->protocol == "tcp"); + _addr->to_string (_endpoint); + // TODO the return value is unused! what if it fails? if this is impossible + // or does not matter, change such that endpoint in initialized using an + // initializer, and make endpoint const +} + +zmq::tcp_connecter_t::~tcp_connecter_t () +{ + zmq_assert (!_connect_timer_started); + zmq_assert (!_reconnect_timer_started); + zmq_assert (!_handle); + zmq_assert (_s == retired_fd); +} + +void zmq::tcp_connecter_t::process_plug () +{ + if (_delayed_start) + add_reconnect_timer (); + else + start_connecting (); +} + +void zmq::tcp_connecter_t::process_term (int linger_) +{ + if (_connect_timer_started) { + cancel_timer (connect_timer_id); + _connect_timer_started = false; + } + + if (_reconnect_timer_started) { + cancel_timer (reconnect_timer_id); + _reconnect_timer_started = false; + } + + if (_handle) { + rm_handle (); + } + + if (_s != retired_fd) + close (); + + own_t::process_term (linger_); +} + +void zmq::tcp_connecter_t::in_event () +{ + // We are not polling for incoming data, so we are actually called + // because of error here. However, we can get error on out event as well + // on some platforms, so we'll simply handle both events in the same way. + out_event (); +} + +void zmq::tcp_connecter_t::out_event () +{ + if (_connect_timer_started) { + cancel_timer (connect_timer_id); + _connect_timer_started = false; + } + + rm_handle (); + + const fd_t fd = connect (); + + // Handle the error condition by attempt to reconnect. + if (fd == retired_fd || !tune_socket (fd)) { + close (); + add_reconnect_timer (); + return; + } + + // Create the engine object for this connection. + stream_engine_t *engine = + new (std::nothrow) stream_engine_t (fd, options, _endpoint); + alloc_assert (engine); + + // Attach the engine to the corresponding session object. 
+ send_attach (_session, engine); + + // Shut the connecter down. + terminate (); + + _socket->event_connected (_endpoint, fd); +} + +void zmq::tcp_connecter_t::rm_handle () +{ + rm_fd (_handle); + _handle = static_cast (NULL); +} + +void zmq::tcp_connecter_t::timer_event (int id_) +{ + zmq_assert (id_ == reconnect_timer_id || id_ == connect_timer_id); + if (id_ == connect_timer_id) { + _connect_timer_started = false; + rm_handle (); + close (); + add_reconnect_timer (); + } else if (id_ == reconnect_timer_id) { + _reconnect_timer_started = false; + start_connecting (); + } +} + +void zmq::tcp_connecter_t::start_connecting () +{ + // Open the connecting socket. + const int rc = open (); + + // Connect may succeed in synchronous manner. + if (rc == 0) { + _handle = add_fd (_s); + out_event (); + } + + // Connection establishment may be delayed. Poll for its completion. + else if (rc == -1 && errno == EINPROGRESS) { + _handle = add_fd (_s); + set_pollout (_handle); + _socket->event_connect_delayed (_endpoint, zmq_errno ()); + + // add userspace connect timeout + add_connect_timer (); + } + + // Handle any other error condition by eventual reconnect. + else { + if (_s != retired_fd) + close (); + add_reconnect_timer (); + } +} + +void zmq::tcp_connecter_t::add_connect_timer () +{ + if (options.connect_timeout > 0) { + add_timer (options.connect_timeout, connect_timer_id); + _connect_timer_started = true; + } +} + +void zmq::tcp_connecter_t::add_reconnect_timer () +{ + const int interval = get_new_reconnect_ivl (); + add_timer (interval, reconnect_timer_id); + _socket->event_connect_retried (_endpoint, interval); + _reconnect_timer_started = true; +} + +int zmq::tcp_connecter_t::get_new_reconnect_ivl () +{ + // The new interval is the current interval + random value. 
+ const int interval = + _current_reconnect_ivl + generate_random () % options.reconnect_ivl; + + // Only change the current reconnect interval if the maximum reconnect + // interval was set and if it's larger than the reconnect interval. + if (options.reconnect_ivl_max > 0 + && options.reconnect_ivl_max > options.reconnect_ivl) + // Calculate the next interval + _current_reconnect_ivl = + std::min (_current_reconnect_ivl * 2, options.reconnect_ivl_max); + return interval; +} + +int zmq::tcp_connecter_t::open () +{ + zmq_assert (_s == retired_fd); + + // Resolve the address + if (_addr->resolved.tcp_addr != NULL) { + LIBZMQ_DELETE (_addr->resolved.tcp_addr); + } + + _addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t (); + alloc_assert (_addr->resolved.tcp_addr); + int rc = _addr->resolved.tcp_addr->resolve (_addr->address.c_str (), false, + options.ipv6); + if (rc != 0) { + LIBZMQ_DELETE (_addr->resolved.tcp_addr); + return -1; + } + zmq_assert (_addr->resolved.tcp_addr != NULL); + const tcp_address_t *const tcp_addr = _addr->resolved.tcp_addr; + + // Create the socket. + _s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP); + + // IPv6 address family not supported, try automatic downgrade to IPv4. + if (_s == zmq::retired_fd && tcp_addr->family () == AF_INET6 + && errno == EAFNOSUPPORT && options.ipv6) { + rc = _addr->resolved.tcp_addr->resolve (_addr->address.c_str (), false, + false); + if (rc != 0) { + LIBZMQ_DELETE (_addr->resolved.tcp_addr); + return -1; + } + _s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); + } + + if (_s == retired_fd) { + return -1; + } + + // On some systems, IPv4 mapping in IPv6 sockets is disabled by default. + // Switch it on in such cases. 
+ if (tcp_addr->family () == AF_INET6) + enable_ipv4_mapping (_s); + + // Set the IP Type-Of-Service priority for this socket + if (options.tos != 0) + set_ip_type_of_service (_s, options.tos); + + // Bind the socket to a device if applicable + if (!options.bound_device.empty ()) + bind_to_device (_s, options.bound_device); + + // Set the socket to non-blocking mode so that we get async connect(). + unblock_socket (_s); + + // Set the socket to loopback fastpath if configured. + if (options.loopback_fastpath) + tcp_tune_loopback_fast_path (_s); + + // Set the socket buffer limits for the underlying socket. + if (options.sndbuf >= 0) + set_tcp_send_buffer (_s, options.sndbuf); + if (options.rcvbuf >= 0) + set_tcp_receive_buffer (_s, options.rcvbuf); + + // Set the IP Type-Of-Service for the underlying socket + if (options.tos != 0) + set_ip_type_of_service (_s, options.tos); + + // Set a source address for conversations + if (tcp_addr->has_src_addr ()) { + // Allow reusing of the address, to connect to different servers + // using the same source port on the client. + int flag = 1; +#ifdef ZMQ_HAVE_WINDOWS + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, + reinterpret_cast (&flag), sizeof (int)); + wsa_assert (rc != SOCKET_ERROR); +#elif defined ZMQ_HAVE_VXWORKS + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, + sizeof (int)); + errno_assert (rc == 0); +#else + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); + errno_assert (rc == 0); +#endif + +#if defined ZMQ_HAVE_VXWORKS + rc = ::bind (_s, (sockaddr *) tcp_addr->src_addr (), + tcp_addr->src_addrlen ()); +#else + rc = ::bind (_s, tcp_addr->src_addr (), tcp_addr->src_addrlen ()); +#endif + if (rc == -1) + return -1; + } + + // Connect to the remote peer. +#if defined ZMQ_HAVE_VXWORKS + rc = ::connect (_s, (sockaddr *) tcp_addr->addr (), tcp_addr->addrlen ()); +#else + rc = ::connect (_s, tcp_addr->addr (), tcp_addr->addrlen ()); +#endif + // Connect was successful immediately. 
+ if (rc == 0) { + return 0; + } + + // Translate error codes indicating asynchronous connect has been + // launched to a uniform EINPROGRESS. +#ifdef ZMQ_HAVE_WINDOWS + const int last_error = WSAGetLastError (); + if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK) + errno = EINPROGRESS; + else + errno = wsa_error_to_errno (last_error); +#else + if (errno == EINTR) + errno = EINPROGRESS; +#endif + return -1; +} + +zmq::fd_t zmq::tcp_connecter_t::connect () +{ + // Async connect has finished. Check whether an error occurred + int err = 0; +#if defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_VXWORKS + int len = sizeof err; +#else + socklen_t len = sizeof err; +#endif + + const int rc = getsockopt (_s, SOL_SOCKET, SO_ERROR, + reinterpret_cast (&err), &len); + + // Assert if the error was caused by 0MQ bug. + // Networking problems are OK. No need to assert. +#ifdef ZMQ_HAVE_WINDOWS + zmq_assert (rc == 0); + if (err != 0) { + if (err == WSAEBADF || err == WSAENOPROTOOPT || err == WSAENOTSOCK + || err == WSAENOBUFS) { + wsa_assert_no (err); + } + return retired_fd; + } +#else + // Following code should handle both Berkeley-derived socket + // implementations and Solaris. + if (rc == -1) + err = errno; + if (err != 0) { + errno = err; +#if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE + errno_assert (errno != EBADF && errno != ENOPROTOOPT + && errno != ENOTSOCK && errno != ENOBUFS); +#else + errno_assert (errno != ENOPROTOOPT && errno != ENOTSOCK + && errno != ENOBUFS); +#endif + return retired_fd; + } +#endif + + // Return the newly connected socket. 
+ const fd_t result = _s; + _s = retired_fd; + return result; +} + +bool zmq::tcp_connecter_t::tune_socket (const fd_t fd_) +{ + const int rc = tune_tcp_socket (fd_) + | tune_tcp_keepalives ( + fd_, options.tcp_keepalive, options.tcp_keepalive_cnt, + options.tcp_keepalive_idle, options.tcp_keepalive_intvl) + | tune_tcp_maxrt (fd_, options.tcp_maxrt); + return rc == 0; +} + +void zmq::tcp_connecter_t::close () +{ + zmq_assert (_s != retired_fd); +#ifdef ZMQ_HAVE_WINDOWS + const int rc = closesocket (_s); + wsa_assert (rc != SOCKET_ERROR); +#else + const int rc = ::close (_s); + errno_assert (rc == 0); +#endif + _socket->event_closed (_endpoint, _s); + _s = retired_fd; +} diff --git a/src/tcp_connecter.hpp b/src/tcp_connecter.hpp index 5b1dca6d..b8302773 100644 --- a/src/tcp_connecter.hpp +++ b/src/tcp_connecter.hpp @@ -103,33 +103,33 @@ class tcp_connecter_t : public own_t, public io_object_t bool tune_socket (fd_t fd_); // Address to connect to. Owned by session_base_t. - address_t *const addr; + address_t *const _addr; // Underlying socket. - fd_t s; + fd_t _s; // Handle corresponding to the listening socket, if file descriptor is // registered with the poller, or NULL. - handle_t handle; + handle_t _handle; // If true, connecter is waiting a while before trying to connect. - const bool delayed_start; + const bool _delayed_start; // True iff a timer has been started. - bool connect_timer_started; - bool reconnect_timer_started; + bool _connect_timer_started; + bool _reconnect_timer_started; // Reference to the session we belong to. 
- zmq::session_base_t *const session; + zmq::session_base_t *const _session; // Current reconnect ivl, updated for backoff strategy - int current_reconnect_ivl; + int _current_reconnect_ivl; // String representation of endpoint to connect to - std::string endpoint; + std::string _endpoint; // Socket - zmq::socket_base_t *const socket; + zmq::socket_base_t *const _socket; tcp_connecter_t (const tcp_connecter_t &); const tcp_connecter_t &operator= (const tcp_connecter_t &); diff --git a/src/tcp_listener.cpp b/src/tcp_listener.cpp index 1fdd006a..80112a6b 100644 --- a/src/tcp_listener.cpp +++ b/src/tcp_listener.cpp @@ -65,29 +65,29 @@ zmq::tcp_listener_t::tcp_listener_t (io_thread_t *io_thread_, const options_t &options_) : own_t (io_thread_, options_), io_object_t (io_thread_), - s (retired_fd), - handle (static_cast (NULL)), - socket (socket_) + _s (retired_fd), + _handle (static_cast (NULL)), + _socket (socket_) { } zmq::tcp_listener_t::~tcp_listener_t () { - zmq_assert (s == retired_fd); - zmq_assert (!handle); + zmq_assert (_s == retired_fd); + zmq_assert (!_handle); } void zmq::tcp_listener_t::process_plug () { // Start polling for incoming connections. - handle = add_fd (s); - set_pollin (handle); + _handle = add_fd (_s); + set_pollin (_handle); } void zmq::tcp_listener_t::process_term (int linger_) { - rm_fd (handle); - handle = static_cast (NULL); + rm_fd (_handle); + _handle = static_cast (NULL); close (); own_t::process_term (linger_); } @@ -99,7 +99,7 @@ void zmq::tcp_listener_t::in_event () // If connection was reset by the peer in the meantime, just ignore it. // TODO: Handle specific errors like ENFILE/EMFILE etc. 
if (fd == retired_fd) { - socket->event_accept_failed (endpoint, zmq_errno ()); + _socket->event_accept_failed (_endpoint, zmq_errno ()); return; } @@ -110,13 +110,13 @@ void zmq::tcp_listener_t::in_event () options.tcp_keepalive_idle, options.tcp_keepalive_intvl); rc = rc | tune_tcp_maxrt (fd, options.tcp_maxrt); if (rc != 0) { - socket->event_accept_failed (endpoint, zmq_errno ()); + _socket->event_accept_failed (_endpoint, zmq_errno ()); return; } // Create the engine object for this connection. stream_engine_t *engine = - new (std::nothrow) stream_engine_t (fd, options, endpoint); + new (std::nothrow) stream_engine_t (fd, options, _endpoint); alloc_assert (engine); // Choose I/O thread to run connecter in. Given that we are already @@ -126,26 +126,26 @@ void zmq::tcp_listener_t::in_event () // Create and launch a session object. session_base_t *session = - session_base_t::create (io_thread, false, socket, options, NULL); + session_base_t::create (io_thread, false, _socket, options, NULL); errno_assert (session); session->inc_seqnum (); launch_child (session); send_attach (session, engine, false); - socket->event_accepted (endpoint, fd); + _socket->event_accepted (_endpoint, fd); } void zmq::tcp_listener_t::close () { - zmq_assert (s != retired_fd); + zmq_assert (_s != retired_fd); #ifdef ZMQ_HAVE_WINDOWS - int rc = closesocket (s); + int rc = closesocket (_s); wsa_assert (rc != SOCKET_ERROR); #else - int rc = ::close (s); + int rc = ::close (_s); errno_assert (rc == 0); #endif - socket->event_closed (endpoint, s); - s = retired_fd; + _socket->event_closed (_endpoint, _s); + _s = retired_fd; } int zmq::tcp_listener_t::get_address (std::string &addr_) @@ -157,7 +157,7 @@ int zmq::tcp_listener_t::get_address (std::string &addr_) #else socklen_t sl = sizeof (ss); #endif - int rc = getsockname (s, reinterpret_cast (&ss), &sl); + int rc = getsockname (_s, reinterpret_cast (&ss), &sl); if (rc != 0) { addr_.clear (); @@ -171,77 +171,78 @@ int 
zmq::tcp_listener_t::get_address (std::string &addr_) int zmq::tcp_listener_t::set_address (const char *addr_) { // Convert the textual address into address structure. - int rc = address.resolve (addr_, true, options.ipv6); + int rc = _address.resolve (addr_, true, options.ipv6); if (rc != 0) return -1; - address.to_string (endpoint); + _address.to_string (_endpoint); if (options.use_fd != -1) { - s = options.use_fd; - socket->event_listening (endpoint, s); + _s = options.use_fd; + _socket->event_listening (_endpoint, _s); return 0; } // Create a listening socket. - s = open_socket (address.family (), SOCK_STREAM, IPPROTO_TCP); + _s = open_socket (_address.family (), SOCK_STREAM, IPPROTO_TCP); // IPv6 address family not supported, try automatic downgrade to IPv4. - if (s == zmq::retired_fd && address.family () == AF_INET6 + if (_s == zmq::retired_fd && _address.family () == AF_INET6 && errno == EAFNOSUPPORT && options.ipv6) { - rc = address.resolve (addr_, true, false); + rc = _address.resolve (addr_, true, false); if (rc != 0) return rc; - s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); + _s = open_socket (AF_INET, SOCK_STREAM, IPPROTO_TCP); } - if (s == retired_fd) { + if (_s == retired_fd) { return -1; } - make_socket_noninheritable (s); + make_socket_noninheritable (_s); // On some systems, IPv4 mapping in IPv6 sockets is disabled by default. // Switch it on in such cases. - if (address.family () == AF_INET6) - enable_ipv4_mapping (s); + if (_address.family () == AF_INET6) + enable_ipv4_mapping (_s); // Set the IP Type-Of-Service for the underlying socket if (options.tos != 0) - set_ip_type_of_service (s, options.tos); + set_ip_type_of_service (_s, options.tos); // Set the socket to loopback fastpath if configured. 
if (options.loopback_fastpath) - tcp_tune_loopback_fast_path (s); + tcp_tune_loopback_fast_path (_s); // Bind the socket to a device if applicable if (!options.bound_device.empty ()) - bind_to_device (s, options.bound_device); + bind_to_device (_s, options.bound_device); // Set the socket buffer limits for the underlying socket. if (options.sndbuf >= 0) - set_tcp_send_buffer (s, options.sndbuf); + set_tcp_send_buffer (_s, options.sndbuf); if (options.rcvbuf >= 0) - set_tcp_receive_buffer (s, options.rcvbuf); + set_tcp_receive_buffer (_s, options.rcvbuf); // Allow reusing of the address. int flag = 1; #ifdef ZMQ_HAVE_WINDOWS - rc = setsockopt (s, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, + rc = setsockopt (_s, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, reinterpret_cast (&flag), sizeof (int)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS - rc = setsockopt (s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); + rc = + setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, (char *) &flag, sizeof (int)); errno_assert (rc == 0); #else - rc = setsockopt (s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); + rc = setsockopt (_s, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof (int)); errno_assert (rc == 0); #endif // Bind the socket to the network interface and port. #if defined ZMQ_HAVE_VXWORKS - rc = bind (s, (sockaddr *) address.addr (), address.addrlen ()); + rc = bind (_s, (sockaddr *) _address.addr (), _address.addrlen ()); #else - rc = bind (s, address.addr (), address.addrlen ()); + rc = bind (_s, _address.addr (), _address.addrlen ()); #endif #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { @@ -254,7 +255,7 @@ int zmq::tcp_listener_t::set_address (const char *addr_) #endif // Listen for incoming connections. 
- rc = listen (s, options.backlog); + rc = listen (_s, options.backlog); #ifdef ZMQ_HAVE_WINDOWS if (rc == SOCKET_ERROR) { errno = wsa_error_to_errno (WSAGetLastError ()); @@ -265,7 +266,7 @@ int zmq::tcp_listener_t::set_address (const char *addr_) goto error; #endif - socket->event_listening (endpoint, s); + _socket->event_listening (_endpoint, _s); return 0; error: @@ -280,7 +281,7 @@ zmq::fd_t zmq::tcp_listener_t::accept () // The situation where connection cannot be accepted due to insufficient // resources is considered valid and treated by ignoring the connection. // Accept one connection and deal with different failure modes. - zmq_assert (s != retired_fd); + zmq_assert (_s != retired_fd); struct sockaddr_storage ss; memset (&ss, 0, sizeof (ss)); @@ -290,10 +291,10 @@ zmq::fd_t zmq::tcp_listener_t::accept () socklen_t ss_len = sizeof (ss); #endif #if defined ZMQ_HAVE_SOCK_CLOEXEC && defined HAVE_ACCEPT4 - fd_t sock = ::accept4 (s, (struct sockaddr *) &ss, &ss_len, SOCK_CLOEXEC); + fd_t sock = ::accept4 (_s, (struct sockaddr *) &ss, &ss_len, SOCK_CLOEXEC); #else fd_t sock = - ::accept (s, reinterpret_cast (&ss), &ss_len); + ::accept (_s, reinterpret_cast (&ss), &ss_len); #endif if (sock == retired_fd) { diff --git a/src/tcp_listener.hpp b/src/tcp_listener.hpp index d82848bf..551b6035 100644 --- a/src/tcp_listener.hpp +++ b/src/tcp_listener.hpp @@ -73,19 +73,19 @@ class tcp_listener_t : public own_t, public io_object_t fd_t accept (); // Address to listen on. - tcp_address_t address; + tcp_address_t _address; // Underlying socket. - fd_t s; + fd_t _s; // Handle corresponding to the listening socket. - handle_t handle; + handle_t _handle; // Socket the listener belongs to. 
- zmq::socket_base_t *socket; + zmq::socket_base_t *_socket; // String representation of endpoint to bind to - std::string endpoint; + std::string _endpoint; tcp_listener_t (const tcp_listener_t &); const tcp_listener_t &operator= (const tcp_listener_t &); diff --git a/src/thread.cpp b/src/thread.cpp index d80f6d12..d1a483b0 100644 --- a/src/thread.cpp +++ b/src/thread.cpp @@ -34,7 +34,7 @@ bool zmq::thread_t::get_started () const { - return started; + return _started; } #ifdef ZMQ_HAVE_WINDOWS @@ -47,37 +47,37 @@ static unsigned int __stdcall thread_routine (void *arg_) #endif { zmq::thread_t *self = (zmq::thread_t *) arg_; - self->tfn (self->arg); + self->_tfn (self->_arg); return 0; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_) { - tfn = tfn_; - arg = arg_; + _tfn = tfn_; + _arg = arg_; #if defined _WIN32_WCE - descriptor = + _descriptor = (HANDLE) CreateThread (NULL, 0, &::thread_routine, this, 0, NULL); #else - descriptor = + _descriptor = (HANDLE) _beginthreadex (NULL, 0, &::thread_routine, this, 0, NULL); #endif - win_assert (descriptor != NULL); - started = true; + win_assert (_descriptor != NULL); + _started = true; } bool zmq::thread_t::is_current_thread () const { - return GetCurrentThreadId () == GetThreadId (descriptor); + return GetCurrentThreadId () == GetThreadId (_descriptor); } void zmq::thread_t::stop () { - if (started) { - DWORD rc = WaitForSingleObject (descriptor, INFINITE); + if (_started) { + DWORD rc = WaitForSingleObject (_descriptor, INFINITE); win_assert (rc != WAIT_FAILED); - BOOL rc2 = CloseHandle (descriptor); + BOOL rc2 = CloseHandle (_descriptor); win_assert (rc2 != 0); } } @@ -104,50 +104,51 @@ static void *thread_routine (void *arg_) { zmq::thread_t *self = (zmq::thread_t *) arg_; self->applySchedulingParameters (); - self->tfn (self->arg); + self->_tfn (self->_arg); return NULL; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_) { - tfn = tfn_; - arg = arg_; - descriptor = taskSpawn (NULL, 
DEFAULT_PRIORITY, DEFAULT_OPTIONS, - DEFAULT_STACK_SIZE, (FUNCPTR) thread_routine, - (int) this, 0, 0, 0, 0, 0, 0, 0, 0, 0); - if (descriptor != NULL || descriptor > 0) - started = true; + _tfn = tfn_; + _arg = arg_; + _descriptor = taskSpawn (NULL, DEFAULT_PRIORITY, DEFAULT_OPTIONS, + DEFAULT_STACK_SIZE, (FUNCPTR) thread_routine, + (int) this, 0, 0, 0, 0, 0, 0, 0, 0, 0); + if (_descriptor != NULL || _descriptor > 0) + _started = true; } void zmq::thread_t::stop () { - if (started) - while ((descriptor != NULL || descriptor > 0) - && taskIdVerify (descriptor) == 0) { + if (_started) + while ((_descriptor != NULL || _descriptor > 0) + && taskIdVerify (_descriptor) == 0) { } } bool zmq::thread_t::is_current_thread () const { - return taskIdSelf () == descriptor; + return taskIdSelf () == _descriptor; } void zmq::thread_t::setSchedulingParameters ( int priority_, int schedulingPolicy_, const std::set &affinity_cpus_) { - thread_priority = priority_; - thread_sched_policy = schedulingPolicy_; - thread_affinity_cpus = affinity_cpus_; + _thread_priority = priority_; + _thread_sched_policy = schedulingPolicy_; + _thread_affinity_cpus = affinity_cpus_; } void zmq::thread_t:: applySchedulingParameters () // to be called in secondary thread context { - int priority = (thread_priority >= 0 ? thread_priority : DEFAULT_PRIORITY); + int priority = + (_thread_priority >= 0 ? _thread_priority : DEFAULT_PRIORITY); priority = (priority < 255 ? 
priority : DEFAULT_PRIORITY); - if (descriptor != NULL || descriptor > 0) { - taskPrioritySet (descriptor, priority); + if (_descriptor != NULL || _descriptor > 0) { + taskPrioritySet (_descriptor, priority); } } @@ -178,39 +179,39 @@ static void *thread_routine (void *arg_) #endif zmq::thread_t *self = (zmq::thread_t *) arg_; self->applySchedulingParameters (); - self->tfn (self->arg); + self->_tfn (self->_arg); return NULL; } } void zmq::thread_t::start (thread_fn *tfn_, void *arg_) { - tfn = tfn_; - arg = arg_; - int rc = pthread_create (&descriptor, NULL, thread_routine, this); + _tfn = tfn_; + _arg = arg_; + int rc = pthread_create (&_descriptor, NULL, thread_routine, this); posix_assert (rc); - started = true; + _started = true; } void zmq::thread_t::stop () { - if (started) { - int rc = pthread_join (descriptor, NULL); + if (_started) { + int rc = pthread_join (_descriptor, NULL); posix_assert (rc); } } bool zmq::thread_t::is_current_thread () const { - return pthread_self () == descriptor; + return pthread_self () == _descriptor; } void zmq::thread_t::setSchedulingParameters ( int priority_, int schedulingPolicy_, const std::set &affinity_cpus_) { - thread_priority = priority_; - thread_sched_policy = schedulingPolicy_; - thread_affinity_cpus = affinity_cpus_; + _thread_priority = priority_; + _thread_sched_policy = schedulingPolicy_; + _thread_affinity_cpus = affinity_cpus_; } void zmq::thread_t:: @@ -227,11 +228,11 @@ void zmq::thread_t:: return; } #endif - int rc = pthread_getschedparam (descriptor, &policy, ¶m); + int rc = pthread_getschedparam (_descriptor, &policy, ¶m); posix_assert (rc); - if (thread_sched_policy != ZMQ_THREAD_SCHED_POLICY_DFLT) { - policy = thread_sched_policy; + if (_thread_sched_policy != ZMQ_THREAD_SCHED_POLICY_DFLT) { + policy = _thread_sched_policy; } /* Quoting docs: @@ -242,13 +243,13 @@ void zmq::thread_t:: bool use_nice_instead_priority = (policy != SCHED_FIFO) && (policy != SCHED_RR); - if (thread_priority != 
ZMQ_THREAD_PRIORITY_DFLT) { + if (_thread_priority != ZMQ_THREAD_PRIORITY_DFLT) { if (use_nice_instead_priority) param.sched_priority = 0; // this is the only supported priority for most scheduling policies else param.sched_priority = - thread_priority; // user should provide a value between 1 and 99 + _thread_priority; // user should provide a value between 1 and 99 } #ifdef __NetBSD__ @@ -256,7 +257,7 @@ void zmq::thread_t:: param.sched_priority = -1; #endif - rc = pthread_setschedparam (descriptor, policy, ¶m); + rc = pthread_setschedparam (_descriptor, policy, ¶m); #if defined(__FreeBSD_kernel__) || defined(__FreeBSD__) // If this feature is unavailable at run-time, don't abort. @@ -268,7 +269,7 @@ void zmq::thread_t:: #if !defined ZMQ_HAVE_VXWORKS if (use_nice_instead_priority - && thread_priority != ZMQ_THREAD_PRIORITY_DFLT) { + && _thread_priority != ZMQ_THREAD_PRIORITY_DFLT) { // assume the user wants to decrease the thread's nice value // i.e., increase the chance of this thread being scheduled: try setting that to // maximum priority. 
@@ -281,11 +282,11 @@ void zmq::thread_t:: #endif #ifdef ZMQ_HAVE_PTHREAD_SET_AFFINITY - if (!thread_affinity_cpus.empty ()) { + if (!_thread_affinity_cpus.empty ()) { cpu_set_t cpuset; CPU_ZERO (&cpuset); - for (std::set::const_iterator it = thread_affinity_cpus.begin (); - it != thread_affinity_cpus.end (); it++) { + for (std::set::const_iterator it = _thread_affinity_cpus.begin (); + it != _thread_affinity_cpus.end (); it++) { CPU_SET ((int) (*it), &cpuset); } rc = @@ -313,15 +314,15 @@ void zmq::thread_t::setThreadName (const char *name_) if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SETNAME_2) - int rc = pthread_setname_np (descriptor, name_); + int rc = pthread_setname_np (_descriptor, name_); if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SETNAME_3) - int rc = pthread_setname_np (descriptor, name_, NULL); + int rc = pthread_setname_np (_descriptor, name_, NULL); if (rc) return; #elif defined(ZMQ_HAVE_PTHREAD_SET_NAME) - pthread_set_name_np (descriptor, name_); + pthread_set_name_np (_descriptor, name_); #endif } diff --git a/src/thread.hpp b/src/thread.hpp index 524f4aa3..d52c00f8 100644 --- a/src/thread.hpp +++ b/src/thread.hpp @@ -53,11 +53,11 @@ class thread_t { public: inline thread_t () : - tfn (NULL), - arg (NULL), - started (false), - thread_priority (ZMQ_THREAD_PRIORITY_DFLT), - thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT) + _tfn (NULL), + _arg (NULL), + _started (false), + _thread_priority (ZMQ_THREAD_PRIORITY_DFLT), + _thread_sched_policy (ZMQ_THREAD_SCHED_POLICY_DFLT) { } @@ -97,16 +97,16 @@ class thread_t // These are internal members. They should be private, however then // they would not be accessible from the main C routine of the thread. 
void applySchedulingParameters (); - thread_fn *tfn; - void *arg; + thread_fn *_tfn; + void *_arg; private: - bool started; + bool _started; #ifdef ZMQ_HAVE_WINDOWS - HANDLE descriptor; + HANDLE _descriptor; #elif defined ZMQ_HAVE_VXWORKS - int descriptor; + int _descriptor; enum { DEFAULT_PRIORITY = 100, @@ -114,13 +114,13 @@ class thread_t DEFAULT_STACK_SIZE = 4000 }; #else - pthread_t descriptor; + pthread_t _descriptor; #endif // Thread scheduling parameters. - int thread_priority; - int thread_sched_policy; - std::set thread_affinity_cpus; + int _thread_priority; + int _thread_sched_policy; + std::set _thread_affinity_cpus; thread_t (const thread_t &); const thread_t &operator= (const thread_t &); diff --git a/src/timers.cpp b/src/timers.cpp index c31143e3..e981957f 100644 --- a/src/timers.cpp +++ b/src/timers.cpp @@ -33,19 +33,19 @@ along with this program. If not, see . #include -zmq::timers_t::timers_t () : tag (0xCAFEDADA), next_timer_id (0) +zmq::timers_t::timers_t () : _tag (0xCAFEDADA), _next_timer_id (0) { } zmq::timers_t::~timers_t () { // Mark the timers as dead - tag = 0xdeadbeef; + _tag = 0xdeadbeef; } bool zmq::timers_t::check_tag () { - return tag == 0xCAFEDADA; + return _tag == 0xCAFEDADA; } int zmq::timers_t::add (size_t interval_, timers_timer_fn handler_, void *arg_) @@ -55,58 +55,58 @@ int zmq::timers_t::add (size_t interval_, timers_timer_fn handler_, void *arg_) return -1; } - uint64_t when = clock.now_ms () + interval_; - timer_t timer = {++next_timer_id, interval_, handler_, arg_}; - timers.insert (timersmap_t::value_type (when, timer)); + uint64_t when = _clock.now_ms () + interval_; + timer_t timer = {++_next_timer_id, interval_, handler_, arg_}; + _timers.insert (timersmap_t::value_type (when, timer)); return timer.timer_id; } struct zmq::timers_t::match_by_id { - match_by_id (int timer_id_) : timer_id (timer_id_) {} + match_by_id (int timer_id_) : _timer_id (timer_id_) {} bool operator() (timersmap_t::value_type const &entry_) const 
{ - return entry_.second.timer_id == timer_id; + return entry_.second.timer_id == _timer_id; } private: - int timer_id; + int _timer_id; }; int zmq::timers_t::cancel (int timer_id_) { // check first if timer exists at all - if (timers.end () - == std::find_if (timers.begin (), timers.end (), + if (_timers.end () + == std::find_if (_timers.begin (), _timers.end (), match_by_id (timer_id_))) { errno = EINVAL; return -1; } // check if timer was already canceled - if (cancelled_timers.count (timer_id_)) { + if (_cancelled_timers.count (timer_id_)) { errno = EINVAL; return -1; } - cancelled_timers.insert (timer_id_); + _cancelled_timers.insert (timer_id_); return 0; } int zmq::timers_t::set_interval (int timer_id_, size_t interval_) { - const timersmap_t::iterator end = timers.end (); + const timersmap_t::iterator end = _timers.end (); const timersmap_t::iterator it = - std::find_if (timers.begin (), end, match_by_id (timer_id_)); + std::find_if (_timers.begin (), end, match_by_id (timer_id_)); if (it != end) { timer_t timer = it->second; timer.interval = interval_; - uint64_t when = clock.now_ms () + interval_; - timers.erase (it); - timers.insert (timersmap_t::value_type (when, timer)); + uint64_t when = _clock.now_ms () + interval_; + _timers.erase (it); + _timers.insert (timersmap_t::value_type (when, timer)); return 0; } @@ -117,14 +117,14 @@ int zmq::timers_t::set_interval (int timer_id_, size_t interval_) int zmq::timers_t::reset (int timer_id_) { - const timersmap_t::iterator end = timers.end (); + const timersmap_t::iterator end = _timers.end (); const timersmap_t::iterator it = - std::find_if (timers.begin (), end, match_by_id (timer_id_)); + std::find_if (_timers.begin (), end, match_by_id (timer_id_)); if (it != end) { timer_t timer = it->second; - uint64_t when = clock.now_ms () + timer.interval; - timers.erase (it); - timers.insert (timersmap_t::value_type (when, timer)); + uint64_t when = _clock.now_ms () + timer.interval; + _timers.erase (it); + 
_timers.insert (timersmap_t::value_type (when, timer)); return 0; } @@ -135,16 +135,16 @@ int zmq::timers_t::reset (int timer_id_) long zmq::timers_t::timeout () { - timersmap_t::iterator it = timers.begin (); + timersmap_t::iterator it = _timers.begin (); - uint64_t now = clock.now_ms (); + uint64_t now = _clock.now_ms (); - while (it != timers.end ()) { + while (it != _timers.end ()) { cancelled_timers_t::iterator cancelled_it = - cancelled_timers.find (it->second.timer_id); + _cancelled_timers.find (it->second.timer_id); // Live timer, lets return the timeout - if (cancelled_it == cancelled_timers.end ()) { + if (cancelled_it == _cancelled_timers.end ()) { if (it->first > now) return static_cast (it->first - now); @@ -154,8 +154,8 @@ long zmq::timers_t::timeout () // Let's remove it from the beginning of the list timersmap_t::iterator old = it; ++it; - timers.erase (old); - cancelled_timers.erase (cancelled_it); + _timers.erase (old); + _cancelled_timers.erase (cancelled_it); } // Wait forever as no timers are alive @@ -164,20 +164,20 @@ long zmq::timers_t::timeout () int zmq::timers_t::execute () { - timersmap_t::iterator it = timers.begin (); + timersmap_t::iterator it = _timers.begin (); - uint64_t now = clock.now_ms (); + uint64_t now = _clock.now_ms (); - while (it != timers.end ()) { + while (it != _timers.end ()) { cancelled_timers_t::iterator cancelled_it = - cancelled_timers.find (it->second.timer_id); + _cancelled_timers.find (it->second.timer_id); // Dead timer, lets remove it and continue - if (cancelled_it != cancelled_timers.end ()) { + if (cancelled_it != _cancelled_timers.end ()) { timersmap_t::iterator old = it; ++it; - timers.erase (old); - cancelled_timers.erase (cancelled_it); + _timers.erase (old); + _cancelled_timers.erase (cancelled_it); continue; } @@ -191,8 +191,8 @@ int zmq::timers_t::execute () timersmap_t::iterator old = it; ++it; - timers.erase (old); - timers.insert (timersmap_t::value_type (now + timer.interval, timer)); + 
_timers.erase (old); + _timers.insert (timersmap_t::value_type (now + timer.interval, timer)); } return 0; diff --git a/src/timers.hpp b/src/timers.hpp index ca28c1ad..5aabe41c 100644 --- a/src/timers.hpp +++ b/src/timers.hpp @@ -78,12 +78,12 @@ class timers_t private: // Used to check whether the object is a timers class. - uint32_t tag; + uint32_t _tag; - int next_timer_id; + int _next_timer_id; // Clock instance. - clock_t clock; + clock_t _clock; typedef struct timer_t { @@ -94,10 +94,10 @@ class timers_t } timer_t; typedef std::multimap timersmap_t; - timersmap_t timers; + timersmap_t _timers; typedef std::set cancelled_timers_t; - cancelled_timers_t cancelled_timers; + cancelled_timers_t _cancelled_timers; timers_t (const timers_t &); const timers_t &operator= (const timers_t &); diff --git a/src/trie.cpp b/src/trie.cpp index 0e52dc5e..c5ca4e99 100644 --- a/src/trie.cpp +++ b/src/trie.cpp @@ -37,20 +37,20 @@ #include #include -zmq::trie_t::trie_t () : refcnt (0), min (0), count (0), live_nodes (0) +zmq::trie_t::trie_t () : _refcnt (0), _min (0), _count (0), _live_nodes (0) { } zmq::trie_t::~trie_t () { - if (count == 1) { - zmq_assert (next.node); - LIBZMQ_DELETE (next.node); - } else if (count > 1) { - for (unsigned short i = 0; i != count; ++i) { - LIBZMQ_DELETE (next.table[i]); + if (_count == 1) { + zmq_assert (_next.node); + LIBZMQ_DELETE (_next.node); + } else if (_count > 1) { + for (unsigned short i = 0; i != _count; ++i) { + LIBZMQ_DELETE (_next.table[i]); } - free (next.table); + free (_next.table); } } @@ -58,86 +58,86 @@ bool zmq::trie_t::add (unsigned char *prefix_, size_t size_) { // We are at the node corresponding to the prefix. We are done. if (!size_) { - ++refcnt; - return refcnt == 1; + ++_refcnt; + return _refcnt == 1; } unsigned char c = *prefix_; - if (c < min || c >= min + count) { + if (c < _min || c >= _min + _count) { // The character is out of range of currently handled // characters. We have to extend the table. 
- if (!count) { - min = c; - count = 1; - next.node = NULL; - } else if (count == 1) { - unsigned char oldc = min; - trie_t *oldp = next.node; - count = (min < c ? c - min : min - c) + 1; - next.table = - static_cast (malloc (sizeof (trie_t *) * count)); - alloc_assert (next.table); - for (unsigned short i = 0; i != count; ++i) - next.table[i] = 0; - min = std::min (min, c); - next.table[oldc - min] = oldp; - } else if (min < c) { + if (!_count) { + _min = c; + _count = 1; + _next.node = NULL; + } else if (_count == 1) { + unsigned char oldc = _min; + trie_t *oldp = _next.node; + _count = (_min < c ? c - _min : _min - c) + 1; + _next.table = + static_cast (malloc (sizeof (trie_t *) * _count)); + alloc_assert (_next.table); + for (unsigned short i = 0; i != _count; ++i) + _next.table[i] = 0; + _min = std::min (_min, c); + _next.table[oldc - _min] = oldp; + } else if (_min < c) { // The new character is above the current character range. - unsigned short old_count = count; - count = c - min + 1; - next.table = static_cast ( - realloc ((void *) next.table, sizeof (trie_t *) * count)); - zmq_assert (next.table); - for (unsigned short i = old_count; i != count; i++) - next.table[i] = NULL; + unsigned short old_count = _count; + _count = c - _min + 1; + _next.table = static_cast ( + realloc ((void *) _next.table, sizeof (trie_t *) * _count)); + zmq_assert (_next.table); + for (unsigned short i = old_count; i != _count; i++) + _next.table[i] = NULL; } else { // The new character is below the current character range. 
- unsigned short old_count = count; - count = (min + old_count) - c; - next.table = static_cast ( - realloc ((void *) next.table, sizeof (trie_t *) * count)); - zmq_assert (next.table); - memmove (next.table + min - c, next.table, + unsigned short old_count = _count; + _count = (_min + old_count) - c; + _next.table = static_cast ( + realloc ((void *) _next.table, sizeof (trie_t *) * _count)); + zmq_assert (_next.table); + memmove (_next.table + _min - c, _next.table, old_count * sizeof (trie_t *)); - for (unsigned short i = 0; i != min - c; i++) - next.table[i] = NULL; - min = c; + for (unsigned short i = 0; i != _min - c; i++) + _next.table[i] = NULL; + _min = c; } } // If next node does not exist, create one. - if (count == 1) { - if (!next.node) { - next.node = new (std::nothrow) trie_t; - alloc_assert (next.node); - ++live_nodes; - zmq_assert (live_nodes == 1); + if (_count == 1) { + if (!_next.node) { + _next.node = new (std::nothrow) trie_t; + alloc_assert (_next.node); + ++_live_nodes; + zmq_assert (_live_nodes == 1); } - return next.node->add (prefix_ + 1, size_ - 1); + return _next.node->add (prefix_ + 1, size_ - 1); } - if (!next.table[c - min]) { - next.table[c - min] = new (std::nothrow) trie_t; - alloc_assert (next.table[c - min]); - ++live_nodes; - zmq_assert (live_nodes > 1); + if (!_next.table[c - _min]) { + _next.table[c - _min] = new (std::nothrow) trie_t; + alloc_assert (_next.table[c - _min]); + ++_live_nodes; + zmq_assert (_live_nodes > 1); } - return next.table[c - min]->add (prefix_ + 1, size_ - 1); + return _next.table[c - _min]->add (prefix_ + 1, size_ - 1); } bool zmq::trie_t::rm (unsigned char *prefix_, size_t size_) { // TODO: Shouldn't an error be reported if the key does not exist? 
if (!size_) { - if (!refcnt) + if (!_refcnt) return false; - refcnt--; - return refcnt == 0; + _refcnt--; + return _refcnt == 0; } unsigned char c = *prefix_; - if (!count || c < min || c >= min + count) + if (!_count || c < _min || c >= _min + _count) return false; - trie_t *next_node = count == 1 ? next.node : next.table[c - min]; + trie_t *next_node = _count == 1 ? _next.node : _next.table[c - _min]; if (!next_node) return false; @@ -147,88 +147,88 @@ bool zmq::trie_t::rm (unsigned char *prefix_, size_t size_) // Prune redundant nodes if (next_node->is_redundant ()) { LIBZMQ_DELETE (next_node); - zmq_assert (count > 0); + zmq_assert (_count > 0); - if (count == 1) { + if (_count == 1) { // The just pruned node is was the only live node - next.node = 0; - count = 0; - --live_nodes; - zmq_assert (live_nodes == 0); + _next.node = 0; + _count = 0; + --_live_nodes; + zmq_assert (_live_nodes == 0); } else { - next.table[c - min] = 0; - zmq_assert (live_nodes > 1); - --live_nodes; + _next.table[c - _min] = 0; + zmq_assert (_live_nodes > 1); + --_live_nodes; // Compact the table if possible - if (live_nodes == 1) { + if (_live_nodes == 1) { // We can switch to using the more compact single-node // representation since the table only contains one live node trie_t *node = 0; // Since we always compact the table the pruned node must // either be the left-most or right-most ptr in the node // table - if (c == min) { + if (c == _min) { // The pruned node is the left-most node ptr in the // node table => keep the right-most node - node = next.table[count - 1]; - min += count - 1; - } else if (c == min + count - 1) { + node = _next.table[_count - 1]; + _min += _count - 1; + } else if (c == _min + _count - 1) { // The pruned node is the right-most node ptr in the // node table => keep the left-most node - node = next.table[0]; + node = _next.table[0]; } zmq_assert (node); - free (next.table); - next.node = node; - count = 1; - } else if (c == min) { + free (_next.table); + 
_next.node = node; + _count = 1; + } else if (c == _min) { // We can compact the table "from the left". // Find the left-most non-null node ptr, which we'll use as // our new min - unsigned char new_min = min; - for (unsigned short i = 1; i < count; ++i) { - if (next.table[i]) { - new_min = i + min; + unsigned char new_min = _min; + for (unsigned short i = 1; i < _count; ++i) { + if (_next.table[i]) { + new_min = i + _min; break; } } - zmq_assert (new_min != min); + zmq_assert (new_min != _min); - trie_t **old_table = next.table; - zmq_assert (new_min > min); - zmq_assert (count > new_min - min); + trie_t **old_table = _next.table; + zmq_assert (new_min > _min); + zmq_assert (_count > new_min - _min); - count = count - (new_min - min); - next.table = - static_cast (malloc (sizeof (trie_t *) * count)); - alloc_assert (next.table); + _count = _count - (new_min - _min); + _next.table = + static_cast (malloc (sizeof (trie_t *) * _count)); + alloc_assert (_next.table); - memmove (next.table, old_table + (new_min - min), - sizeof (trie_t *) * count); + memmove (_next.table, old_table + (new_min - _min), + sizeof (trie_t *) * _count); free (old_table); - min = new_min; - } else if (c == min + count - 1) { + _min = new_min; + } else if (c == _min + _count - 1) { // We can compact the table "from the right". 
// Find the right-most non-null node ptr, which we'll use to // determine the new table size - unsigned short new_count = count; - for (unsigned short i = 1; i < count; ++i) { - if (next.table[count - 1 - i]) { - new_count = count - i; + unsigned short new_count = _count; + for (unsigned short i = 1; i < _count; ++i) { + if (_next.table[_count - 1 - i]) { + new_count = _count - i; break; } } - zmq_assert (new_count != count); - count = new_count; + zmq_assert (new_count != _count); + _count = new_count; - trie_t **old_table = next.table; - next.table = - static_cast (malloc (sizeof (trie_t *) * count)); - alloc_assert (next.table); + trie_t **old_table = _next.table; + _next.table = + static_cast (malloc (sizeof (trie_t *) * _count)); + alloc_assert (_next.table); - memmove (next.table, old_table, sizeof (trie_t *) * count); + memmove (_next.table, old_table, sizeof (trie_t *) * _count); free (old_table); } } @@ -243,7 +243,7 @@ bool zmq::trie_t::check (unsigned char *data_, size_t size_) trie_t *current = this; while (true) { // We've found a corresponding subscription! - if (current->refcnt) + if (current->_refcnt) return true; // We've checked all the data and haven't found matching subscription. @@ -253,14 +253,14 @@ bool zmq::trie_t::check (unsigned char *data_, size_t size_) // If there's no corresponding slot for the first character // of the prefix, the message does not match. unsigned char c = *data_; - if (c < current->min || c >= current->min + current->count) + if (c < current->_min || c >= current->_min + current->_count) return false; // Move to the next character. 
- if (current->count == 1) - current = current->next.node; + if (current->_count == 1) + current = current->_next.node; else { - current = current->next.table[c - current->min]; + current = current->_next.table[c - current->_min]; if (!current) return false; } @@ -286,7 +286,7 @@ void zmq::trie_t::apply_helper (unsigned char **buff_, void *arg_) { // If this node is a subscription, apply the function. - if (refcnt) + if (_refcnt) func_ (*buff_, buffsize_, arg_); // Adjust the buffer. @@ -297,27 +297,27 @@ void zmq::trie_t::apply_helper (unsigned char **buff_, } // If there are no subnodes in the trie, return. - if (count == 0) + if (_count == 0) return; // If there's one subnode (optimisation). - if (count == 1) { - (*buff_)[buffsize_] = min; + if (_count == 1) { + (*buff_)[buffsize_] = _min; buffsize_++; - next.node->apply_helper (buff_, buffsize_, maxbuffsize_, func_, arg_); + _next.node->apply_helper (buff_, buffsize_, maxbuffsize_, func_, arg_); return; } // If there are multiple subnodes. 
- for (unsigned short c = 0; c != count; c++) { - (*buff_)[buffsize_] = min + c; - if (next.table[c]) - next.table[c]->apply_helper (buff_, buffsize_ + 1, maxbuffsize_, - func_, arg_); + for (unsigned short c = 0; c != _count; c++) { + (*buff_)[buffsize_] = _min + c; + if (_next.table[c]) + _next.table[c]->apply_helper (buff_, buffsize_ + 1, maxbuffsize_, + func_, arg_); } } bool zmq::trie_t::is_redundant () const { - return refcnt == 0 && live_nodes == 0; + return _refcnt == 0 && _live_nodes == 0; } diff --git a/src/trie.hpp b/src/trie.hpp index 9f7073f6..27376748 100644 --- a/src/trie.hpp +++ b/src/trie.hpp @@ -67,15 +67,15 @@ class trie_t void *arg_); bool is_redundant () const; - uint32_t refcnt; - unsigned char min; - unsigned short count; - unsigned short live_nodes; + uint32_t _refcnt; + unsigned char _min; + unsigned short _count; + unsigned short _live_nodes; union { class trie_t *node; class trie_t **table; - } next; + } _next; trie_t (const trie_t &); const trie_t &operator= (const trie_t &); diff --git a/src/udp_address.cpp b/src/udp_address.cpp index bdcbbf44..d0dda564 100644 --- a/src/udp_address.cpp +++ b/src/udp_address.cpp @@ -45,10 +45,12 @@ #include #endif -zmq::udp_address_t::udp_address_t () : bind_interface (-1), is_multicast (false) +zmq::udp_address_t::udp_address_t () : + _bind_interface (-1), + _is_multicast (false) { - bind_address = ip_addr_t::any (AF_INET); - target_address = ip_addr_t::any (AF_INET); + _bind_address = ip_addr_t::any (AF_INET); + _target_address = ip_addr_t::any (AF_INET); } zmq::udp_address_t::~udp_address_t () @@ -60,7 +62,7 @@ int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) // No IPv6 support yet bool has_interface = false; - address = name_; + _address = name_; // If we have a semicolon then we should have an interface specifier in the // URL @@ -82,13 +84,13 @@ int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) ip_resolver_t src_resolver (src_resolver_opts); - 
const int rc = src_resolver.resolve (&bind_address, src_name.c_str ()); + const int rc = src_resolver.resolve (&_bind_address, src_name.c_str ()); if (rc != 0) { return -1; } - if (bind_address.is_multicast ()) { + if (_bind_address.is_multicast ()) { // It doesn't make sense to have a multicast address as a source errno = EINVAL; return -1; @@ -100,12 +102,12 @@ int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) // resolve an interface index from an address, so we only support it // when an actual interface name is provided. if (src_name == "*") { - bind_interface = 0; + _bind_interface = 0; } else { - bind_interface = if_nametoindex (src_name.c_str ()); - if (bind_interface == 0) { + _bind_interface = if_nametoindex (src_name.c_str ()); + if (_bind_interface == 0) { // Error, probably not an interface name. - bind_interface = -1; + _bind_interface = -1; } } @@ -123,49 +125,49 @@ int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) ip_resolver_t resolver (resolver_opts); - int rc = resolver.resolve (&target_address, name_); + int rc = resolver.resolve (&_target_address, name_); if (rc != 0) { return -1; } - is_multicast = target_address.is_multicast (); - uint16_t port = target_address.port (); + _is_multicast = _target_address.is_multicast (); + uint16_t port = _target_address.port (); if (has_interface) { // If we have an interface specifier then the target address must be a // multicast address - if (!is_multicast) { + if (!_is_multicast) { errno = EINVAL; return -1; } - bind_address.set_port (port); + _bind_address.set_port (port); } else { // If we don't have an explicit interface specifier then the URL is // ambiguous: if the target address is multicast then it's the // destination address and the bind address is ANY, if it's unicast // then it's the bind address when 'bind_' is true and the destination // otherwise - if (is_multicast || !bind_) { - bind_address = ip_addr_t::any (target_address.family ()); - 
bind_address.set_port (port); - bind_interface = 0; + if (_is_multicast || !bind_) { + _bind_address = ip_addr_t::any (_target_address.family ()); + _bind_address.set_port (port); + _bind_interface = 0; } else { // If we were asked for a bind socket and the address // provided was not multicast then it was really meant as // a bind address and the target_address is useless. - bind_address = target_address; + _bind_address = _target_address; } } - if (bind_address.family () != target_address.family ()) { + if (_bind_address.family () != _target_address.family ()) { errno = EINVAL; return -1; } // For IPv6 multicast we *must* have an interface index since we can't // bind by address. - if (ipv6_ && is_multicast && bind_interface < 0) { + if (ipv6_ && _is_multicast && _bind_interface < 0) { errno = ENODEV; return -1; } @@ -175,32 +177,32 @@ int zmq::udp_address_t::resolve (const char *name_, bool bind_, bool ipv6_) int zmq::udp_address_t::family () const { - return bind_address.family (); + return _bind_address.family (); } bool zmq::udp_address_t::is_mcast () const { - return is_multicast; + return _is_multicast; } const zmq::ip_addr_t *zmq::udp_address_t::bind_addr () const { - return &bind_address; + return &_bind_address; } int zmq::udp_address_t::bind_if () const { - return bind_interface; + return _bind_interface; } const zmq::ip_addr_t *zmq::udp_address_t::target_addr () const { - return &target_address; + return &_target_address; } int zmq::udp_address_t::to_string (std::string &addr_) { // XXX what do (factor TCP code?) 
- addr_ = address; + addr_ = _address; return 0; } diff --git a/src/udp_address.hpp b/src/udp_address.hpp index 6c412e16..c32f7064 100644 --- a/src/udp_address.hpp +++ b/src/udp_address.hpp @@ -62,11 +62,11 @@ class udp_address_t const ip_addr_t *target_addr () const; private: - ip_addr_t bind_address; - int bind_interface; - ip_addr_t target_address; - bool is_multicast; - std::string address; + ip_addr_t _bind_address; + int _bind_interface; + ip_addr_t _target_address; + bool _is_multicast; + std::string _address; }; } diff --git a/src/udp_engine.cpp b/src/udp_engine.cpp index 4d6051ae..a9691930 100644 --- a/src/udp_engine.cpp +++ b/src/udp_engine.cpp @@ -56,30 +56,30 @@ along with this program. If not, see . #endif zmq::udp_engine_t::udp_engine_t (const options_t &options_) : - plugged (false), - fd (-1), - session (NULL), - handle (static_cast (NULL)), - address (NULL), - options (options_), - send_enabled (false), - recv_enabled (false) + _plugged (false), + _fd (-1), + _session (NULL), + _handle (static_cast (NULL)), + _address (NULL), + _options (options_), + _send_enabled (false), + _recv_enabled (false) { } zmq::udp_engine_t::~udp_engine_t () { - zmq_assert (!plugged); + zmq_assert (!_plugged); - if (fd != retired_fd) { + if (_fd != retired_fd) { #ifdef ZMQ_HAVE_WINDOWS - int rc = closesocket (fd); + int rc = closesocket (_fd); wsa_assert (rc != SOCKET_ERROR); #else - int rc = close (fd); + int rc = close (_fd); errno_assert (rc == 0); #endif - fd = retired_fd; + _fd = retired_fd; } } @@ -87,44 +87,44 @@ int zmq::udp_engine_t::init (address_t *address_, bool send_, bool recv_) { zmq_assert (address_); zmq_assert (send_ || recv_); - send_enabled = send_; - recv_enabled = recv_; - address = address_; + _send_enabled = send_; + _recv_enabled = recv_; + _address = address_; - fd = open_socket (address->resolved.udp_addr->family (), SOCK_DGRAM, - IPPROTO_UDP); - if (fd == retired_fd) + _fd = open_socket (_address->resolved.udp_addr->family (), SOCK_DGRAM, + 
IPPROTO_UDP); + if (_fd == retired_fd) return -1; - unblock_socket (fd); + unblock_socket (_fd); return 0; } void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) { - zmq_assert (!plugged); - plugged = true; + zmq_assert (!_plugged); + _plugged = true; - zmq_assert (!session); + zmq_assert (!_session); zmq_assert (session_); - session = session_; + _session = session_; // Connect to I/O threads poller object. io_object_t::plug (io_thread_); - handle = add_fd (fd); + _handle = add_fd (_fd); - const udp_address_t *const udp_addr = address->resolved.udp_addr; + const udp_address_t *const udp_addr = _address->resolved.udp_addr; // Bind the socket to a device if applicable - if (!options.bound_device.empty ()) - bind_to_device (fd, options.bound_device); + if (!_options.bound_device.empty ()) + bind_to_device (_fd, _options.bound_device); - if (send_enabled) { - if (!options.raw_socket) { + if (_send_enabled) { + if (!_options.raw_socket) { const ip_addr_t *out = udp_addr->target_addr (); - out_address = out->as_sockaddr (); - out_addrlen = out->sockaddr_len (); + _out_address = out->as_sockaddr (); + _out_address_len = out->sockaddr_len (); if (out->is_multicast ()) { int level; @@ -138,9 +138,9 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) optname = IP_MULTICAST_LOOP; } - int loop = options.multicast_loop; + int loop = _options.multicast_loop; int rc = - setsockopt (fd, level, optname, + setsockopt (_fd, level, optname, reinterpret_cast (&loop), sizeof (loop)); #ifdef ZMQ_HAVE_WINDOWS @@ -155,7 +155,7 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) if (bind_if > 0) { // If a bind interface is provided we tell the // kernel to use it to send multicast packets - rc = setsockopt (fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, + rc = setsockopt (_fd, IPPROTO_IPV6, IPV6_MULTICAST_IF, reinterpret_cast (&bind_if), sizeof (bind_if)); } else { @@ -166,7 +166,7 @@ void 
zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) udp_addr->bind_addr ()->ipv4.sin_addr; if (bind_addr.s_addr != INADDR_ANY) { - rc = setsockopt (fd, IPPROTO_IP, IP_MULTICAST_IF, + rc = setsockopt (_fd, IPPROTO_IP, IP_MULTICAST_IF, reinterpret_cast (&bind_addr), sizeof (bind_addr)); } else { @@ -182,16 +182,16 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) } } else { /// XXX fixme ? - out_address = reinterpret_cast (&raw_address); - out_addrlen = sizeof (sockaddr_in); + _out_address = reinterpret_cast (&_raw_address); + _out_address_len = sizeof (sockaddr_in); } - set_pollout (handle); + set_pollout (_handle); } - if (recv_enabled) { + if (_recv_enabled) { int on = 1; - int rc = setsockopt (fd, SOL_SOCKET, SO_REUSEADDR, + int rc = setsockopt (_fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast (&on), sizeof (on)); #ifdef ZMQ_HAVE_WINDOWS wsa_assert (rc != SOCKET_ERROR); @@ -216,10 +216,10 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) } #ifdef ZMQ_HAVE_VXWORKS - rc = bind (fd, (sockaddr *) real_bind_addr->as_sockaddr (), + rc = bind (_fd, (sockaddr *) real_bind_addr->as_sockaddr (), real_bind_addr->sockaddr_len ()); #else - rc = bind (fd, real_bind_addr->as_sockaddr (), + rc = bind (_fd, real_bind_addr->as_sockaddr (), real_bind_addr->sockaddr_len ()); #endif @@ -238,13 +238,13 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) mreq.imr_interface = bind_addr->ipv4.sin_addr; rc = - setsockopt (fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, + setsockopt (_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, reinterpret_cast (&mreq), sizeof (mreq)); errno_assert (rc == 0); } else if (mcast_addr->family () == AF_INET6) { struct ipv6_mreq mreq; - int iface = address->resolved.udp_addr->bind_if (); + int iface = _address->resolved.udp_addr->bind_if (); zmq_assert (iface >= -1); @@ -252,7 +252,7 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t 
*session_) mreq.ipv6mr_interface = iface; rc = - setsockopt (fd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, + setsockopt (_fd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, reinterpret_cast (&mreq), sizeof (mreq)); errno_assert (rc == 0); @@ -267,7 +267,7 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) errno_assert (rc == 0); #endif } - set_pollin (handle); + set_pollin (_handle); // Call restart output to drop all join/leave commands restart_output (); @@ -276,10 +276,10 @@ void zmq::udp_engine_t::plug (io_thread_t *io_thread_, session_base_t *session_) void zmq::udp_engine_t::terminate () { - zmq_assert (plugged); - plugged = false; + zmq_assert (_plugged); + _plugged = false; - rm_fd (handle); + rm_fd (_handle); // Disconnect from I/O threads poller object. io_object_t::unplug (); @@ -309,7 +309,7 @@ void zmq::udp_engine_t::sockaddr_to_msg (zmq::msg_t *msg_, sockaddr_in *addr_) int zmq::udp_engine_t::resolve_raw_address (char *name_, size_t length_) { - memset (&raw_address, 0, sizeof raw_address); + memset (&_raw_address, 0, sizeof _raw_address); const char *delimiter = NULL; @@ -340,11 +340,11 @@ int zmq::udp_engine_t::resolve_raw_address (char *name_, size_t length_) return -1; } - raw_address.sin_family = AF_INET; - raw_address.sin_port = htons (port); - raw_address.sin_addr.s_addr = inet_addr (addr_str.c_str ()); + _raw_address.sin_family = AF_INET; + _raw_address.sin_port = htons (port); + _raw_address.sin_addr.s_addr = inet_addr (addr_str.c_str ()); - if (raw_address.sin_addr.s_addr == INADDR_NONE) { + if (_raw_address.sin_addr.s_addr == INADDR_NONE) { errno = EINVAL; return -1; } @@ -355,18 +355,18 @@ int zmq::udp_engine_t::resolve_raw_address (char *name_, size_t length_) void zmq::udp_engine_t::out_event () { msg_t group_msg; - int rc = session->pull_msg (&group_msg); + int rc = _session->pull_msg (&group_msg); errno_assert (rc == 0 || (rc == -1 && errno == EAGAIN)); if (rc == 0) { msg_t body_msg; - rc = session->pull_msg (&body_msg); 
+ rc = _session->pull_msg (&body_msg); size_t group_size = group_msg.size (); size_t body_size = body_msg.size (); size_t size; - if (options.raw_socket) { + if (_options.raw_socket) { rc = resolve_raw_address (static_cast (group_msg.data ()), group_size); @@ -383,14 +383,14 @@ void zmq::udp_engine_t::out_event () size = body_size; - memcpy (out_buffer, body_msg.data (), body_size); + memcpy (_out_buffer, body_msg.data (), body_size); } else { size = group_size + body_size + 1; // TODO: check if larger than maximum size - out_buffer[0] = static_cast (group_size); - memcpy (out_buffer + 1, group_msg.data (), group_size); - memcpy (out_buffer + 1 + group_size, body_msg.data (), body_size); + _out_buffer[0] = static_cast (group_size); + memcpy (_out_buffer + 1, group_msg.data (), group_size); + memcpy (_out_buffer + 1 + group_size, body_msg.data (), body_size); } rc = group_msg.close (); @@ -400,20 +400,20 @@ void zmq::udp_engine_t::out_event () errno_assert (rc == 0); #ifdef ZMQ_HAVE_WINDOWS - rc = sendto (fd, reinterpret_cast (out_buffer), - static_cast (size), 0, out_address, - static_cast (out_addrlen)); + rc = sendto (_fd, reinterpret_cast (_out_buffer), + static_cast (size), 0, _out_address, + static_cast (_out_address_len)); wsa_assert (rc != SOCKET_ERROR); #elif defined ZMQ_HAVE_VXWORKS - rc = sendto (fd, (caddr_t) out_buffer, size, 0, - (sockaddr *) out_address, (int) out_addrlen); + rc = sendto (_fd, (caddr_t) _out_buffer, size, 0, + (sockaddr *) _out_address, (int) _out_address_len); errno_assert (rc != -1); #else - rc = sendto (fd, out_buffer, size, 0, out_address, out_addrlen); + rc = sendto (_fd, _out_buffer, size, 0, _out_address, _out_address_len); errno_assert (rc != -1); #endif } else - reset_pollout (handle); + reset_pollout (_handle); } const char *zmq::udp_engine_t::get_endpoint () const @@ -424,12 +424,12 @@ const char *zmq::udp_engine_t::get_endpoint () const void zmq::udp_engine_t::restart_output () { // If we don't support send we just drop 
all messages - if (!send_enabled) { + if (!_send_enabled) { msg_t msg; - while (session->pull_msg (&msg) == 0) + while (_session->pull_msg (&msg) == 0) msg.close (); } else { - set_pollout (handle); + set_pollout (_handle); out_event (); } } @@ -440,7 +440,7 @@ void zmq::udp_engine_t::in_event () socklen_t in_addrlen = sizeof (sockaddr_storage); #ifdef ZMQ_HAVE_WINDOWS int nbytes = - recvfrom (fd, reinterpret_cast (in_buffer), MAX_UDP_MSG, 0, + recvfrom (_fd, reinterpret_cast (_in_buffer), MAX_UDP_MSG, 0, reinterpret_cast (&in_address), &in_addrlen); const int last_error = WSAGetLastError (); if (nbytes == SOCKET_ERROR) { @@ -449,7 +449,7 @@ void zmq::udp_engine_t::in_event () return; } #elif defined ZMQ_HAVE_VXWORKS - int nbytes = recvfrom (fd, (char *) in_buffer, MAX_UDP_MSG, 0, + int nbytes = recvfrom (_fd, (char *) _in_buffer, MAX_UDP_MSG, 0, (sockaddr *) &in_address, (int *) &in_addrlen); if (nbytes == -1) { errno_assert (errno != EBADF && errno != EFAULT && errno != ENOMEM @@ -457,7 +457,7 @@ void zmq::udp_engine_t::in_event () return; } #else - int nbytes = recvfrom (fd, in_buffer, MAX_UDP_MSG, 0, + int nbytes = recvfrom (_fd, _in_buffer, MAX_UDP_MSG, 0, (sockaddr *) &in_address, &in_addrlen); if (nbytes == -1) { #if !defined(TARGET_OS_IPHONE) || !TARGET_OS_IPHONE @@ -475,7 +475,7 @@ void zmq::udp_engine_t::in_event () int body_offset; msg_t msg; - if (options.raw_socket) { + if (_options.raw_socket) { zmq_assert (in_address.ss_family == AF_INET); sockaddr_to_msg (&msg, reinterpret_cast (&in_address)); @@ -483,8 +483,8 @@ void zmq::udp_engine_t::in_event () body_offset = 0; } else { const char *group_buffer = - reinterpret_cast (in_buffer) + 1; - int group_size = in_buffer[0]; + reinterpret_cast (_in_buffer) + 1; + int group_size = _in_buffer[0]; rc = msg.init_size (group_size); errno_assert (rc == 0); @@ -499,7 +499,7 @@ void zmq::udp_engine_t::in_event () body_offset = 1 + group_size; } // Push group description to session - rc = session->push_msg (&msg); 
+ rc = _session->push_msg (&msg); errno_assert (rc == 0 || (rc == -1 && errno == EAGAIN)); // Group description message doesn't fit in the pipe, drop @@ -507,7 +507,7 @@ void zmq::udp_engine_t::in_event () rc = msg.close (); errno_assert (rc == 0); - reset_pollin (handle); + reset_pollin (_handle); return; } @@ -515,30 +515,30 @@ void zmq::udp_engine_t::in_event () errno_assert (rc == 0); rc = msg.init_size (body_size); errno_assert (rc == 0); - memcpy (msg.data (), in_buffer + body_offset, body_size); + memcpy (msg.data (), _in_buffer + body_offset, body_size); // Push message body to session - rc = session->push_msg (&msg); + rc = _session->push_msg (&msg); // Message body doesn't fit in the pipe, drop and reset session state if (rc != 0) { rc = msg.close (); errno_assert (rc == 0); - session->reset (); - reset_pollin (handle); + _session->reset (); + reset_pollin (_handle); return; } rc = msg.close (); errno_assert (rc == 0); - session->flush (); + _session->flush (); } void zmq::udp_engine_t::restart_input () { - if (!recv_enabled) + if (!_recv_enabled) return; - set_pollin (handle); + set_pollin (_handle); in_event (); } diff --git a/src/udp_engine.hpp b/src/udp_engine.hpp index 488b6ce5..d038c7c5 100644 --- a/src/udp_engine.hpp +++ b/src/udp_engine.hpp @@ -49,23 +49,23 @@ class udp_engine_t : public io_object_t, public i_engine int resolve_raw_address (char *addr_, size_t length_); void sockaddr_to_msg (zmq::msg_t *msg_, sockaddr_in *addr_); - bool plugged; + bool _plugged; - fd_t fd; - session_base_t *session; - handle_t handle; - address_t *address; + fd_t _fd; + session_base_t *_session; + handle_t _handle; + address_t *_address; - options_t options; + options_t _options; - sockaddr_in raw_address; - const struct sockaddr *out_address; - socklen_t out_addrlen; + sockaddr_in _raw_address; + const struct sockaddr *_out_address; + socklen_t _out_address_len; - unsigned char out_buffer[MAX_UDP_MSG]; - unsigned char in_buffer[MAX_UDP_MSG]; - bool send_enabled; 
- bool recv_enabled; + unsigned char _out_buffer[MAX_UDP_MSG]; + unsigned char _in_buffer[MAX_UDP_MSG]; + bool _send_enabled; + bool _recv_enabled; }; } diff --git a/src/v1_decoder.cpp b/src/v1_decoder.cpp index 76e25a3c..af55bd3f 100644 --- a/src/v1_decoder.cpp +++ b/src/v1_decoder.cpp @@ -40,18 +40,18 @@ zmq::v1_decoder_t::v1_decoder_t (size_t bufsize_, int64_t maxmsgsize_) : decoder_base_t (bufsize_), - maxmsgsize (maxmsgsize_) + _max_msg_size (maxmsgsize_) { - int rc = in_progress.init (); + int rc = _in_progress.init (); errno_assert (rc == 0); // At the beginning, read one byte and go to one_byte_size_ready state. - next_step (tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); + next_step (_tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); } zmq::v1_decoder_t::~v1_decoder_t () { - int rc = in_progress.close (); + int rc = _in_progress.close (); errno_assert (rc == 0); } @@ -60,33 +60,33 @@ int zmq::v1_decoder_t::one_byte_size_ready (unsigned char const *) // First byte of size is read. If it is 0xff read 8-byte size. // Otherwise allocate the buffer for message data and read the // message data into it. - if (*tmpbuf == 0xff) - next_step (tmpbuf, 8, &v1_decoder_t::eight_byte_size_ready); + if (*_tmpbuf == 0xff) + next_step (_tmpbuf, 8, &v1_decoder_t::eight_byte_size_ready); else { // There has to be at least one byte (the flags) in the message). 
- if (!*tmpbuf) { + if (!*_tmpbuf) { errno = EPROTO; return -1; } - if (maxmsgsize >= 0 - && static_cast (*tmpbuf - 1) > maxmsgsize) { + if (_max_msg_size >= 0 + && static_cast (*_tmpbuf - 1) > _max_msg_size) { errno = EMSGSIZE; return -1; } - int rc = in_progress.close (); + int rc = _in_progress.close (); assert (rc == 0); - rc = in_progress.init_size (*tmpbuf - 1); + rc = _in_progress.init_size (*_tmpbuf - 1); if (rc != 0) { errno_assert (errno == ENOMEM); - rc = in_progress.init (); + rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } - next_step (tmpbuf, 1, &v1_decoder_t::flags_ready); + next_step (_tmpbuf, 1, &v1_decoder_t::flags_ready); } return 0; } @@ -95,7 +95,7 @@ int zmq::v1_decoder_t::eight_byte_size_ready (unsigned char const *) { // 8-byte payload length is read. Allocate the buffer // for message body and read the message data into it. - const uint64_t payload_length = get_uint64 (tmpbuf); + const uint64_t payload_length = get_uint64 (_tmpbuf); // There has to be at least one byte (the flags) in the message). if (payload_length == 0) { @@ -104,8 +104,8 @@ int zmq::v1_decoder_t::eight_byte_size_ready (unsigned char const *) } // Message size must not exceed the maximum allowed size. 
- if (maxmsgsize >= 0 - && payload_length - 1 > static_cast (maxmsgsize)) { + if (_max_msg_size >= 0 + && payload_length - 1 > static_cast (_max_msg_size)) { errno = EMSGSIZE; return -1; } @@ -118,27 +118,27 @@ int zmq::v1_decoder_t::eight_byte_size_ready (unsigned char const *) const size_t msg_size = static_cast (payload_length - 1); - int rc = in_progress.close (); + int rc = _in_progress.close (); assert (rc == 0); - rc = in_progress.init_size (msg_size); + rc = _in_progress.init_size (msg_size); if (rc != 0) { errno_assert (errno == ENOMEM); - rc = in_progress.init (); + rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } - next_step (tmpbuf, 1, &v1_decoder_t::flags_ready); + next_step (_tmpbuf, 1, &v1_decoder_t::flags_ready); return 0; } int zmq::v1_decoder_t::flags_ready (unsigned char const *) { // Store the flags from the wire into the message structure. - in_progress.set_flags (tmpbuf[0] & msg_t::more); + _in_progress.set_flags (_tmpbuf[0] & msg_t::more); - next_step (in_progress.data (), in_progress.size (), + next_step (_in_progress.data (), _in_progress.size (), &v1_decoder_t::message_ready); return 0; @@ -148,6 +148,6 @@ int zmq::v1_decoder_t::message_ready (unsigned char const *) { // Message is completely read. Push it further and start reading // new message. (in_progress is a 0-byte message after this point.) 
- next_step (tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); + next_step (_tmpbuf, 1, &v1_decoder_t::one_byte_size_ready); return 1; } diff --git a/src/v1_decoder.hpp b/src/v1_decoder.hpp index 6ef5f2c7..ca290394 100644 --- a/src/v1_decoder.hpp +++ b/src/v1_decoder.hpp @@ -42,7 +42,7 @@ class v1_decoder_t : public decoder_base_t v1_decoder_t (size_t bufsize_, int64_t maxmsgsize_); ~v1_decoder_t (); - virtual msg_t *msg () { return &in_progress; } + virtual msg_t *msg () { return &_in_progress; } private: int one_byte_size_ready (unsigned char const *); @@ -50,10 +50,10 @@ class v1_decoder_t : public decoder_base_t int flags_ready (unsigned char const *); int message_ready (unsigned char const *); - unsigned char tmpbuf[8]; - msg_t in_progress; + unsigned char _tmpbuf[8]; + msg_t _in_progress; - const int64_t maxmsgsize; + const int64_t _max_msg_size; v1_decoder_t (const v1_decoder_t &); void operator= (const v1_decoder_t &); diff --git a/src/v1_encoder.cpp b/src/v1_encoder.cpp index 672071fa..faca3516 100644 --- a/src/v1_encoder.cpp +++ b/src/v1_encoder.cpp @@ -63,13 +63,13 @@ void zmq::v1_encoder_t::message_ready () // For longer messages write 0xff escape character followed by 8-byte // message size. In both cases 'flags' field follows. 
if (size < 255) { - tmpbuf[0] = static_cast (size); - tmpbuf[1] = (in_progress->flags () & msg_t::more); - next_step (tmpbuf, 2, &v1_encoder_t::size_ready, false); + _tmpbuf[0] = static_cast (size); + _tmpbuf[1] = (in_progress->flags () & msg_t::more); + next_step (_tmpbuf, 2, &v1_encoder_t::size_ready, false); } else { - tmpbuf[0] = 0xff; - put_uint64 (tmpbuf + 1, size); - tmpbuf[9] = (in_progress->flags () & msg_t::more); - next_step (tmpbuf, 10, &v1_encoder_t::size_ready, false); + _tmpbuf[0] = 0xff; + put_uint64 (_tmpbuf + 1, size); + _tmpbuf[9] = (in_progress->flags () & msg_t::more); + next_step (_tmpbuf, 10, &v1_encoder_t::size_ready, false); } } diff --git a/src/v1_encoder.hpp b/src/v1_encoder.hpp index 974740e7..a16f9a9b 100644 --- a/src/v1_encoder.hpp +++ b/src/v1_encoder.hpp @@ -46,7 +46,7 @@ class v1_encoder_t : public encoder_base_t void size_ready (); void message_ready (); - unsigned char tmpbuf[10]; + unsigned char _tmpbuf[10]; v1_encoder_t (const v1_encoder_t &); const v1_encoder_t &operator= (const v1_encoder_t &); diff --git a/src/v2_decoder.cpp b/src/v2_decoder.cpp index fff7ec2c..3c29da2d 100644 --- a/src/v2_decoder.cpp +++ b/src/v2_decoder.cpp @@ -42,51 +42,51 @@ zmq::v2_decoder_t::v2_decoder_t (size_t bufsize_, int64_t maxmsgsize_, bool zero_copy_) : decoder_base_t (bufsize_), - msg_flags (0), - zero_copy (zero_copy_), - maxmsgsize (maxmsgsize_) + _msg_flags (0), + _zero_copy (zero_copy_), + _max_msg_size (maxmsgsize_) { - int rc = in_progress.init (); + int rc = _in_progress.init (); errno_assert (rc == 0); // At the beginning, read one byte and go to flags_ready state. 
- next_step (tmpbuf, 1, &v2_decoder_t::flags_ready); + next_step (_tmpbuf, 1, &v2_decoder_t::flags_ready); } zmq::v2_decoder_t::~v2_decoder_t () { - int rc = in_progress.close (); + int rc = _in_progress.close (); errno_assert (rc == 0); } int zmq::v2_decoder_t::flags_ready (unsigned char const *) { - msg_flags = 0; - if (tmpbuf[0] & v2_protocol_t::more_flag) - msg_flags |= msg_t::more; - if (tmpbuf[0] & v2_protocol_t::command_flag) - msg_flags |= msg_t::command; + _msg_flags = 0; + if (_tmpbuf[0] & v2_protocol_t::more_flag) + _msg_flags |= msg_t::more; + if (_tmpbuf[0] & v2_protocol_t::command_flag) + _msg_flags |= msg_t::command; // The payload length is either one or eight bytes, // depending on whether the 'large' bit is set. - if (tmpbuf[0] & v2_protocol_t::large_flag) - next_step (tmpbuf, 8, &v2_decoder_t::eight_byte_size_ready); + if (_tmpbuf[0] & v2_protocol_t::large_flag) + next_step (_tmpbuf, 8, &v2_decoder_t::eight_byte_size_ready); else - next_step (tmpbuf, 1, &v2_decoder_t::one_byte_size_ready); + next_step (_tmpbuf, 1, &v2_decoder_t::one_byte_size_ready); return 0; } int zmq::v2_decoder_t::one_byte_size_ready (unsigned char const *read_from_) { - return size_ready (tmpbuf[0], read_from_); + return size_ready (_tmpbuf[0], read_from_); } int zmq::v2_decoder_t::eight_byte_size_ready (unsigned char const *read_from_) { // The payload size is encoded as 64-bit unsigned integer. // The most significant byte comes first. - const uint64_t msg_size = get_uint64 (tmpbuf); + const uint64_t msg_size = get_uint64 (_tmpbuf); return size_ready (msg_size, read_from_); } @@ -95,8 +95,8 @@ int zmq::v2_decoder_t::size_ready (uint64_t msg_size_, unsigned char const *read_pos_) { // Message size must not exceed the maximum allowed size. 
- if (maxmsgsize >= 0) - if (unlikely (msg_size_ > static_cast (maxmsgsize))) { + if (_max_msg_size >= 0) + if (unlikely (msg_size_ > static_cast (_max_msg_size))) { errno = EMSGSIZE; return -1; } @@ -107,31 +107,31 @@ int zmq::v2_decoder_t::size_ready (uint64_t msg_size_, return -1; } - int rc = in_progress.close (); + int rc = _in_progress.close (); assert (rc == 0); // the current message can exceed the current buffer. We have to copy the buffer // data into a new message and complete it in the next receive. shared_message_memory_allocator &allocator = get_allocator (); - if (unlikely (!zero_copy + if (unlikely (!_zero_copy || ((unsigned char *) read_pos_ + msg_size_ > (allocator.data () + allocator.size ())))) { // a new message has started, but the size would exceed the pre-allocated arena // this happens every time when a message does not fit completely into the buffer - rc = in_progress.init_size (static_cast (msg_size_)); + rc = _in_progress.init_size (static_cast (msg_size_)); } else { // construct message using n bytes from the buffer as storage // increase buffer ref count // if the message will be a large message, pass a valid refcnt memory location as well rc = - in_progress.init (const_cast (read_pos_), - static_cast (msg_size_), - shared_message_memory_allocator::call_dec_ref, - allocator.buffer (), allocator.provide_content ()); + _in_progress.init (const_cast (read_pos_), + static_cast (msg_size_), + shared_message_memory_allocator::call_dec_ref, + allocator.buffer (), allocator.provide_content ()); // For small messages, data has been copied and refcount does not have to be increased - if (in_progress.is_zcmsg ()) { + if (_in_progress.is_zcmsg ()) { allocator.advance_content (); allocator.inc_ref (); } @@ -139,20 +139,20 @@ int zmq::v2_decoder_t::size_ready (uint64_t msg_size_, if (unlikely (rc)) { errno_assert (errno == ENOMEM); - rc = in_progress.init (); + rc = _in_progress.init (); errno_assert (rc == 0); errno = ENOMEM; return -1; } - 
in_progress.set_flags (msg_flags); + _in_progress.set_flags (_msg_flags); // this sets read_pos to // the message data address if the data needs to be copied // for small message / messages exceeding the current buffer // or // to the current start address in the buffer because the message // was constructed to use n bytes from the address passed as argument - next_step (in_progress.data (), in_progress.size (), + next_step (_in_progress.data (), _in_progress.size (), &v2_decoder_t::message_ready); return 0; @@ -162,6 +162,6 @@ int zmq::v2_decoder_t::message_ready (unsigned char const *) { // Message is completely read. Signal this to the caller // and prepare to decode next message. - next_step (tmpbuf, 1, &v2_decoder_t::flags_ready); + next_step (_tmpbuf, 1, &v2_decoder_t::flags_ready); return 1; } diff --git a/src/v2_decoder.hpp b/src/v2_decoder.hpp index 1d1a81be..fe3b61fb 100644 --- a/src/v2_decoder.hpp +++ b/src/v2_decoder.hpp @@ -46,7 +46,7 @@ class v2_decoder_t virtual ~v2_decoder_t (); // i_decoder interface. - virtual msg_t *msg () { return &in_progress; } + virtual msg_t *msg () { return &_in_progress; } private: int flags_ready (unsigned char const *); @@ -56,12 +56,12 @@ class v2_decoder_t int size_ready (uint64_t size_, unsigned char const *); - unsigned char tmpbuf[8]; - unsigned char msg_flags; - msg_t in_progress; + unsigned char _tmpbuf[8]; + unsigned char _msg_flags; + msg_t _in_progress; - const bool zero_copy; - const int64_t maxmsgsize; + const bool _zero_copy; + const int64_t _max_msg_size; v2_decoder_t (const v2_decoder_t &); void operator= (const v2_decoder_t &); diff --git a/src/v2_encoder.cpp b/src/v2_encoder.cpp index 8efc4c5b..893b7d7e 100644 --- a/src/v2_encoder.cpp +++ b/src/v2_encoder.cpp @@ -48,7 +48,7 @@ zmq::v2_encoder_t::~v2_encoder_t () void zmq::v2_encoder_t::message_ready () { // Encode flags. 
- unsigned char &protocol_flags = tmpbuf[0]; + unsigned char &protocol_flags = _tmp_buf[0]; protocol_flags = 0; if (in_progress->flags () & msg_t::more) protocol_flags |= v2_protocol_t::more_flag; @@ -62,11 +62,11 @@ void zmq::v2_encoder_t::message_ready () // messages, 64-bit unsigned integer in network byte order is used. const size_t size = in_progress->size (); if (unlikely (size > 255)) { - put_uint64 (tmpbuf + 1, size); - next_step (tmpbuf, 9, &v2_encoder_t::size_ready, false); + put_uint64 (_tmp_buf + 1, size); + next_step (_tmp_buf, 9, &v2_encoder_t::size_ready, false); } else { - tmpbuf[1] = static_cast (size); - next_step (tmpbuf, 2, &v2_encoder_t::size_ready, false); + _tmp_buf[1] = static_cast (size); + next_step (_tmp_buf, 2, &v2_encoder_t::size_ready, false); } } diff --git a/src/v2_encoder.hpp b/src/v2_encoder.hpp index c7efc604..746a061c 100644 --- a/src/v2_encoder.hpp +++ b/src/v2_encoder.hpp @@ -46,7 +46,7 @@ class v2_encoder_t : public encoder_base_t void size_ready (); void message_ready (); - unsigned char tmpbuf[9]; + unsigned char _tmp_buf[9]; v2_encoder_t (const v2_encoder_t &); const v2_encoder_t &operator= (const v2_encoder_t &); diff --git a/src/xpub.cpp b/src/xpub.cpp index a7fcda89..d35a26c0 100644 --- a/src/xpub.cpp +++ b/src/xpub.cpp @@ -39,39 +39,39 @@ zmq::xpub_t::xpub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - verbose_subs (false), - verbose_unsubs (false), - more (false), - lossy (true), - manual (false), - pending_pipes (), - welcome_msg () + _verbose_subs (false), + _verbose_unsubs (false), + _more (false), + _lossy (true), + _manual (false), + _pending_pipes (), + _welcome_msg () { - last_pipe = NULL; + _last_pipe = NULL; options.type = ZMQ_XPUB; - welcome_msg.init (); + _welcome_msg.init (); } zmq::xpub_t::~xpub_t () { - welcome_msg.close (); + _welcome_msg.close (); } void zmq::xpub_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) { zmq_assert (pipe_); - dist.attach 
(pipe_); + _dist.attach (pipe_); // If subscribe_to_all_ is specified, the caller would like to subscribe // to all data on this pipe, implicitly. if (subscribe_to_all_) - subscriptions.add (NULL, 0, pipe_); + _subscriptions.add (NULL, 0, pipe_); // if welcome message exists, send a copy of it - if (welcome_msg.size () > 0) { + if (_welcome_msg.size () > 0) { msg_t copy; copy.init (); - int rc = copy.copy (welcome_msg); + int rc = copy.copy (_welcome_msg); errno_assert (rc == 0); bool ok = pipe_->write (&copy); zmq_assert (ok); @@ -94,51 +94,51 @@ void zmq::xpub_t::xread_activated (pipe_t *pipe_) const size_t size = sub.size (); metadata_t *metadata = sub.metadata (); if (size > 0 && (*data == 0 || *data == 1)) { - if (manual) { + if (_manual) { // Store manual subscription to use on termination if (*data == 0) - manual_subscriptions.rm (data + 1, size - 1, pipe_); + _manual_subscriptions.rm (data + 1, size - 1, pipe_); else - manual_subscriptions.add (data + 1, size - 1, pipe_); + _manual_subscriptions.add (data + 1, size - 1, pipe_); - pending_pipes.push_back (pipe_); - pending_data.push_back (blob_t (data, size)); + _pending_pipes.push_back (pipe_); + _pending_data.push_back (blob_t (data, size)); if (metadata) metadata->add_ref (); - pending_metadata.push_back (metadata); - pending_flags.push_back (0); + _pending_metadata.push_back (metadata); + _pending_flags.push_back (0); } else { bool notify; if (*data == 0) { mtrie_t::rm_result rm_result = - subscriptions.rm (data + 1, size - 1, pipe_); + _subscriptions.rm (data + 1, size - 1, pipe_); // TODO reconsider what to do if rm_result == mtrie_t::not_found notify = - rm_result != mtrie_t::values_remain || verbose_unsubs; + rm_result != mtrie_t::values_remain || _verbose_unsubs; } else { bool first_added = - subscriptions.add (data + 1, size - 1, pipe_); - notify = first_added || verbose_subs; + _subscriptions.add (data + 1, size - 1, pipe_); + notify = first_added || _verbose_subs; } // If the request was a new
subscription, or the subscription // was removed, or verbose mode is enabled, store it so that // it can be passed to the user on next recv call. if (options.type == ZMQ_XPUB && notify) { - pending_data.push_back (blob_t (data, size)); + _pending_data.push_back (blob_t (data, size)); if (metadata) metadata->add_ref (); - pending_metadata.push_back (metadata); - pending_flags.push_back (0); + _pending_metadata.push_back (metadata); + _pending_flags.push_back (0); } } } else { // Process user message coming upstream from xsub socket - pending_data.push_back (blob_t (data, size)); + _pending_data.push_back (blob_t (data, size)); if (metadata) metadata->add_ref (); - pending_metadata.push_back (metadata); - pending_flags.push_back (sub.flags ()); + _pending_metadata.push_back (metadata); + _pending_flags.push_back (sub.flags ()); } sub.close (); } @@ -146,7 +146,7 @@ void zmq::xpub_t::xread_activated (pipe_t *pipe_) void zmq::xpub_t::xwrite_activated (pipe_t *pipe_) { - dist.activated (pipe_); + _dist.activated (pipe_); } int zmq::xpub_t::xsetsockopt (int option_, @@ -161,34 +161,35 @@ int zmq::xpub_t::xsetsockopt (int option_, return -1; } if (option_ == ZMQ_XPUB_VERBOSE) { - verbose_subs = (*static_cast (optval_) != 0); - verbose_unsubs = false; + _verbose_subs = (*static_cast (optval_) != 0); + _verbose_unsubs = false; } else if (option_ == ZMQ_XPUB_VERBOSER) { - verbose_subs = (*static_cast (optval_) != 0); - verbose_unsubs = verbose_subs; + _verbose_subs = (*static_cast (optval_) != 0); + _verbose_unsubs = _verbose_subs; } else if (option_ == ZMQ_XPUB_NODROP) - lossy = (*static_cast (optval_) == 0); + _lossy = (*static_cast (optval_) == 0); else if (option_ == ZMQ_XPUB_MANUAL) - manual = (*static_cast (optval_) != 0); - } else if (option_ == ZMQ_SUBSCRIBE && manual) { - if (last_pipe != NULL) - subscriptions.add ((unsigned char *) optval_, optvallen_, - last_pipe); - } else if (option_ == ZMQ_UNSUBSCRIBE && manual) { - if (last_pipe != NULL) - subscriptions.rm 
((unsigned char *) optval_, optvallen_, last_pipe); + _manual = (*static_cast (optval_) != 0); + } else if (option_ == ZMQ_SUBSCRIBE && _manual) { + if (_last_pipe != NULL) + _subscriptions.add ((unsigned char *) optval_, optvallen_, + _last_pipe); + } else if (option_ == ZMQ_UNSUBSCRIBE && _manual) { + if (_last_pipe != NULL) + _subscriptions.rm ((unsigned char *) optval_, optvallen_, + _last_pipe); } else if (option_ == ZMQ_XPUB_WELCOME_MSG) { - welcome_msg.close (); + _welcome_msg.close (); if (optvallen_ > 0) { - int rc = welcome_msg.init_size (optvallen_); + int rc = _welcome_msg.init_size (optvallen_); errno_assert (rc == 0); unsigned char *data = - static_cast (welcome_msg.data ()); + static_cast (_welcome_msg.data ()); memcpy (data, optval_, optvallen_); } else - welcome_msg.init (); + _welcome_msg.init (); } else { errno = EINVAL; return -1; @@ -205,27 +206,27 @@ static void stub (zmq::mtrie_t::prefix_t data_, size_t size_, void *arg_) void zmq::xpub_t::xpipe_terminated (pipe_t *pipe_) { - if (manual) { + if (_manual) { // Remove the pipe from the trie and send corresponding manual // unsubscriptions upstream. - manual_subscriptions.rm (pipe_, send_unsubscription, this, false); + _manual_subscriptions.rm (pipe_, send_unsubscription, this, false); // Remove pipe without actually sending the message as it was taken // care of by the manual call above. subscriptions is the real mtrie, // so the pipe must be removed from there or it will be left over. - subscriptions.rm (pipe_, stub, (void *) NULL, false); + _subscriptions.rm (pipe_, stub, (void *) NULL, false); } else { // Remove the pipe from the trie. If there are topics that nobody // is interested in anymore, send corresponding unsubscriptions // upstream. 
- subscriptions.rm (pipe_, send_unsubscription, this, !verbose_unsubs); + _subscriptions.rm (pipe_, send_unsubscription, this, !_verbose_unsubs); } - dist.pipe_terminated (pipe_); + _dist.pipe_terminated (pipe_); } void zmq::xpub_t::mark_as_matching (pipe_t *pipe_, xpub_t *self_) { - self_->dist.match (pipe_); + self_->_dist.match (pipe_); } int zmq::xpub_t::xsend (msg_t *msg_) @@ -233,23 +234,23 @@ int zmq::xpub_t::xsend (msg_t *msg_) bool msg_more = (msg_->flags () & msg_t::more) != 0; // For the first part of multi-part message, find the matching pipes. - if (!more) { - subscriptions.match (static_cast (msg_->data ()), - msg_->size (), mark_as_matching, this); + if (!_more) { + _subscriptions.match (static_cast (msg_->data ()), + msg_->size (), mark_as_matching, this); // If inverted matching is used, reverse the selection now if (options.invert_matching) { - dist.reverse_match (); + _dist.reverse_match (); } } int rc = -1; // Assume we fail - if (lossy || dist.check_hwm ()) { - if (dist.send_to_matching (msg_) == 0) { + if (_lossy || _dist.check_hwm ()) { + if (_dist.send_to_matching (msg_) == 0) { // If we are at the end of multi-part message we can mark // all the pipes as non-matching. 
if (!msg_more) - dist.unmatch (); - more = msg_more; + _dist.unmatch (); + _more = msg_more; rc = 0; // Yay, sent successfully } } else @@ -259,47 +260,47 @@ int zmq::xpub_t::xsend (msg_t *msg_) bool zmq::xpub_t::xhas_out () { - return dist.has_out (); + return _dist.has_out (); } int zmq::xpub_t::xrecv (msg_t *msg_) { // If there is at least one - if (pending_data.empty ()) { + if (_pending_data.empty ()) { errno = EAGAIN; return -1; } // User is reading a message, set last_pipe and remove it from the deque - if (manual && !pending_pipes.empty ()) { - last_pipe = pending_pipes.front (); - pending_pipes.pop_front (); + if (_manual && !_pending_pipes.empty ()) { + _last_pipe = _pending_pipes.front (); + _pending_pipes.pop_front (); } int rc = msg_->close (); errno_assert (rc == 0); - rc = msg_->init_size (pending_data.front ().size ()); + rc = msg_->init_size (_pending_data.front ().size ()); errno_assert (rc == 0); - memcpy (msg_->data (), pending_data.front ().data (), - pending_data.front ().size ()); + memcpy (msg_->data (), _pending_data.front ().data (), + _pending_data.front ().size ()); // set metadata only if there is some - if (metadata_t *metadata = pending_metadata.front ()) { + if (metadata_t *metadata = _pending_metadata.front ()) { msg_->set_metadata (metadata); // Remove ref corresponding to vector placement metadata->drop_ref (); } - msg_->set_flags (pending_flags.front ()); - pending_data.pop_front (); - pending_metadata.pop_front (); - pending_flags.pop_front (); + msg_->set_flags (_pending_flags.front ()); + _pending_data.pop_front (); + _pending_metadata.pop_front (); + _pending_flags.pop_front (); return 0; } bool zmq::xpub_t::xhas_in () { - return !pending_data.empty (); + return !_pending_data.empty (); } void zmq::xpub_t::send_unsubscription (zmq::mtrie_t::prefix_t data_, @@ -313,13 +314,13 @@ void zmq::xpub_t::send_unsubscription (zmq::mtrie_t::prefix_t data_, *unsub.data () = 0; if (size_ > 0) memcpy (unsub.data () + 1, data_, size_); - 
self_->pending_data.ZMQ_PUSH_OR_EMPLACE_BACK (ZMQ_MOVE (unsub)); - self_->pending_metadata.push_back (NULL); - self_->pending_flags.push_back (0); + self_->_pending_data.ZMQ_PUSH_OR_EMPLACE_BACK (ZMQ_MOVE (unsub)); + self_->_pending_metadata.push_back (NULL); + self_->_pending_flags.push_back (0); - if (self_->manual) { - self_->last_pipe = NULL; - self_->pending_pipes.push_back (NULL); + if (self_->_manual) { + self_->_last_pipe = NULL; + self_->_pending_pipes.push_back (NULL); } } } diff --git a/src/xpub.hpp b/src/xpub.hpp index c552732e..757c700b 100644 --- a/src/xpub.hpp +++ b/src/xpub.hpp @@ -72,45 +72,45 @@ class xpub_t : public socket_base_t static void mark_as_matching (zmq::pipe_t *pipe_, xpub_t *arg_); // List of all subscriptions mapped to corresponding pipes. - mtrie_t subscriptions; + mtrie_t _subscriptions; // List of manual subscriptions mapped to corresponding pipes. - mtrie_t manual_subscriptions; + mtrie_t _manual_subscriptions; // Distributor of messages holding the list of outbound pipes. - dist_t dist; + dist_t _dist; // If true, send all subscription messages upstream, not just // unique ones - bool verbose_subs; + bool _verbose_subs; // If true, send all unsubscription messages upstream, not just // unique ones - bool verbose_unsubs; + bool _verbose_unsubs; // True if we are in the middle of sending a multi-part message. 
- bool more; + bool _more; // Drop messages if HWM reached, otherwise return with EAGAIN - bool lossy; + bool _lossy; // Subscriptions will not bed added automatically, only after calling set option with ZMQ_SUBSCRIBE or ZMQ_UNSUBSCRIBE - bool manual; + bool _manual; // Last pipe that sent subscription message, only used if xpub is on manual - pipe_t *last_pipe; + pipe_t *_last_pipe; // Pipes that sent subscriptions messages that have not yet been processed, only used if xpub is on manual - std::deque pending_pipes; + std::deque _pending_pipes; // Welcome message to send to pipe when attached - msg_t welcome_msg; + msg_t _welcome_msg; // List of pending (un)subscriptions, ie. those that were already // applied to the trie, but not yet received by the user. - std::deque pending_data; - std::deque pending_metadata; - std::deque pending_flags; + std::deque _pending_data; + std::deque _pending_metadata; + std::deque _pending_flags; xpub_t (const xpub_t &); const xpub_t &operator= (const xpub_t &); diff --git a/src/xsub.cpp b/src/xsub.cpp index 2a9dfd21..2de77a49 100644 --- a/src/xsub.cpp +++ b/src/xsub.cpp @@ -36,8 +36,8 @@ zmq::xsub_t::xsub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : socket_base_t (parent_, tid_, sid_), - has_message (false), - more (false) + _has_message (false), + _more (false) { options.type = ZMQ_XSUB; @@ -45,13 +45,13 @@ zmq::xsub_t::xsub_t (class ctx_t *parent_, uint32_t tid_, int sid_) : // subscription commands are sent to the wire. 
options.linger.store (0); - int rc = message.init (); + int rc = _message.init (); errno_assert (rc == 0); } zmq::xsub_t::~xsub_t () { - int rc = message.close (); + int rc = _message.close (); errno_assert (rc == 0); } @@ -60,34 +60,34 @@ void zmq::xsub_t::xattach_pipe (pipe_t *pipe_, bool subscribe_to_all_) LIBZMQ_UNUSED (subscribe_to_all_); zmq_assert (pipe_); - fq.attach (pipe_); - dist.attach (pipe_); + _fq.attach (pipe_); + _dist.attach (pipe_); // Send all the cached subscriptions to the new upstream peer. - subscriptions.apply (send_subscription, pipe_); + _subscriptions.apply (send_subscription, pipe_); pipe_->flush (); } void zmq::xsub_t::xread_activated (pipe_t *pipe_) { - fq.activated (pipe_); + _fq.activated (pipe_); } void zmq::xsub_t::xwrite_activated (pipe_t *pipe_) { - dist.activated (pipe_); + _dist.activated (pipe_); } void zmq::xsub_t::xpipe_terminated (pipe_t *pipe_) { - fq.pipe_terminated (pipe_); - dist.pipe_terminated (pipe_); + _fq.pipe_terminated (pipe_); + _dist.pipe_terminated (pipe_); } void zmq::xsub_t::xhiccuped (pipe_t *pipe_) { // Send all the cached subscriptions to the hiccuped pipe. - subscriptions.apply (send_subscription, pipe_); + _subscriptions.apply (send_subscription, pipe_); pipe_->flush (); } @@ -102,16 +102,16 @@ int zmq::xsub_t::xsend (msg_t *msg_) // however this is alread done on the XPUB side and // doing it here as well breaks ZMQ_XPUB_VERBOSE // when there are forwarding devices involved. 
- subscriptions.add (data + 1, size - 1); - return dist.send_to_all (msg_); + _subscriptions.add (data + 1, size - 1); + return _dist.send_to_all (msg_); } if (size > 0 && *data == 0) { // Process unsubscribe message - if (subscriptions.rm (data + 1, size - 1)) - return dist.send_to_all (msg_); + if (_subscriptions.rm (data + 1, size - 1)) + return _dist.send_to_all (msg_); } else // User message sent upstream to XPUB socket - return dist.send_to_all (msg_); + return _dist.send_to_all (msg_); int rc = msg_->close (); errno_assert (rc == 0); @@ -131,11 +131,11 @@ int zmq::xsub_t::xrecv (msg_t *msg_) { // If there's already a message prepared by a previous call to zmq_poll, // return it straight ahead. - if (has_message) { - int rc = msg_->move (message); + if (_has_message) { + int rc = msg_->move (_message); errno_assert (rc == 0); - has_message = false; - more = (msg_->flags () & msg_t::more) != 0; + _has_message = false; + _more = (msg_->flags () & msg_t::more) != 0; return 0; } @@ -144,7 +144,7 @@ int zmq::xsub_t::xrecv (msg_t *msg_) // semantics. while (true) { // Get a message using fair queueing algorithm. - int rc = fq.recv (msg_); + int rc = _fq.recv (msg_); // If there's no message available, return immediately. // The same when error occurs. @@ -153,15 +153,15 @@ int zmq::xsub_t::xrecv (msg_t *msg_) // Check whether the message matches at least one subscription. // Non-initial parts of the message are passed - if (more || !options.filter || match (msg_)) { - more = (msg_->flags () & msg_t::more) != 0; + if (_more || !options.filter || match (msg_)) { + _more = (msg_->flags () & msg_t::more) != 0; return 0; } // Message doesn't match. Pop any remaining parts of the message // from the pipe. 
while (msg_->flags () & msg_t::more) { - rc = fq.recv (msg_); + rc = _fq.recv (msg_); errno_assert (rc == 0); } } @@ -170,19 +170,19 @@ int zmq::xsub_t::xrecv (msg_t *msg_) bool zmq::xsub_t::xhas_in () { // There are subsequent parts of the partly-read message available. - if (more) + if (_more) return true; // If there's already a message prepared by a previous call to zmq_poll, // return straight ahead. - if (has_message) + if (_has_message) return true; // TODO: This can result in infinite loop in the case of continuous // stream of non-matching messages. while (true) { // Get a message using fair queueing algorithm. - int rc = fq.recv (&message); + int rc = _fq.recv (&_message); // If there's no message available, return immediately. // The same when error occurs. @@ -192,15 +192,15 @@ bool zmq::xsub_t::xhas_in () } // Check whether the message matches at least one subscription. - if (!options.filter || match (&message)) { - has_message = true; + if (!options.filter || match (&_message)) { + _has_message = true; return true; } // Message doesn't match. Pop any remaining parts of the message // from the pipe. - while (message.flags () & msg_t::more) { - rc = fq.recv (&message); + while (_message.flags () & msg_t::more) { + rc = _fq.recv (&_message); errno_assert (rc == 0); } } @@ -208,12 +208,12 @@ bool zmq::xsub_t::xhas_in () const zmq::blob_t &zmq::xsub_t::get_credential () const { - return fq.get_credential (); + return _fq.get_credential (); } bool zmq::xsub_t::match (msg_t *msg_) { - bool matching = subscriptions.check ( + bool matching = _subscriptions.check ( static_cast (msg_->data ()), msg_->size ()); return matching ^ options.invert_matching; diff --git a/src/xsub.hpp b/src/xsub.hpp index 481292cd..cc50224d 100644 --- a/src/xsub.hpp +++ b/src/xsub.hpp @@ -71,22 +71,22 @@ class xsub_t : public socket_base_t send_subscription (unsigned char *data_, size_t size_, void *arg_); // Fair queueing object for inbound pipes. 
- fq_t fq; + fq_t _fq; // Object for distributing the subscriptions upstream. - dist_t dist; + dist_t _dist; // The repository of subscriptions. - trie_t subscriptions; + trie_t _subscriptions; // If true, 'message' contains a matching message to return on the // next recv call. - bool has_message; - msg_t message; + bool _has_message; + msg_t _message; // If true, part of a multipart message was already received, but // there are following parts still waiting. - bool more; + bool _more; xsub_t (const xsub_t &); const xsub_t &operator= (const xsub_t &); diff --git a/src/ypipe.hpp b/src/ypipe.hpp index 5293f56b..0f2b9e72 100644 --- a/src/ypipe.hpp +++ b/src/ypipe.hpp @@ -50,12 +50,12 @@ template class ypipe_t : public ypipe_base_t inline ypipe_t () { // Insert terminator element into the queue. - queue.push (); + _queue.push (); // Let all the pointers to point to the terminator. // (unless pipe is dead, in which case c is set to NULL). - r = w = f = &queue.back (); - c.set (&queue.back ()); + _r = _w = _f = &_queue.back (); + _c.set (&_queue.back ()); } // The destructor doesn't have to be virtual. It is made virtual @@ -78,12 +78,12 @@ template class ypipe_t : public ypipe_base_t inline void write (const T &value_, bool incomplete_) { // Place the value to the queue, add new terminator element. - queue.back () = value_; - queue.push (); + _queue.back () = value_; + _queue.push (); // Move the "flush up to here" poiter. if (!incomplete_) - f = &queue.back (); + _f = &_queue.back (); } #ifdef ZMQ_HAVE_OPENVMS @@ -94,10 +94,10 @@ template class ypipe_t : public ypipe_base_t // item exists, false otherwise. inline bool unwrite (T *value_) { - if (f == &queue.back ()) + if (_f == &_queue.back ()) return false; - queue.unpush (); - *value_ = queue.back (); + _queue.unpush (); + *value_ = _queue.back (); return true; } @@ -107,24 +107,24 @@ template class ypipe_t : public ypipe_base_t inline bool flush () { // If there are no un-flushed items, do nothing. 
- if (w == f) + if (_w == _f) return true; // Try to set 'c' to 'f'. - if (c.cas (w, f) != w) { + if (_c.cas (_w, _f) != _w) { // Compare-and-swap was unseccessful because 'c' is NULL. // This means that the reader is asleep. Therefore we don't // care about thread-safeness and update c in non-atomic // manner. We'll return false to let the caller know // that reader is sleeping. - c.set (f); - w = f; + _c.set (_f); + _w = _f; return false; } // Reader is alive. Nothing special to do now. Just move // the 'first un-flushed item' pointer to 'f'. - w = f; + _w = _f; return true; } @@ -132,20 +132,20 @@ template class ypipe_t : public ypipe_base_t inline bool check_read () { // Was the value prefetched already? If so, return. - if (&queue.front () != r && r) + if (&_queue.front () != _r && _r) return true; // There's no prefetched value, so let us prefetch more values. // Prefetching is to simply retrieve the // pointer from c in atomic fashion. If there are no // items to prefetch, set c to NULL (using compare-and-swap). - r = c.cas (&queue.front (), NULL); + _r = _c.cas (&_queue.front (), NULL); // If there are no elements prefetched, exit. // During pipe's lifetime r should never be NULL, however, // it can happen during pipe shutdown when items // are being deallocated. - if (&queue.front () == r || !r) + if (&_queue.front () == _r || !_r) return false; // There was at least one value prefetched. @@ -162,8 +162,8 @@ template class ypipe_t : public ypipe_base_t // There was at least one value prefetched. // Return it to the caller. 
- *value_ = queue.front (); - queue.pop (); + *value_ = _queue.front (); + _queue.pop (); return true; } @@ -175,7 +175,7 @@ template class ypipe_t : public ypipe_base_t bool rc = check_read (); zmq_assert (rc); - return (*fn_) (queue.front ()); + return (*fn_) (_queue.front ()); } protected: @@ -183,24 +183,24 @@ template class ypipe_t : public ypipe_base_t // Front of the queue points to the first prefetched item, back of // the pipe points to last un-flushed item. Front is used only by // reader thread, while back is used only by writer thread. - yqueue_t queue; + yqueue_t _queue; // Points to the first un-flushed item. This variable is used // exclusively by writer thread. - T *w; + T *_w; // Points to the first un-prefetched item. This variable is used // exclusively by reader thread. - T *r; + T *_r; // Points to the first item to be flushed in the future. - T *f; + T *_f; // The single point of contention between writer and reader thread. // Points past the last flushed item. If it is NULL, // reader is asleep. This pointer should be always accessed using // atomic operations. - atomic_ptr_t c; + atomic_ptr_t _c; // Disable copying of ypipe object. ypipe_t (const ypipe_t &); diff --git a/src/yqueue.hpp b/src/yqueue.hpp index 82c76fe2..9e8b3ec0 100644 --- a/src/yqueue.hpp +++ b/src/yqueue.hpp @@ -65,60 +65,60 @@ template class yqueue_t // Create the queue. inline yqueue_t () { - begin_chunk = allocate_chunk (); - alloc_assert (begin_chunk); - begin_pos = 0; - back_chunk = NULL; - back_pos = 0; - end_chunk = begin_chunk; - end_pos = 0; + _begin_chunk = allocate_chunk (); + alloc_assert (_begin_chunk); + _begin_pos = 0; + _back_chunk = NULL; + _back_pos = 0; + _end_chunk = _begin_chunk; + _end_pos = 0; } // Destroy the queue. 
inline ~yqueue_t () { while (true) { - if (begin_chunk == end_chunk) { - free (begin_chunk); + if (_begin_chunk == _end_chunk) { + free (_begin_chunk); break; } - chunk_t *o = begin_chunk; - begin_chunk = begin_chunk->next; + chunk_t *o = _begin_chunk; + _begin_chunk = _begin_chunk->next; free (o); } - chunk_t *sc = spare_chunk.xchg (NULL); + chunk_t *sc = _spare_chunk.xchg (NULL); free (sc); } // Returns reference to the front element of the queue. // If the queue is empty, behaviour is undefined. - inline T &front () { return begin_chunk->values[begin_pos]; } + inline T &front () { return _begin_chunk->values[_begin_pos]; } // Returns reference to the back element of the queue. // If the queue is empty, behaviour is undefined. - inline T &back () { return back_chunk->values[back_pos]; } + inline T &back () { return _back_chunk->values[_back_pos]; } // Adds an element to the back end of the queue. inline void push () { - back_chunk = end_chunk; - back_pos = end_pos; + _back_chunk = _end_chunk; + _back_pos = _end_pos; - if (++end_pos != N) + if (++_end_pos != N) return; - chunk_t *sc = spare_chunk.xchg (NULL); + chunk_t *sc = _spare_chunk.xchg (NULL); if (sc) { - end_chunk->next = sc; - sc->prev = end_chunk; + _end_chunk->next = sc; + sc->prev = _end_chunk; } else { - end_chunk->next = allocate_chunk (); - alloc_assert (end_chunk->next); - end_chunk->next->prev = end_chunk; + _end_chunk->next = allocate_chunk (); + alloc_assert (_end_chunk->next); + _end_chunk->next->prev = _end_chunk; } - end_chunk = end_chunk->next; - end_pos = 0; + _end_chunk = _end_chunk->next; + _end_pos = 0; } // Removes element from the back end of the queue. In other words @@ -131,40 +131,40 @@ template class yqueue_t inline void unpush () { // First, move 'back' one position backwards. 
- if (back_pos) - --back_pos; + if (_back_pos) + --_back_pos; else { - back_pos = N - 1; - back_chunk = back_chunk->prev; + _back_pos = N - 1; + _back_chunk = _back_chunk->prev; } // Now, move 'end' position backwards. Note that obsolete end chunk // is not used as a spare chunk. The analysis shows that doing so // would require free and atomic operation per chunk deallocated // instead of a simple free. - if (end_pos) - --end_pos; + if (_end_pos) + --_end_pos; else { - end_pos = N - 1; - end_chunk = end_chunk->prev; - free (end_chunk->next); - end_chunk->next = NULL; + _end_pos = N - 1; + _end_chunk = _end_chunk->prev; + free (_end_chunk->next); + _end_chunk->next = NULL; } } // Removes an element from the front end of the queue. inline void pop () { - if (++begin_pos == N) { - chunk_t *o = begin_chunk; - begin_chunk = begin_chunk->next; - begin_chunk->prev = NULL; - begin_pos = 0; + if (++_begin_pos == N) { + chunk_t *o = _begin_chunk; + _begin_chunk = _begin_chunk->next; + _begin_chunk->prev = NULL; + _begin_pos = 0; - // 'o' has been more recently used than spare_chunk, + // 'o' has been more recently used than _spare_chunk, // so for cache reasons we'll get rid of the spare and // use 'o' as the spare. - chunk_t *cs = spare_chunk.xchg (o); + chunk_t *cs = _spare_chunk.xchg (o); free (cs); } } @@ -194,17 +194,17 @@ template class yqueue_t // while begin & end positions are always valid. Begin position is // accessed exclusively be queue reader (front/pop), while back and // end positions are accessed exclusively by queue writer (back/push). - chunk_t *begin_chunk; - int begin_pos; - chunk_t *back_chunk; - int back_pos; - chunk_t *end_chunk; - int end_pos; + chunk_t *_begin_chunk; + int _begin_pos; + chunk_t *_back_chunk; + int _back_pos; + chunk_t *_end_chunk; + int _end_pos; // People are likely to produce and consume at similar rates. In // this scenario holding onto the most recently freed chunk saves // us from having to call malloc/free. 
- atomic_ptr_t spare_chunk; + atomic_ptr_t _spare_chunk; // Disable copying of yqueue. yqueue_t (const yqueue_t &); diff --git a/src/zap_client.cpp b/src/zap_client.cpp index d04e42c6..81fdf500 100644 --- a/src/zap_client.cpp +++ b/src/zap_client.cpp @@ -255,7 +255,7 @@ zap_client_common_handshake_t::zap_client_common_handshake_t ( mechanism_base_t (session_, options_), zap_client_t (session_, peer_address_, options_), state (waiting_for_hello), - zap_reply_ok_state (zap_reply_ok_state_) + _zap_reply_ok_state (zap_reply_ok_state_) { } @@ -283,7 +283,7 @@ void zap_client_common_handshake_t::handle_zap_status_code () // i.e. 200, 300, 400 or 500 switch (status_code[0]) { case '2': - state = zap_reply_ok_state; + state = _zap_reply_ok_state; break; case '3': // a 300 error code (temporary failure) diff --git a/src/zap_client.hpp b/src/zap_client.hpp index 4e7b2ce2..b51ca4be 100644 --- a/src/zap_client.hpp +++ b/src/zap_client.hpp @@ -94,7 +94,7 @@ class zap_client_common_handshake_t : public zap_client_t state_t state; private: - const state_t zap_reply_ok_state; + const state_t _zap_reply_ok_state; }; }