[DEV] add v1.66.0

2018-01-12 21:47:58 +01:00
parent 87059bb1af
commit a97e9ae7d4
49032 changed files with 7668950 additions and 0 deletions

libs/atomic/README.md Normal file

@@ -0,0 +1,26 @@
# ![Boost.Atomic](doc/logo.png)
Boost.Atomic, part of the collection of [Boost C++ Libraries](http://github.com/boostorg), implements atomic operations for various CPU architectures, reflecting and extending the standard interface defined in C++11.
### Directories
* **build** - Boost.Atomic build scripts
* **doc** - QuickBook documentation sources
* **include** - Interface headers of Boost.Atomic
* **src** - Compilable source code of Boost.Atomic
* **test** - Boost.Atomic unit tests
### More information
* [Documentation](http://boost.org/libs/atomic)
* [Report bugs](https://svn.boost.org/trac/boost/newticket?component=atomic;version=Boost%20Release%20Branch). Be sure to mention the Boost version, platform and compiler you're using. A small compilable code sample that reproduces the problem is always welcome.
* Submit your patches as pull requests against the **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).
### Build status
Master: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/master?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/master) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=master)](https://travis-ci.org/boostorg/atomic)
Develop: [![AppVeyor](https://ci.appveyor.com/api/projects/status/c64xu59bydnmb7kt/branch/develop?svg=true)](https://ci.appveyor.com/project/Lastique/atomic/branch/develop) [![Travis CI](https://travis-ci.org/boostorg/atomic.svg?branch=develop)](https://travis-ci.org/boostorg/atomic)
### License
Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt).


@@ -0,0 +1,37 @@
# Boost.Atomic Library Jamfile
#
# Copyright Helge Bahmann 2011.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import common ;
project boost/atomic
: requirements
<threading>multi
<link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
<link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
<define>BOOST_ATOMIC_SOURCE
<target-os>windows:<define>BOOST_USE_WINDOWS_H
<target-os>windows:<define>_WIN32_WINNT=0x0500
<toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
: usage-requirements
<link>shared:<define>BOOST_ATOMIC_DYN_LINK=1
<link>static:<define>BOOST_ATOMIC_STATIC_LINK=1
: source-location ../src
;
alias atomic_sources
: lockpool.cpp
;
explicit atomic_sources ;
lib boost_atomic
: atomic_sources
;
boost-install boost_atomic ;


@@ -0,0 +1,36 @@
# Boost.Atomic library documentation Jamfile
#
# Copyright Helge Bahmann 2011.
# Copyright Tim Blechmann 2012.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import quickbook ;
import boostbook : boostbook ;
xml atomic : atomic.qbk ;
boostbook standalone
: atomic
: <xsl:param>boost.root=../../../..
<xsl:param>boost.libraries=../../../libraries.htm
<format>pdf:<xsl:param>boost.url.prefix=http://www.boost.org/doc/libs/release/libs/atomic/doc/html
;
install css : [ glob $(BOOST_ROOT)/doc/src/*.css ]
: <location>html ;
install images : [ glob $(BOOST_ROOT)/doc/src/images/*.png ]
: <location>html/images ;
explicit css ;
explicit images ;
###############################################################################
alias boostdoc
: atomic
:
:
: ;
explicit boostdoc ;
alias boostrelease ;
explicit boostrelease ;

libs/atomic/doc/atomic.qbk Normal file

File diff suppressed because it is too large.


@@ -0,0 +1,398 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]
[section:example_reference_counters Reference counting]
The purpose of a ['reference counter] is to count the number
of pointers to an object. The object can be destroyed as
soon as the reference counter reaches zero.
[section Implementation]
[c++]
#include <boost/intrusive_ptr.hpp>
#include <boost/atomic.hpp>
class X {
public:
typedef boost::intrusive_ptr<X> pointer;
X() : refcount_(0) {}
private:
mutable boost::atomic<int> refcount_;
friend void intrusive_ptr_add_ref(const X * x)
{
x->refcount_.fetch_add(1, boost::memory_order_relaxed);
}
friend void intrusive_ptr_release(const X * x)
{
if (x->refcount_.fetch_sub(1, boost::memory_order_release) == 1) {
boost::atomic_thread_fence(boost::memory_order_acquire);
delete x;
}
}
};
[endsect]
[section Usage]
[c++]
X::pointer x = new X;
[endsect]
[section Discussion]
Increasing the reference counter can always be done with
[^memory_order_relaxed]: New references to an object can only
be formed from an existing reference, and passing an existing
reference from one thread to another must already provide any
required synchronization.
It is important to ensure that any possible access to the object in
one thread (through an existing reference) ['happens before]
deleting the object in a different thread. This is achieved
by a "release" operation after dropping a reference (any
access to the object through this reference must obviously
have happened before), and an "acquire" operation before
deleting the object.
It would be possible to use [^memory_order_acq_rel] for the
[^fetch_sub] operation, but this results in unneeded "acquire"
operations when the reference counter has not yet reached zero
and may impose a performance penalty.
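For comparison, a sketch of the [^memory_order_acq_rel] variant
mentioned above (shown for illustration only, not as the recommended form):
[c++]
friend void intrusive_ptr_release(const X * x)
{
if (x->refcount_.fetch_sub(1, boost::memory_order_acq_rel) == 1) {
// acq_rel already provides the required "acquire" ordering,
// so no standalone fence is needed before the delete
delete x;
}
}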
[endsect]
[endsect]
[section:example_spinlock Spinlock]
The purpose of a ['spin lock] is to prevent multiple threads
from concurrently accessing a shared data structure. In contrast
to a mutex, threads will busy-wait and waste CPU cycles instead
of yielding the CPU to another thread. ['Do not use spinlocks
unless you are certain that you understand the consequences.]
[section Implementation]
[c++]
#include <boost/atomic.hpp>
class spinlock {
private:
typedef enum {Locked, Unlocked} LockState;
boost::atomic<LockState> state_;
public:
spinlock() : state_(Unlocked) {}
void lock()
{
while (state_.exchange(Locked, boost::memory_order_acquire) == Locked) {
/* busy-wait */
}
}
void unlock()
{
state_.store(Unlocked, boost::memory_order_release);
}
};
[endsect]
[section Usage]
[c++]
spinlock s;
s.lock();
// access data structure here
s.unlock();
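Since a missed [^unlock] (e.g. due to an early return) leaves the
lock held forever, callers may prefer a scope guard; a minimal sketch
(not part of the class above):
[c++]
class scoped_spinlock {
public:
explicit scoped_spinlock(spinlock & s) : s_(s) { s_.lock(); }
~scoped_spinlock() { s_.unlock(); } // released on scope exit
private:
spinlock & s_;
};
// usage: { scoped_spinlock guard(s); /* access data structure */ }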
[endsect]
[section Discussion]
The purpose of the spinlock is to make sure that one access
to the shared data structure always strictly "happens before"
another. The usage of acquire/release in lock/unlock is required
and sufficient to guarantee this ordering.
It would be correct to write the "lock" operation in the following
way:
[c++]
void lock()
{
while (state_.exchange(Locked, boost::memory_order_relaxed) == Locked) {
/* busy-wait */
}
boost::atomic_thread_fence(boost::memory_order_acquire);
}
This "optimization" is however a) useless and b) may in fact hurt:
a) Since the thread will be busily spinning on a blocked spinlock,
it does not matter if it will waste the CPU cycles with just
"exchange" operations or with both useless "exchange" and "acquire"
operations. b) A tight "exchange" loop without any
memory-synchronizing instruction introduced through an "acquire"
operation will on some systems monopolize the memory subsystem
and degrade the performance of other system components.
[endsect]
[endsect]
[section:singleton Singleton with double-checked locking pattern]
The purpose of the ['Singleton with double-checked locking pattern] is to ensure
that at most one instance of a particular object is created.
If one instance has been created already, access to the existing
object should be as light-weight as possible.
[section Implementation]
[c++]
#include <boost/atomic.hpp>
#include <boost/thread/mutex.hpp>
class X {
public:
static X * instance()
{
X * tmp = instance_.load(boost::memory_order_consume);
if (!tmp) {
boost::mutex::scoped_lock guard(instantiation_mutex);
tmp = instance_.load(boost::memory_order_consume);
if (!tmp) {
tmp = new X;
instance_.store(tmp, boost::memory_order_release);
}
}
return tmp;
}
private:
static boost::atomic<X *> instance_;
static boost::mutex instantiation_mutex;
};
boost::atomic<X *> X::instance_(0);
boost::mutex X::instantiation_mutex;
[endsect]
[section Usage]
[c++]
X * x = X::instance();
// dereference x
[endsect]
[section Discussion]
The mutex makes sure that only one instance of the object is
ever created. The [^instance] method must make sure that any
dereference of the object strictly "happens after" creating
the instance in another thread. The use of [^memory_order_release]
after creating and initializing the object and [^memory_order_consume]
before dereferencing the object provides this guarantee.
It would be permissible to use [^memory_order_acquire] instead of
[^memory_order_consume], but this provides a stronger guarantee
than is required since only operations depending on the value of
the pointer need to be ordered.
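For illustration, the acquire-based load mentioned above would be a
one-line change (a sketch; most current compilers implement
[^memory_order_consume] by promoting it to acquire anyway):
[c++]
// acquire orders all subsequent reads, not only those that
// carry a data dependency on the loaded pointer
X * tmp = instance_.load(boost::memory_order_acquire);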
[endsect]
[endsect]
[section:example_ringbuffer Wait-free ring buffer]
A ['wait-free ring buffer] provides a mechanism for relaying objects
from one single "producer" thread to one single "consumer" thread without
any locks. The operations on this data structure are "wait-free" which
means that each operation finishes within a constant number of steps.
This makes this data structure suitable for use in hard real-time systems
or for communication with interrupt/signal handlers.
[section Implementation]
[c++]
#include <boost/atomic.hpp>
template<typename T, size_t Size>
class ringbuffer {
public:
ringbuffer() : head_(0), tail_(0) {}
bool push(const T & value)
{
size_t head = head_.load(boost::memory_order_relaxed);
size_t next_head = next(head);
if (next_head == tail_.load(boost::memory_order_acquire))
return false;
ring_[head] = value;
head_.store(next_head, boost::memory_order_release);
return true;
}
bool pop(T & value)
{
size_t tail = tail_.load(boost::memory_order_relaxed);
if (tail == head_.load(boost::memory_order_acquire))
return false;
value = ring_[tail];
tail_.store(next(tail), boost::memory_order_release);
return true;
}
private:
size_t next(size_t current)
{
return (current + 1) % Size;
}
T ring_[Size];
boost::atomic<size_t> head_, tail_;
};
[endsect]
[section Usage]
[c++]
ringbuffer<int, 32> r;
// try to insert an element
if (r.push(42)) { /* succeeded */ }
else { /* buffer full */ }
// try to retrieve an element
int value;
if (r.pop(value)) { /* succeeded */ }
else { /* buffer empty */ }
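A sketch of the intended single-producer/single-consumer deployment
(thread creation is assumed to be handled elsewhere, e.g. with
[^boost::thread]):
[c++]
ringbuffer<int, 32> queue;
void producer() // must run in exactly one thread
{
for (int i = 0; i < 1000; ++i) {
while (!queue.push(i)) {
/* buffer full, retry */
}
}
}
void consumer() // must run in exactly one other thread
{
int value;
for (int i = 0; i < 1000; ++i) {
while (!queue.pop(value)) {
/* buffer empty, retry */
}
// process value
}
}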
[endsect]
[section Discussion]
The implementation makes sure that the ring indices do
not "lap around" each other, ensuring that no elements
are either lost or read twice.
Furthermore, it must guarantee that read access to a
particular object in [^pop] "happens after" it has been
written in [^push]. This is achieved by writing [^head_ ]
with "release" and reading it with "acquire". Conversely,
the implementation also ensures that read access to
a particular ring element "happens before"
rewriting this element with a new value, by accessing [^tail_]
with appropriate ordering constraints.
[endsect]
[endsect]
[section:mp_queue Wait-free multi-producer queue]
The purpose of the ['wait-free multi-producer queue] is to allow
an arbitrary number of producers to enqueue objects which are
retrieved and processed in FIFO order by a single consumer.
[section Implementation]
[c++]
#include <boost/atomic.hpp>
template<typename T>
class waitfree_queue {
public:
struct node {
T data;
node * next;
};
void push(const T &data)
{
node * n = new node;
n->data = data;
node * stale_head = head_.load(boost::memory_order_relaxed);
do {
n->next = stale_head;
} while (!head_.compare_exchange_weak(stale_head, n, boost::memory_order_release));
}
node * pop_all(void)
{
node * last = pop_all_reverse(), * first = 0;
while(last) {
node * tmp = last;
last = last->next;
tmp->next = first;
first = tmp;
}
return first;
}
waitfree_queue() : head_(0) {}
// alternative interface if ordering is of no importance
node * pop_all_reverse(void)
{
return head_.exchange(0, boost::memory_order_consume);
}
private:
boost::atomic<node *> head_;
};
[endsect]
[section Usage]
[c++]
waitfree_queue<int> q;
// insert elements
q.push(42);
q.push(2);
// pop elements
waitfree_queue<int>::node * x = q.pop_all();
while(x) {
waitfree_queue<int>::node * tmp = x;
x = x->next;
// process tmp->data, probably delete it afterwards
delete tmp;
}
[endsect]
[section Discussion]
The implementation guarantees that all objects enqueued are
processed in the order they were enqueued by building a singly-linked
list of objects in reverse processing order. The queue is atomically
emptied by the consumer and brought into correct order.
It must be guaranteed that any access to an object to be enqueued
by the producer "happens before" any access by the consumer. This
is assured by inserting objects into the list with ['release] and
dequeuing them with ['consume] memory order. It is not
necessary to use ['acquire] memory order in [^waitfree_queue::pop_all]
because all operations involved depend on the value of
the atomic pointer through dereference.
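On compilers that implement [^memory_order_consume] conservatively,
the stronger ordering can be substituted; a sketch of that variant:
[c++]
node * pop_all_reverse(void)
{
// acquire is never weaker than consume: it orders all subsequent
// reads, not only the dependency-carrying ones
return head_.exchange(0, boost::memory_order_acquire);
}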
[endsect]
[endsect]

libs/atomic/doc/logo.png Normal file (binary image, 11 KiB; not shown)

libs/atomic/doc/logo.svg Normal file (SVG image, 38 KiB; diff suppressed)


@@ -0,0 +1,312 @@
[/
/ Copyright (c) 2009 Helge Bahmann
/
/ Distributed under the Boost Software License, Version 1.0. (See accompanying
/ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
/]
[section:template_organization Organization of class template layers]
The implementation uses multiple layers of template classes that
inherit from the next lower level each and refine or adapt the respective
underlying class:
* [^boost::atomic<T>] is the topmost level, providing
the external interface. Implementation-wise, it does not add anything
(except for hiding the copy constructor and assignment operator).
* [^boost::detail::atomic::internal_atomic<T,S=sizeof(T),I=is_integral_type<T> >]:
This layer is mainly responsible for providing the overloaded operators
mapping to API member functions (e.g. [^+=] to [^fetch_add]).
The defaulted template parameter [^I] allows
the correct API functions to be exposed (via partial template
specialization): for non-integral types, it only
publishes the various [^exchange] functions
as well as [^load] and [^store]; for integral types it
additionally exports arithmetic and logic operations.
[br]
Depending on whether the given type is integral, it
inherits from either [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
or [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>].
There is however some special-casing: for non-integral types
of size 1, 2, 4 or 8, it will coerce the datatype into an integer representation
and delegate to [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
-- the rationale is that platform implementors only need to provide
integer-type operations.
* [^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]
must provide the full set of operations for an integral type T
(i.e. [^load], [^store], [^exchange],
[^compare_exchange_weak], [^compare_exchange_strong],
[^fetch_add], [^fetch_sub], [^fetch_and],
[^fetch_or], [^fetch_xor], [^is_lock_free]).
The default implementation uses locking to emulate atomic operations, so
this is the level at which implementors should provide template specializations
to add support for platform-specific atomic operations.
[br]
The two separate template parameters allow separate specialization
on size and type (which, with fixed size, cannot
specify more than signedness/unsignedness). The rationale is that
most platform-specific atomic operations usually depend only on the
operand size, so that common implementations for signed/unsigned
types are possible. Signedness allows choosing the proper sign-extending
instructions for the [^load] operation, avoiding later
conversion. The expectation is that in most implementations this will
be a normal assignment in C, possibly accompanied by memory
fences, so that the compiler can automatically choose the correct
instruction.
* At the lowest level, [^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
provides the most basic atomic operations ([^load], [^store],
[^exchange], [^compare_exchange_weak],
[^compare_exchange_strong]) for arbitrarily generic data types.
The default implementation uses locking as a fallback mechanism.
Implementors generally do not have to specialize at this level
(since these will not be used for the common integral type sizes
of 1, 2, 4 and 8 bytes), but they can do so if they wish to
provide truly atomic operations for "odd" data type sizes.
Some amount of care must be taken as the "raw" data type
passed in from the user through [^boost::atomic<T>]
is visible here -- it thus needs to be type-punned or otherwise
manipulated byte-by-byte to avoid using overloaded assignment,
comparison operators and copy constructors.
[endsect]
[section:platform_atomic_implementation Implementing platform-specific atomic operations]
In principle implementors are responsible for providing the
full range of named member functions of an atomic object
(i.e. [^load], [^store], [^exchange],
[^compare_exchange_weak], [^compare_exchange_strong],
[^fetch_add], [^fetch_sub], [^fetch_and],
[^fetch_or], [^fetch_xor], [^is_lock_free]).
These must be implemented as partial template specializations for
[^boost::detail::atomic::platform_atomic_integral<T,S=sizeof(T)>]:
[c++]
template<typename T>
class platform_atomic_integral<T, 4>
{
public:
explicit platform_atomic_integral(T v) : i(v) {}
platform_atomic_integral(void) {}
T load(memory_order order=memory_order_seq_cst) const volatile
{
// platform-specific code
}
void store(T v, memory_order order=memory_order_seq_cst) volatile
{
// platform-specific code
}
private:
volatile T i;
};
As noted above, it will usually suffice to specialize on the second
template argument, indicating the size of the data type in bytes.
[section:automatic_buildup Templates for automatic build-up]
Often only a portion of the required operations can be
usefully mapped to machine instructions. Several helper template
classes are provided that can automatically synthesize missing methods to
complete an implementation.
At the minimum, an implementor must provide the
[^load], [^store],
[^compare_exchange_weak] and
[^is_lock_free] methods:
[c++]
template<typename T>
class my_atomic_32 {
public:
my_atomic_32() {}
my_atomic_32(T initial_value) : value(initial_value) {}
T load(memory_order order=memory_order_seq_cst) volatile const
{
// platform-specific code
}
void store(T new_value, memory_order order=memory_order_seq_cst) volatile
{
// platform-specific code
}
bool compare_exchange_weak(T &expected, T desired,
memory_order success_order,
memory_order failure_order) volatile
{
// platform-specific code
}
bool is_lock_free() const volatile {return true;}
protected:
// typedef is required for classes inheriting from this
typedef T integral_type;
private:
T value;
};
The template [^boost::detail::atomic::build_atomic_from_minimal]
can then take care of the rest:
[c++]
template<typename T>
class platform_atomic_integral<T, 4>
: public boost::detail::atomic::build_atomic_from_minimal<my_atomic_32<T> >
{
public:
typedef build_atomic_from_minimal<my_atomic_32<T> > super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
There are several helper classes to assist in building "complete"
atomic implementations from different starting points:
* [^build_atomic_from_minimal] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^build_atomic_from_exchange] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^build_atomic_from_add] requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^fetch_add]
* [^build_atomic_from_typical] (['supported on gcc only]) requires
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
* [^fetch_add_var] (protected method)
* [^fetch_inc] (protected method)
* [^fetch_dec] (protected method)
This will generate a [^fetch_add] method
that calls [^fetch_inc]/[^fetch_dec]
when the given parameter is a compile-time constant
equal to +1 or -1 respectively, and [^fetch_add_var]
in all other cases. This provides a mechanism for
optimizing the extremely common case of an atomic
variable being used as a counter.
The prototypes for the methods to be implemented are:
[c++]
template<typename T>
class my_atomic {
public:
T fetch_inc(memory_order order) volatile;
T fetch_dec(memory_order order) volatile;
T fetch_add_var(T counter, memory_order order) volatile;
};
These helper templates are defined in [^boost/atomic/detail/builder.hpp].
[endsect]
[section:automatic_buildup_small Build sub-word-sized atomic data types]
There is one other helper template that can build sub-word-sized
atomic data types even though the underlying architecture allows
only word-sized atomic operations:
[c++]
template<typename T>
class platform_atomic_integral<T, 1> :
public build_atomic_from_larger_type<my_atomic_32<uint32_t>, T>
{
public:
typedef build_atomic_from_larger_type<my_atomic_32<uint32_t>, T> super;
explicit platform_atomic_integral(T v) : super(v) {}
platform_atomic_integral(void) {}
};
The above would create an atomic data type of 1 byte size, and
use masking and shifts to map it to 32-bit atomic operations.
The base type must implement [^load], [^store]
and [^compare_exchange_weak] for this to work.
[endsect]
[section:other_sizes Atomic data types for unusual object sizes]
In unusual circumstances, an implementor may also opt to specialize
[^boost::detail::atomic::platform_atomic<T,S=sizeof(T)>]
to provide support for atomic objects not fitting an integral size.
If you do that, keep the following things in mind:
* There is no reason to ever do this for object sizes
of 1, 2, 4 and 8 bytes
* Only the following methods need to be implemented:
* [^load]
* [^store]
* [^compare_exchange_weak] (4-operand version)
* [^compare_exchange_strong] (4-operand version)
* [^exchange]
The type of the data to be stored in the atomic
variable (template parameter [^T])
is exposed to this class, and the type may have
overloaded assignment and comparison operators --
using these overloaded operators however will result
in an error. The implementor is responsible for
accessing the objects in a way that does not
invoke either of these operators (using e.g.
[^memcpy] or type-casts).
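A minimal sketch of such dependency-free access, assuming a
hypothetical 16-byte storage type and lock-free load primitive (both
names are illustrative and not part of Boost.Atomic):
[c++]
#include <cstring>
struct storage16 { unsigned char bytes[16]; }; // opaque raw storage
storage16 platform_load16(const volatile void * p, memory_order order); // hypothetical
template<typename T>
class platform_atomic<T, 16>
{
public:
T load(memory_order order=memory_order_seq_cst) const volatile
{
storage16 raw = platform_load16(&v_, order);
T result;
// memcpy copies raw bytes and never invokes T's (possibly
// overloaded) assignment or comparison operators
std::memcpy(&result, raw.bytes, sizeof(T));
return result;
}
private:
storage16 v_;
};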
[endsect]
[endsect]
[section:platform_atomic_fences Fences]
Platform implementors need to provide a function performing
the action required for [funcref boost::atomic_thread_fence atomic_thread_fence]
(the fallback implementation will just perform an atomic operation
on an integer object). This is achieved by specializing the
[^boost::detail::atomic::platform_atomic_thread_fence] template
function in the following way:
[c++]
template<>
void platform_atomic_thread_fence(memory_order order)
{
// platform-specific code here
}
[endsect]
[section:platform_atomic_puttogether Putting it all together]
The template specializations should be put into a header file
in the [^boost/atomic/detail] directory, preferably
specifying supported compiler and architecture in its name.
The file [^boost/atomic/detail/platform.hpp] must
subsequently be modified to conditionally include the new
header.
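For example, a header for a hypothetical gcc/myarch port could be
hooked in along the following lines (the file and macro names are
illustrative only):
[c++]
// in boost/atomic/detail/platform.hpp
#if defined(__GNUC__) && defined(__myarch__)
#include <boost/atomic/detail/gcc-myarch.hpp>
#endif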
[endsect]

libs/atomic/index.html Normal file

@@ -0,0 +1,13 @@
<html>
<head>
<meta http-equiv="refresh" content="0; URL=../../doc/html/atomic.html">
</head>
<body>
Automatic redirection failed, please go to
<a href="../../doc/html/atomic.html">../../doc/html/atomic.html</a> &nbsp;<hr>
<p>&copy; Copyright Beman Dawes, 2001</p>
<p>Distributed under the Boost Software License, Version 1.0. (See accompanying
file <a href="../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or copy
at <a href="http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</p>
</body>
</html>


@@ -0,0 +1,18 @@
{
"key": "atomic",
"name": "Atomic",
"authors": [
"Helge Bahmann",
"Tim Blechmann",
"Andrey Semashev"
],
"description": "C++11-style atomic<>.",
"category": [
"Concurrent"
],
"maintainers": [
"Helge Bahmann <hcb -at- chaoticmind.net>",
"Tim Blechmann <tim -at- klingt.org>",
"Andrey Semashev <andrey.semashev -at- gmail.com>"
]
}


@@ -0,0 +1,161 @@
/*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* Copyright (c) 2011 Helge Bahmann
* Copyright (c) 2013-2014 Andrey Semashev
*/
/*!
* \file lockpool.cpp
*
* This file contains implementation of the lockpool used to emulate atomic ops.
*/
#include <cstddef>
#include <boost/config.hpp>
#include <boost/assert.hpp>
#include <boost/memory_order.hpp>
#include <boost/atomic/capabilities.hpp>
#if BOOST_ATOMIC_FLAG_LOCK_FREE == 2
#include <boost/atomic/detail/operations_lockfree.hpp>
#elif !defined(BOOST_HAS_PTHREADS)
#error Boost.Atomic: Unsupported target platform, POSIX threads are required when native atomic operations are not available
#else
#include <pthread.h>
#define BOOST_ATOMIC_USE_PTHREAD
#endif
#include <boost/atomic/detail/lockpool.hpp>
#include <boost/atomic/detail/pause.hpp>
#if defined(BOOST_MSVC)
#pragma warning(push)
// 'struct_name' : structure was padded due to __declspec(align())
#pragma warning(disable: 4324)
#endif
namespace boost {
namespace atomics {
namespace detail {
namespace {
// This seems to be the maximum across all modern CPUs
// NOTE: This constant is made as a macro because some compilers (gcc 4.4 for one) don't allow enums or namespace scope constants in alignment attributes
#define BOOST_ATOMIC_CACHE_LINE_SIZE 64
#if defined(BOOST_ATOMIC_USE_PTHREAD)
typedef pthread_mutex_t lock_type;
#else
typedef atomics::detail::operations< 1u, false > lock_operations;
typedef lock_operations::storage_type lock_type;
#endif
enum
{
padding_size = (sizeof(lock_type) <= BOOST_ATOMIC_CACHE_LINE_SIZE ?
(BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type)) :
(BOOST_ATOMIC_CACHE_LINE_SIZE - sizeof(lock_type) % BOOST_ATOMIC_CACHE_LINE_SIZE))
};
template< unsigned int PaddingSize >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock
{
lock_type lock;
// The additional padding is needed to avoid false sharing between locks
char padding[PaddingSize];
};
template< >
struct BOOST_ALIGNMENT(BOOST_ATOMIC_CACHE_LINE_SIZE) padded_lock< 0u >
{
lock_type lock;
};
typedef padded_lock< padding_size > padded_lock_t;
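// Note: the pool size (41) is a prime number; a prime-sized pool spreads
// addresses that share a common alignment more evenly across the locks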
static padded_lock_t g_lock_pool[41]
#if defined(BOOST_ATOMIC_USE_PTHREAD)
=
{
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
{ PTHREAD_MUTEX_INITIALIZER }
}
#endif
;
} // namespace
#if !defined(BOOST_ATOMIC_USE_PTHREAD)
// NOTE: This function must NOT be inline. Otherwise MSVC 9 will sometimes generate broken code for the modulus operation, which results in crashes.
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
while (lock_operations::test_and_set(*static_cast< lock_type* >(m_lock), memory_order_acquire))
{
do
{
atomics::detail::pause();
}
while (!!lock_operations::load(*static_cast< lock_type* >(m_lock), memory_order_relaxed));
}
}
BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
lock_operations::clear(*static_cast< lock_type* >(m_lock), memory_order_release);
}
BOOST_ATOMIC_DECL void signal_fence() BOOST_NOEXCEPT;
#else // !defined(BOOST_ATOMIC_USE_PTHREAD)
BOOST_ATOMIC_DECL lockpool::scoped_lock::scoped_lock(const volatile void* addr) BOOST_NOEXCEPT :
m_lock(&g_lock_pool[reinterpret_cast< std::size_t >(addr) % (sizeof(g_lock_pool) / sizeof(*g_lock_pool))].lock)
{
BOOST_VERIFY(pthread_mutex_lock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}
BOOST_ATOMIC_DECL lockpool::scoped_lock::~scoped_lock() BOOST_NOEXCEPT
{
BOOST_VERIFY(pthread_mutex_unlock(static_cast< pthread_mutex_t* >(m_lock)) == 0);
}
#endif // !defined(BOOST_ATOMIC_USE_PTHREAD)
BOOST_ATOMIC_DECL void lockpool::thread_fence() BOOST_NOEXCEPT
{
#if BOOST_ATOMIC_THREAD_FENCE > 0
atomics::detail::thread_fence(memory_order_seq_cst);
#else
// Emulate full fence by locking/unlocking a mutex
scoped_lock lock(0);
#endif
}
BOOST_ATOMIC_DECL void lockpool::signal_fence() BOOST_NOEXCEPT
{
// This function is intentionally non-inline, even if empty. This forces the compiler to treat its call as a compiler barrier.
#if BOOST_ATOMIC_SIGNAL_FENCE > 0
atomics::detail::signal_fence(memory_order_seq_cst);
#endif
}
} // namespace detail
} // namespace atomics
} // namespace boost
#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif


@@ -0,0 +1,31 @@
# Boost.Atomic Library test Jamfile
#
# Copyright (c) 2011 Helge Bahmann
# Copyright (c) 2012 Tim Blechmann
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import testing ;
project boost/atomic/test
: requirements
<threading>multi
<library>/boost/thread//boost_thread
<library>/boost/atomic//boost_atomic
<target-os>windows:<define>BOOST_USE_WINDOWS_H
<target-os>windows:<define>_WIN32_WINNT=0x0500
<toolset>gcc,<target-os>windows:<linkflags>"-lkernel32"
;
test-suite atomic
: [ run native_api.cpp ]
[ run fallback_api.cpp ]
[ run atomicity.cpp ]
[ run ordering.cpp ]
[ run lockfree.cpp ]
[ compile-fail cf_arith_void_ptr.cpp ]
[ compile-fail cf_arith_func_ptr.cpp ]
[ compile-fail cf_arith_mem_ptr.cpp ]
;


@@ -0,0 +1,845 @@
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2017 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_ATOMIC_API_TEST_HELPERS_HPP
#define BOOST_ATOMIC_API_TEST_HELPERS_HPP
#include <boost/atomic.hpp>
#include <cstddef>
#include <cstring>
#include <limits>
#include <iostream>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/type_traits/integral_constant.hpp>
#include <boost/type_traits/is_signed.hpp>
#include <boost/type_traits/is_unsigned.hpp>
struct test_stream_type
{
typedef std::ios_base& (*ios_base_manip)(std::ios_base&);
typedef std::basic_ios< char, std::char_traits< char > >& (*basic_ios_manip)(std::basic_ios< char, std::char_traits< char > >&);
typedef std::ostream& (*stream_manip)(std::ostream&);
template< typename T >
test_stream_type const& operator<< (T const& value) const
{
std::cerr << value;
return *this;
}
test_stream_type const& operator<< (ios_base_manip manip) const
{
std::cerr << manip;
return *this;
}
test_stream_type const& operator<< (basic_ios_manip manip) const
{
std::cerr << manip;
return *this;
}
test_stream_type const& operator<< (stream_manip manip) const
{
std::cerr << manip;
return *this;
}
// Make sure characters are printed as numbers if tests fail
test_stream_type const& operator<< (char value) const
{
std::cerr << static_cast< int >(value);
return *this;
}
test_stream_type const& operator<< (signed char value) const
{
std::cerr << static_cast< int >(value);
return *this;
}
test_stream_type const& operator<< (unsigned char value) const
{
std::cerr << static_cast< unsigned int >(value);
return *this;
}
test_stream_type const& operator<< (short value) const
{
std::cerr << static_cast< int >(value);
return *this;
}
test_stream_type const& operator<< (unsigned short value) const
{
std::cerr << static_cast< unsigned int >(value);
return *this;
}
#if defined(BOOST_HAS_INT128)
// Some GCC versions don't provide output operators for __int128
test_stream_type const& operator<< (boost::int128_type const& v) const
{
std::cerr << static_cast< long long >(v);
return *this;
}
test_stream_type const& operator<< (boost::uint128_type const& v) const
{
std::cerr << static_cast< unsigned long long >(v);
return *this;
}
#endif // defined(BOOST_HAS_INT128)
};
const test_stream_type test_stream = {};
#define BOOST_LIGHTWEIGHT_TEST_OSTREAM test_stream
#include <boost/core/lightweight_test.hpp>
/* provide helpers that exercise whether the API
functions of "boost::atomic" provide the correct
operational semantics in the case of sequential
execution */
static void
test_flag_api(void)
{
#ifndef BOOST_ATOMIC_NO_ATOMIC_FLAG_INIT
boost::atomic_flag f = BOOST_ATOMIC_FLAG_INIT;
#else
boost::atomic_flag f;
#endif
BOOST_TEST( !f.test_and_set() );
BOOST_TEST( f.test_and_set() );
f.clear();
BOOST_TEST( !f.test_and_set() );
}
template<typename T>
void test_base_operators(T value1, T value2, T value3)
{
/* explicit load/store */
{
boost::atomic<T> a(value1);
BOOST_TEST_EQ( a.load(), value1 );
}
{
boost::atomic<T> a(value1);
a.store(value2);
BOOST_TEST_EQ( a.load(), value2 );
}
/* overloaded assignment/conversion */
{
boost::atomic<T> a(value1);
BOOST_TEST( value1 == a );
}
{
boost::atomic<T> a;
a = value2;
BOOST_TEST( value2 == a );
}
/* exchange-type operators */
{
boost::atomic<T> a(value1);
T n = a.exchange(value2);
BOOST_TEST_EQ( a.load(), value2 );
BOOST_TEST_EQ( n, value1 );
}
{
boost::atomic<T> a(value1);
T expected = value1;
bool success = a.compare_exchange_strong(expected, value3);
BOOST_TEST( success );
BOOST_TEST_EQ( a.load(), value3 );
BOOST_TEST_EQ( expected, value1 );
}
{
boost::atomic<T> a(value1);
T expected = value2;
bool success = a.compare_exchange_strong(expected, value3);
BOOST_TEST( !success );
BOOST_TEST_EQ( a.load(), value1 );
BOOST_TEST_EQ( expected, value1 );
}
{
boost::atomic<T> a(value1);
T expected;
bool success;
do {
expected = value1;
success = a.compare_exchange_weak(expected, value3);
} while(!success);
BOOST_TEST( success );
BOOST_TEST_EQ( a.load(), value3 );
BOOST_TEST_EQ( expected, value1 );
}
{
boost::atomic<T> a(value1);
T expected;
bool success;
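// compare_exchange_weak may fail spuriously even when the stored value
// matches 'expected'; loop until it fails with a genuine mismatch,
// signalled by 'expected' being updated to a different value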
do {
expected = value2;
success = a.compare_exchange_weak(expected, value3);
if (expected != value2)
break;
} while(!success);
BOOST_TEST( !success );
BOOST_TEST_EQ( a.load(), value1 );
BOOST_TEST_EQ( expected, value1 );
}
}
// T requires an int constructor
template <typename T>
void test_constexpr_ctor()
{
#ifndef BOOST_NO_CXX11_CONSTEXPR
const T value(0);
const boost::atomic<T> tester(value);
BOOST_TEST( tester == value );
#endif
}
//! This type trait provides the max and min values of type D that can be added to/subtracted from T(0) without signed overflow
template< typename T, typename D, bool IsSigned = boost::is_signed< D >::value >
struct distance_limits
{
static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return (std::numeric_limits< D >::min)();
}
static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return (std::numeric_limits< D >::max)();
}
};
#if defined(BOOST_MSVC)
#pragma warning(push)
// 'static_cast': truncation of constant value. There is no actual truncation happening because
// the cast is only performed if the value fits in the range of the result.
#pragma warning(disable: 4309)
#endif
template< typename T, typename D >
struct distance_limits< T*, D, true >
{
static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
const std::ptrdiff_t ptrdiff = (std::numeric_limits< std::ptrdiff_t >::min)() / static_cast< std::ptrdiff_t >(sizeof(T));
const D diff = (std::numeric_limits< D >::min)();
// Both values are negative. Return the closest value to zero.
return diff < ptrdiff ? static_cast< D >(ptrdiff) : diff;
}
static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
const std::ptrdiff_t ptrdiff = (std::numeric_limits< std::ptrdiff_t >::max)() / static_cast< std::ptrdiff_t >(sizeof(T));
const D diff = (std::numeric_limits< D >::max)();
// Both values are positive. Return the closest value to zero.
return diff > ptrdiff ? static_cast< D >(ptrdiff) : diff;
}
};
template< typename T, typename D >
struct distance_limits< T*, D, false >
{
static D min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return (std::numeric_limits< D >::min)();
}
static D max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
const std::size_t ptrdiff = static_cast< std::size_t >((std::numeric_limits< std::ptrdiff_t >::max)()) / sizeof(T);
const D diff = (std::numeric_limits< D >::max)();
return diff > ptrdiff ? static_cast< D >(ptrdiff) : diff;
}
};
#if defined(BOOST_HAS_INT128)
// At least libstdc++ does not specialize std::numeric_limits for __int128 in strict mode (i.e. with GNU extensions disabled).
// So we have to specialize the limits ourselves. We assume two's complement signed representation.
template< typename T, bool IsSigned >
struct distance_limits< T, boost::int128_type, IsSigned >
{
static boost::int128_type min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return -(max)() - 1;
}
static boost::int128_type max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return static_cast< boost::int128_type >((~static_cast< boost::uint128_type >(0u)) >> 1);
}
};
template< typename T, bool IsSigned >
struct distance_limits< T, boost::uint128_type, IsSigned >
{
static boost::uint128_type min BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return 0u;
}
static boost::uint128_type max BOOST_PREVENT_MACRO_SUBSTITUTION () BOOST_NOEXCEPT
{
return ~static_cast< boost::uint128_type >(0u);
}
};
#endif // defined(BOOST_HAS_INT128)
#if defined(BOOST_MSVC)
#pragma warning(pop)
#endif
template<typename T, typename D, typename AddType>
void test_additive_operators_with_type_and_test()
{
// Note: This set of tests is extracted to a separate function because otherwise MSVC-10 for x64 generates broken code
const T zero_value = 0;
const D zero_diff = 0;
const D one_diff = 1;
const AddType zero_add = 0;
{
boost::atomic<T> a(zero_value);
bool f = a.add_and_test(zero_diff);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), zero_value );
f = a.add_and_test(one_diff);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(zero_add + one_diff) );
}
{
boost::atomic<T> a(zero_value);
bool f = a.add_and_test((distance_limits< T, D >::max)());
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(zero_add + (distance_limits< T, D >::max)()) );
}
{
boost::atomic<T> a(zero_value);
bool f = a.add_and_test((distance_limits< T, D >::min)());
BOOST_TEST_EQ( f, ((distance_limits< T, D >::min)() == 0) );
BOOST_TEST_EQ( a.load(), T(zero_add + (distance_limits< T, D >::min)()) );
}
{
boost::atomic<T> a(zero_value);
bool f = a.sub_and_test(zero_diff);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), zero_value );
f = a.sub_and_test(one_diff);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(zero_add - one_diff) );
}
{
boost::atomic<T> a(zero_value);
bool f = a.sub_and_test((distance_limits< T, D >::max)());
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(zero_add - (distance_limits< T, D >::max)()) );
}
{
boost::atomic<T> a(zero_value);
bool f = a.sub_and_test((distance_limits< T, D >::min)());
BOOST_TEST_EQ( f, ((distance_limits< T, D >::min)() == 0) );
BOOST_TEST_EQ( a.load(), T(zero_add - (distance_limits< T, D >::min)()) );
}
}
template<typename T, typename D, typename AddType>
void test_additive_operators_with_type(T value, D delta)
{
/* note: the tests explicitly cast the result of any addition
to the type to be tested to force truncation of the result to
the correct range in case of overflow */
/* explicit add/sub */
{
boost::atomic<T> a(value);
T n = a.fetch_add(delta);
BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_sub(delta);
BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
BOOST_TEST_EQ( n, value );
}
/* overloaded modify/assign*/
{
boost::atomic<T> a(value);
T n = (a += delta);
BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
BOOST_TEST_EQ( n, T((AddType)value + delta) );
}
{
boost::atomic<T> a(value);
T n = (a -= delta);
BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
BOOST_TEST_EQ( n, T((AddType)value - delta) );
}
/* overloaded increment/decrement */
{
boost::atomic<T> a(value);
T n = a++;
BOOST_TEST_EQ( a.load(), T((AddType)value + 1) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = ++a;
BOOST_TEST_EQ( a.load(), T((AddType)value + 1) );
BOOST_TEST_EQ( n, T((AddType)value + 1) );
}
{
boost::atomic<T> a(value);
T n = a--;
BOOST_TEST_EQ( a.load(), T((AddType)value - 1) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = --a;
BOOST_TEST_EQ( a.load(), T((AddType)value - 1) );
BOOST_TEST_EQ( n, T((AddType)value - 1) );
}
// Opaque operations
{
boost::atomic<T> a(value);
a.opaque_add(delta);
BOOST_TEST_EQ( a.load(), T((AddType)value + delta) );
}
{
boost::atomic<T> a(value);
a.opaque_sub(delta);
BOOST_TEST_EQ( a.load(), T((AddType)value - delta) );
}
// Modify and test operations
test_additive_operators_with_type_and_test< T, D, AddType >();
}
template<typename T, typename D>
void test_additive_operators(T value, D delta)
{
test_additive_operators_with_type<T, D, T>(value, delta);
}
template< typename T >
void test_negation()
{
{
boost::atomic<T> a((T)1);
T n = a.fetch_negate();
BOOST_TEST_EQ( a.load(), (T)-1 );
BOOST_TEST_EQ( n, (T)1 );
n = a.fetch_negate();
BOOST_TEST_EQ( a.load(), (T)1 );
BOOST_TEST_EQ( n, (T)-1 );
}
{
boost::atomic<T> a((T)1);
a.opaque_negate();
BOOST_TEST_EQ( a.load(), (T)-1 );
a.opaque_negate();
BOOST_TEST_EQ( a.load(), (T)1 );
}
}
template<typename T>
void test_additive_wrap(T value)
{
{
boost::atomic<T> a(value);
T n = a.fetch_add(1) + (T)1;
BOOST_TEST_EQ( a.load(), n );
}
{
boost::atomic<T> a(value);
T n = a.fetch_sub(1) - (T)1;
BOOST_TEST_EQ( a.load(), n );
}
}
template<typename T>
void test_bit_operators(T value, T delta)
{
/* explicit and/or/xor */
{
boost::atomic<T> a(value);
T n = a.fetch_and(delta);
BOOST_TEST_EQ( a.load(), T(value & delta) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_or(delta);
BOOST_TEST_EQ( a.load(), T(value | delta) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_xor(delta);
BOOST_TEST_EQ( a.load(), T(value ^ delta) );
BOOST_TEST_EQ( n, value );
}
{
boost::atomic<T> a(value);
T n = a.fetch_complement();
BOOST_TEST_EQ( a.load(), T(~value) );
BOOST_TEST_EQ( n, value );
}
/* overloaded modify/assign */
{
boost::atomic<T> a(value);
T n = (a &= delta);
BOOST_TEST_EQ( a.load(), T(value & delta) );
BOOST_TEST_EQ( n, T(value & delta) );
}
{
boost::atomic<T> a(value);
T n = (a |= delta);
BOOST_TEST_EQ( a.load(), T(value | delta) );
BOOST_TEST_EQ( n, T(value | delta) );
}
{
boost::atomic<T> a(value);
T n = (a ^= delta);
BOOST_TEST_EQ( a.load(), T(value ^ delta) );
BOOST_TEST_EQ( n, T(value ^ delta) );
}
// Opaque operations
{
boost::atomic<T> a(value);
a.opaque_and(delta);
BOOST_TEST_EQ( a.load(), T(value & delta) );
}
{
boost::atomic<T> a(value);
a.opaque_or(delta);
BOOST_TEST_EQ( a.load(), T(value | delta) );
}
{
boost::atomic<T> a(value);
a.opaque_xor(delta);
BOOST_TEST_EQ( a.load(), T(value ^ delta) );
}
{
boost::atomic<T> a(value);
a.opaque_complement();
BOOST_TEST_EQ( a.load(), T(~value) );
}
// Modify and test operations
{
boost::atomic<T> a((T)1);
bool f = a.and_and_test((T)1);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(1) );
f = a.and_and_test((T)0);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(0) );
f = a.and_and_test((T)0);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(0) );
}
{
boost::atomic<T> a((T)0);
bool f = a.or_and_test((T)0);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(0) );
f = a.or_and_test((T)1);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(1) );
f = a.or_and_test((T)1);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(1) );
}
{
boost::atomic<T> a((T)0);
bool f = a.xor_and_test((T)0);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(0) );
f = a.xor_and_test((T)1);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(1) );
f = a.xor_and_test((T)1);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(0) );
}
// Bit test and modify operations
{
boost::atomic<T> a((T)42);
bool f = a.bit_test_and_set(0);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(43) );
f = a.bit_test_and_set(1);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(43) );
f = a.bit_test_and_set(2);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(47) );
}
{
boost::atomic<T> a((T)42);
bool f = a.bit_test_and_reset(0);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(42) );
f = a.bit_test_and_reset(1);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(40) );
f = a.bit_test_and_set(2);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(44) );
}
{
boost::atomic<T> a((T)42);
bool f = a.bit_test_and_complement(0);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(43) );
f = a.bit_test_and_complement(1);
BOOST_TEST_EQ( f, true );
BOOST_TEST_EQ( a.load(), T(41) );
f = a.bit_test_and_complement(2);
BOOST_TEST_EQ( f, false );
BOOST_TEST_EQ( a.load(), T(45) );
}
}
template<typename T>
void do_test_integral_api(boost::false_type)
{
BOOST_TEST( sizeof(boost::atomic<T>) >= sizeof(T));
test_base_operators<T>(42, 43, 44);
test_additive_operators<T, T>(42, 17);
test_bit_operators<T>((T)0x5f5f5f5f5f5f5f5fULL, (T)0xf5f5f5f5f5f5f5f5ULL);
/* test for unsigned overflow/underflow */
test_additive_operators<T, T>((T)-1, 1);
test_additive_operators<T, T>(0, 1);
/* test for signed overflow/underflow */
test_additive_operators<T, T>(((T)-1) >> (sizeof(T) * 8 - 1), 1);
test_additive_operators<T, T>(1 + (((T)-1) >> (sizeof(T) * 8 - 1)), 1);
}
template<typename T>
void do_test_integral_api(boost::true_type)
{
do_test_integral_api<T>(boost::false_type());
test_additive_wrap<T>(0u);
BOOST_CONSTEXPR_OR_CONST T all_ones = ~(T)0u;
test_additive_wrap<T>(all_ones);
BOOST_CONSTEXPR_OR_CONST T max_signed_twos_compl = all_ones >> 1;
test_additive_wrap<T>(all_ones ^ max_signed_twos_compl);
test_additive_wrap<T>(max_signed_twos_compl);
}
template<typename T>
inline void test_integral_api(void)
{
do_test_integral_api<T>(boost::is_unsigned<T>());
if (boost::is_signed<T>::value)
test_negation<T>();
}
template<typename T>
void test_pointer_api(void)
{
BOOST_TEST_GE( sizeof(boost::atomic<T *>), sizeof(T *));
BOOST_TEST_GE( sizeof(boost::atomic<void *>), sizeof(T *));
T values[3];
test_base_operators<T*>(&values[0], &values[1], &values[2]);
test_additive_operators<T*>(&values[1], 1);
test_base_operators<void*>(&values[0], &values[1], &values[2]);
#if defined(BOOST_HAS_INTPTR_T)
boost::atomic<void *> ptr;
boost::atomic<boost::intptr_t> integral;
BOOST_TEST_EQ( ptr.is_lock_free(), integral.is_lock_free() );
#endif
}
enum test_enum
{
foo, bar, baz
};
static void
test_enum_api(void)
{
test_base_operators(foo, bar, baz);
}
template<typename T>
struct test_struct
{
typedef T value_type;
value_type i;
inline bool operator==(const test_struct & c) const {return i == c.i;}
inline bool operator!=(const test_struct & c) const {return i != c.i;}
};
template< typename Char, typename Traits, typename T >
inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct< T > const& s)
{
test_stream << "{" << s.i << "}";
return strm;
}
template<typename T>
void
test_struct_api(void)
{
T a = {1}, b = {2}, c = {3};
test_base_operators(a, b, c);
{
boost::atomic<T> sa;
boost::atomic<typename T::value_type> si;
BOOST_TEST_EQ( sa.is_lock_free(), si.is_lock_free() );
}
}
template<typename T>
struct test_struct_x2
{
typedef T value_type;
value_type i, j;
inline bool operator==(const test_struct_x2 & c) const {return i == c.i && j == c.j;}
inline bool operator!=(const test_struct_x2 & c) const {return i != c.i || j != c.j;}
};
template< typename Char, typename Traits, typename T >
inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct_x2< T > const& s)
{
test_stream << "{" << s.i << ", " << s.j << "}";
return strm;
}
template<typename T>
void
test_struct_x2_api(void)
{
T a = {1, 1}, b = {2, 2}, c = {3, 3};
test_base_operators(a, b, c);
}
struct large_struct
{
long data[64];
inline bool operator==(const large_struct & c) const
{
return std::memcmp(data, &c.data, sizeof(data)) == 0;
}
inline bool operator!=(const large_struct & c) const
{
return std::memcmp(data, &c.data, sizeof(data)) != 0;
}
};
template< typename Char, typename Traits >
inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, large_struct const&)
{
strm << "[large_struct]";
return strm;
}
static void
test_large_struct_api(void)
{
large_struct a = {{1}}, b = {{2}}, c = {{3}};
test_base_operators(a, b, c);
}
struct test_struct_with_ctor
{
typedef unsigned int value_type;
value_type i;
test_struct_with_ctor() : i(0x01234567) {}
inline bool operator==(const test_struct_with_ctor & c) const {return i == c.i;}
inline bool operator!=(const test_struct_with_ctor & c) const {return i != c.i;}
};
template< typename Char, typename Traits >
inline std::basic_ostream< Char, Traits >& operator<< (std::basic_ostream< Char, Traits >& strm, test_struct_with_ctor const&)
{
strm << "[test_struct_with_ctor]";
return strm;
}
static void
test_struct_with_ctor_api(void)
{
{
test_struct_with_ctor s;
boost::atomic<test_struct_with_ctor> sa;
// Check that the default constructor was called
BOOST_TEST( sa.load() == s );
}
test_struct_with_ctor a, b, c;
a.i = 1;
b.i = 2;
c.i = 3;
test_base_operators(a, b, c);
}
#endif


@@ -0,0 +1,282 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Attempt to determine whether the operations on atomic variables
// do in fact behave atomically: Let multiple threads race modifying
// a shared atomic variable and verify that it behaves as expected.
//
// We assume that "observable race condition" events are exponentially
// distributed, with unknown "average time between observable races"
// (which is just the reciprocal of exp distribution parameter lambda).
// Use a non-atomic implementation that intentionally exhibits a
// (hopefully tight) race to compute the maximum-likelihood estimate
// for this time. From this, compute an estimate that covers the
// unknown value with 0.995 confidence (using chi square quantile).
//
// Use this estimate to pick a timeout for the race tests of the
// atomic implementations such that under the assumed distribution
// we get 0.995 probability to detect a race (if there is one).
//
// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
// operations truly behave atomically if this test program does not
// report an error.
#include <boost/atomic.hpp>
#include <algorithm>
#include <boost/ref.hpp>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/date_time/posix_time/time_formatters.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/thread_time.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/core/lightweight_test.hpp>
/* helper class to let two instances of a function race against each
other, with configurable timeout and early abort on detection of error */
class concurrent_runner {
public:
/* concurrently run the function in two threads, until either timeout
or one of the functions returns "false"; returns true if timeout
was reached, or false if early abort and updates timeout accordingly */
static bool
execute(
const boost::function<bool(size_t)> & fn,
boost::posix_time::time_duration & timeout)
{
concurrent_runner runner(fn);
runner.wait_finish(timeout);
return !runner.failure();
}
concurrent_runner(
const boost::function<bool(size_t)> & fn)
: finished_(false), failure_(false)
{
boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 0)).swap(first_thread_);
boost::thread(boost::bind(&concurrent_runner::thread_function, this, fn, 1)).swap(second_thread_);
}
void
wait_finish(boost::posix_time::time_duration & timeout)
{
boost::system_time start = boost::get_system_time();
boost::system_time end = start + timeout;
{
boost::mutex::scoped_lock guard(m_);
while (boost::get_system_time() < end && !finished())
c_.timed_wait(guard, end);
}
finished_.store(true, boost::memory_order_relaxed);
first_thread_.join();
second_thread_.join();
boost::posix_time::time_duration duration = boost::get_system_time() - start;
if (duration < timeout)
timeout = duration;
}
bool
finished(void) const throw() {
return finished_.load(boost::memory_order_relaxed);
}
bool
failure(void) const throw() {
return failure_;
}
private:
void
thread_function(boost::function<bool(size_t)> function, size_t instance)
{
while (!finished()) {
if (!function(instance)) {
boost::mutex::scoped_lock guard(m_);
failure_ = true;
finished_.store(true, boost::memory_order_relaxed);
c_.notify_all();
break;
}
}
}
boost::mutex m_;
boost::condition_variable c_;
boost::atomic<bool> finished_;
bool failure_;
boost::thread first_thread_;
boost::thread second_thread_;
};
bool
racy_add(volatile unsigned int & value, size_t instance)
{
size_t shift = instance * 8;
unsigned int mask = 0xff << shift;
for (size_t n = 0; n < 255; n++) {
unsigned int tmp = value;
value = tmp + (1 << shift);
if ((tmp & mask) != (n << shift))
return false;
}
unsigned int tmp = value;
value = tmp & ~mask;
if ((tmp & mask) != mask)
return false;
return true;
}
/* compute estimate for average time between races being observable, in usecs */
static double
estimate_avg_race_time(void)
{
double sum = 0.0;
/* take 10 samples */
for (size_t n = 0; n < 10; n++) {
boost::posix_time::time_duration timeout(0, 0, 10);
volatile unsigned int value(0);
bool success = concurrent_runner::execute(
boost::bind(racy_add, boost::ref(value), _1),
timeout
);
if (success) {
BOOST_ERROR("Failed to establish baseline time for reproducing race condition");
}
sum = sum + timeout.total_microseconds();
}
/* determine maximum likelihood estimate for average time between
race observations */
double avg_race_time_mle = (sum / 10);
/* pick 0.995 confidence (7.44 = chi square 0.995 confidence) */
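/* for an exponential distribution, an upper confidence bound on the
mean from n samples is 2 * n * mean / chi_square_quantile(alpha, 2 * n);
here n = 10 and chi_square_quantile(0.005, 20) ~= 7.44 */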
double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
return avg_race_time_995;
}
template<typename value_type, size_t shift_>
bool
test_arithmetic(boost::atomic<value_type> & shared_value, size_t instance)
{
size_t shift = instance * 8;
value_type mask = 0xff << shift;
value_type increment = 1 << shift;
value_type expected = 0;
for (size_t n = 0; n < 255; n++) {
value_type tmp = shared_value.fetch_add(increment, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
expected ++;
}
for (size_t n = 0; n < 255; n++) {
value_type tmp = shared_value.fetch_sub(increment, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
expected --;
}
return true;
}
template<typename value_type, size_t shift_>
bool
test_bitops(boost::atomic<value_type> & shared_value, size_t instance)
{
size_t shift = instance * 8;
value_type mask = 0xff << shift;
value_type expected = 0;
for (size_t k = 0; k < 8; k++) {
value_type mod = 1 << k;
value_type tmp = shared_value.fetch_or(mod << shift, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected | mod;
}
for (size_t k = 0; k < 8; k++) {
value_type tmp = shared_value.fetch_and( ~ (1 << (shift + k)), boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected & ~(1<<k);
}
for (size_t k = 0; k < 8; k++) {
value_type mod = 255 ^ (1 << k);
value_type tmp = shared_value.fetch_xor(mod << shift, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift))
return false;
expected = expected ^ mod;
}
value_type tmp = shared_value.fetch_and( ~mask, boost::memory_order_relaxed);
if ( (tmp & mask) != (expected << shift) )
return false;
return true;
}
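/* One or-phase iteration walked through for instance 0: starting from a
   clear lane, fetch_or(0x01) must return 0x00, fetch_or(0x02) must
   return 0x01, and so on. The or/and/xor phases each rebuild `expected`
   in lock step with the shared value, so a lost or torn
   read-modify-write shows up as a mismatch in the owned byte lane. */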
int main(int, char *[])
{
double avg_race_time = estimate_avg_race_time();
    /* 5.298 = -ln(0.005), the 0.995 quantile of the unit exponential distribution */
const boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time));
{
boost::atomic<unsigned int> value(0);
/* testing two different operations in this loop, therefore
enlarge timeout */
boost::posix_time::time_duration tmp(timeout * 2);
bool success = concurrent_runner::execute(
boost::bind(test_arithmetic<unsigned int, 0>, boost::ref(value), _1),
tmp
);
BOOST_TEST(success); // concurrent arithmetic error
}
{
boost::atomic<unsigned int> value(0);
/* testing three different operations in this loop, therefore
enlarge timeout */
boost::posix_time::time_duration tmp(timeout * 3);
bool success = concurrent_runner::execute(
boost::bind(test_bitops<unsigned int, 0>, boost::ref(value), _1),
tmp
);
BOOST_TEST(success); // concurrent bit operations error
}
return boost::report_errors();
}

View File

@@ -0,0 +1,17 @@
// Copyright (c) 2017 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic.hpp>
int main(int, char *[])
{
// The test verifies that atomic<> does not provide arithmetic operations on function pointers
typedef void (*func_ptr)(int);
boost::atomic< func_ptr > a;
a.fetch_add(1);
return 1;
}
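// For comparison, a sketch of the well-formed case: object pointers do
// support atomic arithmetic, advancing by whole elements, e.g.
//   int arr[2] = { 0, 0 };
//   boost::atomic<int*> p(arr);
//   p.fetch_add(1); // advances by one element, i.e. sizeof(int) bytes
// Function, member and void pointers have no element size, so the
// fetch_add calls in this file and the two that follow must not compile.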

View File

@@ -0,0 +1,22 @@
// Copyright (c) 2017 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic.hpp>
struct foo
{
int n;
};
int main(int, char *[])
{
// The test verifies that atomic<> does not provide arithmetic operations on member pointers
typedef int (foo::*mem_ptr);
boost::atomic< mem_ptr > a;
a.fetch_add(1);
return 1;
}

View File

@@ -0,0 +1,16 @@
// Copyright (c) 2017 Andrey Semashev
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic.hpp>
int main(int, char *[])
{
// The test verifies that atomic<> does not provide arithmetic operations on void pointers
boost::atomic< void* > a;
a.fetch_add(1);
return 1;
}

View File

@@ -0,0 +1,61 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/* force fallback implementation using locks */
#define BOOST_ATOMIC_FORCE_FALLBACK 1
#include <boost/atomic.hpp>
#include <boost/cstdint.hpp>
#include "api_test_helpers.hpp"
int main(int, char *[])
{
test_flag_api();
test_integral_api<char>();
test_integral_api<signed char>();
test_integral_api<unsigned char>();
test_integral_api<boost::uint8_t>();
test_integral_api<boost::int8_t>();
test_integral_api<short>();
test_integral_api<unsigned short>();
test_integral_api<boost::uint16_t>();
test_integral_api<boost::int16_t>();
test_integral_api<int>();
test_integral_api<unsigned int>();
test_integral_api<boost::uint32_t>();
test_integral_api<boost::int32_t>();
test_integral_api<long>();
test_integral_api<unsigned long>();
test_integral_api<boost::uint64_t>();
test_integral_api<boost::int64_t>();
test_integral_api<long long>();
test_integral_api<unsigned long long>();
test_pointer_api<int>();
test_enum_api();
test_struct_api<test_struct<boost::uint8_t> >();
test_struct_api<test_struct<boost::uint16_t> >();
test_struct_api<test_struct<boost::uint32_t> >();
test_struct_api<test_struct<boost::uint64_t> >();
// https://svn.boost.org/trac/boost/ticket/10994
test_struct_x2_api<test_struct_x2<boost::uint64_t> >();
// https://svn.boost.org/trac/boost/ticket/9985
test_struct_api<test_struct<double> >();
test_large_struct_api();
// Test that boost::atomic<T> only requires T to be trivially copyable.
// Other non-trivial constructors are allowed.
test_struct_with_ctor_api();
return boost::report_errors();
}

View File

@@ -0,0 +1,205 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Verify that definition of the "LOCK_FREE" macros and the
// "is_lock_free" members is consistent and matches expectations.
// Also, if any operation is lock-free, then the platform
// implementation must provide overridden fence implementations.
#include <boost/atomic.hpp>
#include <iostream>
#include <boost/config.hpp>
#include <boost/core/lightweight_test.hpp>
static const char * lock_free_level[] = {
"never",
"sometimes",
"always"
};
template<typename T>
void
verify_lock_free(const char * type_name, int lock_free_macro_val, int lock_free_expect)
{
BOOST_TEST(lock_free_macro_val >= 0 && lock_free_macro_val <= 2);
BOOST_TEST(lock_free_macro_val == lock_free_expect);
boost::atomic<T> value;
if (lock_free_macro_val == 0)
BOOST_TEST(!value.is_lock_free());
if (lock_free_macro_val == 2)
BOOST_TEST(value.is_lock_free());
BOOST_TEST(boost::atomic<T>::is_always_lock_free == (lock_free_expect == 2));
std::cout << "atomic<" << type_name << "> is " << lock_free_level[lock_free_macro_val] << " lock free\n";
}
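/* Usage sketch (assumed application code, not part of this test): the
   macros follow the C++11 ATOMIC_..._LOCK_FREE convention -- 0 = never,
   1 = sometimes, 2 = always lock-free -- so they can gate a compile-time
   choice that is then double-checked at run time:

   #if BOOST_ATOMIC_INT_LOCK_FREE == 2
       // safe to rely on a lock-free atomic<int> wherever this
       // translation unit compiles
   #endif
*/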
#if (defined(__GNUC__) || defined(__SUNPRO_CC)) && defined(__i386__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && defined(__x86_64__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) && (defined(BOOST_HAS_INT128) || !defined(BOOST_NO_ALIGNMENT))
#define EXPECT_INT128_LOCK_FREE 2
#else
#define EXPECT_INT128_LOCK_FREE 0
#endif
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && (defined(__POWERPC__) || defined(__PPC__))
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_CHAR16_T_LOCK_FREE 2
#define EXPECT_CHAR32_T_LOCK_FREE 2
#define EXPECT_WCHAR_T_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(__powerpc64__)
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) && defined(__alpha__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_CHAR16_T_LOCK_FREE 2
#define EXPECT_CHAR32_T_LOCK_FREE 2
#define EXPECT_WCHAR_T_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__GNUC__) &&\
(\
defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||\
defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) ||\
defined(__ARM_ARCH_6ZK__) ||\
defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||\
defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) ||\
defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7S__)\
)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if !(defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6Z__)\
|| ((defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6ZK__)) && defined(__thumb__)) || defined(__ARM_ARCH_7M__))
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(__linux__) && defined(__arm__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif (defined(__GNUC__) || defined(__SUNPRO_CC)) && (defined(__sparcv8plus) || defined(__sparc_v9__))
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#define EXPECT_LLONG_LOCK_FREE 2
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#elif defined(BOOST_USE_WINDOWS_H) || defined(_WIN32_CE) || defined(BOOST_MSVC) || defined(BOOST_INTEL_WIN) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
#define EXPECT_CHAR_LOCK_FREE 2
#define EXPECT_SHORT_LOCK_FREE 2
#define EXPECT_INT_LOCK_FREE 2
#define EXPECT_LONG_LOCK_FREE 2
#if defined(_WIN64) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(_M_AMD64) || defined(_M_IA64) || (_MSC_VER >= 1700 && (defined(_M_ARM) || defined(_M_ARM64)))
#define EXPECT_LLONG_LOCK_FREE 2
#else
#define EXPECT_LLONG_LOCK_FREE 0
#endif
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 2
#define EXPECT_BOOL_LOCK_FREE 2
#else
#define EXPECT_CHAR_LOCK_FREE 0
#define EXPECT_SHORT_LOCK_FREE 0
#define EXPECT_INT_LOCK_FREE 0
#define EXPECT_LONG_LOCK_FREE 0
#define EXPECT_LLONG_LOCK_FREE 0
#define EXPECT_INT128_LOCK_FREE 0
#define EXPECT_POINTER_LOCK_FREE 0
#define EXPECT_BOOL_LOCK_FREE 0
#endif
int main(int, char *[])
{
verify_lock_free<char>("char", BOOST_ATOMIC_CHAR_LOCK_FREE, EXPECT_CHAR_LOCK_FREE);
verify_lock_free<short>("short", BOOST_ATOMIC_SHORT_LOCK_FREE, EXPECT_SHORT_LOCK_FREE);
verify_lock_free<int>("int", BOOST_ATOMIC_INT_LOCK_FREE, EXPECT_INT_LOCK_FREE);
verify_lock_free<long>("long", BOOST_ATOMIC_LONG_LOCK_FREE, EXPECT_LONG_LOCK_FREE);
#ifdef BOOST_HAS_LONG_LONG
verify_lock_free<long long>("long long", BOOST_ATOMIC_LLONG_LOCK_FREE, EXPECT_LLONG_LOCK_FREE);
#endif
#ifdef BOOST_HAS_INT128
verify_lock_free<boost::int128_type>("int128", BOOST_ATOMIC_INT128_LOCK_FREE, EXPECT_INT128_LOCK_FREE);
#endif
verify_lock_free<void *>("void *", BOOST_ATOMIC_POINTER_LOCK_FREE, EXPECT_POINTER_LOCK_FREE);
verify_lock_free<bool>("bool", BOOST_ATOMIC_BOOL_LOCK_FREE, EXPECT_BOOL_LOCK_FREE);
bool any_lock_free =
BOOST_ATOMIC_CHAR_LOCK_FREE > 0 ||
BOOST_ATOMIC_SHORT_LOCK_FREE > 0 ||
BOOST_ATOMIC_INT_LOCK_FREE > 0 ||
BOOST_ATOMIC_LONG_LOCK_FREE > 0 ||
BOOST_ATOMIC_LLONG_LOCK_FREE > 0 ||
BOOST_ATOMIC_BOOL_LOCK_FREE > 0;
BOOST_TEST(!any_lock_free || BOOST_ATOMIC_THREAD_FENCE > 0);
return boost::report_errors();
}

View File

@@ -0,0 +1,73 @@
// Copyright (c) 2011 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/atomic.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include "api_test_helpers.hpp"
int main(int, char *[])
{
test_flag_api();
test_integral_api<char>();
test_integral_api<signed char>();
test_integral_api<unsigned char>();
test_integral_api<boost::uint8_t>();
test_integral_api<boost::int8_t>();
test_integral_api<short>();
test_integral_api<unsigned short>();
test_integral_api<boost::uint16_t>();
test_integral_api<boost::int16_t>();
test_integral_api<int>();
test_integral_api<unsigned int>();
test_integral_api<boost::uint32_t>();
test_integral_api<boost::int32_t>();
test_integral_api<long>();
test_integral_api<unsigned long>();
test_integral_api<boost::uint64_t>();
test_integral_api<boost::int64_t>();
test_integral_api<long long>();
test_integral_api<unsigned long long>();
#if defined(BOOST_HAS_INT128)
test_integral_api<boost::int128_type>();
test_integral_api<boost::uint128_type>();
#endif
test_constexpr_ctor<char>();
test_constexpr_ctor<short>();
test_constexpr_ctor<int>();
test_constexpr_ctor<long>();
test_constexpr_ctor<int*>();
test_pointer_api<int>();
test_enum_api();
test_struct_api<test_struct<boost::uint8_t> >();
test_struct_api<test_struct<boost::uint16_t> >();
test_struct_api<test_struct<boost::uint32_t> >();
test_struct_api<test_struct<boost::uint64_t> >();
#if defined(BOOST_HAS_INT128)
test_struct_api<test_struct<boost::uint128_type> >();
#endif
// https://svn.boost.org/trac/boost/ticket/10994
test_struct_x2_api<test_struct_x2<boost::uint64_t> >();
// https://svn.boost.org/trac/boost/ticket/9985
test_struct_api<test_struct<double> >();
test_large_struct_api();
// Test that boost::atomic<T> only requires T to be trivially copyable.
// Other non-trivial constructors are allowed.
test_struct_with_ctor_api();
return boost::report_errors();
}

View File

@@ -0,0 +1,257 @@
// Copyright (c) 2011 Helge Bahmann
// Copyright (c) 2012 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Attempt to determine whether the memory ordering/fence operations
// work as expected:
// Let two threads race accessing multiple shared variables and
// verify that "observable" order of operations matches with the
// ordering constraints specified.
//
// We assume that "memory ordering violation" events are exponentially
// distributed, with unknown "average time between violations"
// (which is just the reciprocal of exp distribution parameter lambda).
// Use a "relaxed ordering" implementation that intentionally exhibits
// a (hopefully observable) such violation to compute the maximum-likelihood
// estimate for this time. From this, compute an estimate that covers the
// unknown value with 0.995 confidence (using chi square quantile).
//
// Use this estimate to pick a timeout for the race tests of the
// atomic implementations such that under the assumed distribution
// we get 0.995 probability to detect a race (if there is one).
//
// Overall this yields 0.995 * 0.995 > 0.99 confidence that the
// fences work as expected if this test program does not
// report an error.
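// Concretely: P(the baseline bound covers the true mean) >= 0.995 and
// P(a race surfaces within the chosen timeout, given races happen)
// >= 0.995, and 0.995 * 0.995 = 0.990025 > 0.99.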
#include <boost/atomic.hpp>
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/date_time/posix_time/time_formatters.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/thread_time.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/barrier.hpp>
#include <boost/core/lightweight_test.hpp>
// Two threads perform the following operations:
//
// thread # 1 thread # 2
// store(a, 1) store(b, 1)
// x = read(b) y = read(a)
//
// Under relaxed memory ordering, the case (x, y) == (0, 0) is
// possible. Under sequential consistency, this case is impossible.
//
// This "problem" is reproducible on all platforms, even x86.
template<boost::memory_order store_order, boost::memory_order load_order>
class total_store_order_test {
public:
total_store_order_test(void);
void run(boost::posix_time::time_duration & timeout);
bool detected_conflict(void) const { return detected_conflict_; }
private:
void thread1fn(void);
void thread2fn(void);
void check_conflict(void);
boost::atomic<int> a_;
/* insert a bit of padding to push the two variables into
different cache lines and increase the likelihood of detecting
a conflict */
char pad1_[512];
boost::atomic<int> b_;
char pad2_[512];
boost::barrier barrier_;
int vrfyb1_, vrfya2_;
boost::atomic<bool> terminate_threads_;
boost::atomic<int> termination_consensus_;
bool detected_conflict_;
boost::mutex m_;
boost::condition_variable c_;
};
template<boost::memory_order store_order, boost::memory_order load_order>
total_store_order_test<store_order, load_order>::total_store_order_test(void)
: a_(0), b_(0), barrier_(2),
vrfyb1_(0), vrfya2_(0),
terminate_threads_(false), termination_consensus_(0),
detected_conflict_(false)
{
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::run(boost::posix_time::time_duration & timeout)
{
boost::system_time start = boost::get_system_time();
boost::system_time end = start + timeout;
boost::thread t1(boost::bind(&total_store_order_test::thread1fn, this));
boost::thread t2(boost::bind(&total_store_order_test::thread2fn, this));
{
boost::mutex::scoped_lock guard(m_);
while (boost::get_system_time() < end && !detected_conflict_)
c_.timed_wait(guard, end);
}
terminate_threads_.store(true, boost::memory_order_relaxed);
t2.join();
t1.join();
boost::posix_time::time_duration duration = boost::get_system_time() - start;
if (duration < timeout)
timeout = duration;
}
volatile int backoff_dummy;
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::thread1fn(void)
{
for (;;) {
a_.store(1, store_order);
int b = b_.load(load_order);
barrier_.wait();
vrfyb1_ = b;
barrier_.wait();
check_conflict();
/* both threads synchronize via barriers, so either
both threads must exit here, or they must both do
another round, otherwise one of them will wait forever */
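        /* a reading of the consensus bits (mirrored in thread2fn):
           bit 0 and bit 1 record thread 1's and thread 2's vote to
           exit, and a thread only returns once it observes both votes
           (tmp == 3). Bit 2 (value 4) is toggled once per extra round
           by each thread, so finding it set means the peer has already
           committed to another round; this thread breaks out and
           follows, keeping the barrier waits paired */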
if (terminate_threads_.load(boost::memory_order_relaxed)) for (;;) {
int tmp = termination_consensus_.fetch_or(1, boost::memory_order_relaxed);
if (tmp == 3)
return;
if (tmp & 4)
break;
}
termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
unsigned int delay = rand() % 10000;
a_.store(0, boost::memory_order_relaxed);
barrier_.wait();
while(delay--) { backoff_dummy = delay; }
}
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::thread2fn(void)
{
for (;;) {
b_.store(1, store_order);
int a = a_.load(load_order);
barrier_.wait();
vrfya2_ = a;
barrier_.wait();
check_conflict();
/* both threads synchronize via barriers, so either
both threads must exit here, or they must both do
another round, otherwise one of them will wait forever */
if (terminate_threads_.load(boost::memory_order_relaxed)) for (;;) {
int tmp = termination_consensus_.fetch_or(2, boost::memory_order_relaxed);
if (tmp == 3)
return;
if (tmp & 4)
break;
}
termination_consensus_.fetch_xor(4, boost::memory_order_relaxed);
unsigned int delay = rand() % 10000;
b_.store(0, boost::memory_order_relaxed);
barrier_.wait();
while(delay--) { backoff_dummy = delay; }
}
}
template<boost::memory_order store_order, boost::memory_order load_order>
void
total_store_order_test<store_order, load_order>::check_conflict(void)
{
if (vrfyb1_ == 0 && vrfya2_ == 0) {
boost::mutex::scoped_lock guard(m_);
detected_conflict_ = true;
terminate_threads_.store(true, boost::memory_order_relaxed);
c_.notify_all();
}
}
void
test_seq_cst(void)
{
double sum = 0.0;
/* take 10 samples */
for (size_t n = 0; n < 10; n++) {
boost::posix_time::time_duration timeout(0, 0, 10);
total_store_order_test<boost::memory_order_relaxed, boost::memory_order_relaxed> test;
test.run(timeout);
if (!test.detected_conflict()) {
std::cout << "Failed to detect order=seq_cst violation while ith order=relaxed -- intrinsic ordering too strong for this test\n";
return;
}
std::cout << "seq_cst violation with order=relaxed after " << boost::posix_time::to_simple_string(timeout) << "\n";
sum = sum + timeout.total_microseconds();
}
/* determine maximum likelihood estimate for average time between
race observations */
double avg_race_time_mle = (sum / 10);
    /* pick 0.995 confidence (7.44 ~= 0.005 quantile of the chi-square distribution with 2*10 degrees of freedom) */
double avg_race_time_995 = avg_race_time_mle * 2 * 10 / 7.44;
    /* 5.298 = -ln(0.005), the 0.995 quantile of the unit exponential distribution */
boost::posix_time::time_duration timeout = boost::posix_time::microseconds((long)(5.298 * avg_race_time_995));
std::cout << "run seq_cst for " << boost::posix_time::to_simple_string(timeout) << "\n";
total_store_order_test<boost::memory_order_seq_cst, boost::memory_order_seq_cst> test;
test.run(timeout);
BOOST_TEST(!test.detected_conflict()); // sequential consistency error
}
int main(int, char *[])
{
test_seq_cst();
return boost::report_errors();
}