diff --git a/www/atomic_design_a.html b/www/atomic_design_a.html
index d8cc6c61..f649b2b3 100644
--- a/www/atomic_design_a.html
+++ b/www/atomic_design_a.html
@@ -71,7 +71,7 @@ type __atomic_load(const type* atomic_obj, int mem_ord);
 
 <font color="#C80000">// type must be trivially copyable</font>
 <font color="#C80000">// Behavior is defined for mem_ord = 0, 3, 5</font>
-type __atomic_store(type* atomic_obj, type desired, int mem_ord);
+void __atomic_store(type* atomic_obj, type desired, int mem_ord);
 
 <font color="#C80000">// type must be trivially copyable</font>
 <font color="#C80000">// Behavior is defined for mem_ord = [0 ... 5]</font>
@@ -160,6 +160,150 @@ translate_memory_order(int o)
     return o;
 }
 </pre></blockquote>
+
+<p>
+Below are representative C++ implementations of all of the operations. Their
+purpose is to document the desired semantics of each operation, assuming
+<tt>memory_order_seq_cst</tt>. This is essentially the code that will be called
+if the front end calls out to compiler-rt.
+</p>
+
+<blockquote><pre>
+template <class T>
+T
+__atomic_load(T const volatile* obj)
+{
+    unique_lock<mutex> _(some_mutex);
+    return *obj;
+}
+
+template <class T>
+void
+__atomic_store(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+}
+
+template <class T>
+T
+__atomic_exchange(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj = desr;
+    return r;
+}
+
+template <class T>
+bool
+__atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+    return false;
+}
+
+<font color="#C80000">// May spuriously return false (even if *obj == *exp)</font>
+template <class T>
+bool
+__atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) <font color="#C80000">// if (*obj == *exp)</font>
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); <font color="#C80000">// *obj = desr;</font>
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); <font color="#C80000">// *exp = *obj;</font>
+    return false;
+}
+
+template <class T>
+T
+__atomic_fetch_add(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj += operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_sub(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj -= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_and(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj &= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_or(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj |= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_xor(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj ^= operand;
+    return r;
+}
+
+void*
+__atomic_fetch_add(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) += operand;
+    return r;
+}
+
+void*
+__atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
+    (char*&)(*obj) -= operand;
+    return r;
+}
+
+void __atomic_thread_fence()
+{
+    unique_lock<mutex> _(some_mutex);
+}
+
+void __atomic_signal_fence()
+{
+    unique_lock<mutex> _(some_mutex);
+}
+</pre></blockquote>
+
+
 </div>
 </body>
 </html>
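
For illustration: a minimal, self-contained sketch of the lock-based
semantics the patch documents. Because every operation acquires the one
some_mutex, all of the operations are serialized against each other, which
is what justifies documenting them under the memory_order_seq_cst
assumption (note this also means the __atomic_compare_exchange_weak shown
above never actually fails spuriously, although its contract permits it).
The concrete std::mutex type for some_mutex, the *_sketch helper names, and
the main() driver are assumptions made for this sketch only; the function
bodies mirror the representative implementations in the patch.

// Sketch only: exercises the lock-based fallback semantics shown in the
// patch. some_mutex is assumed to be a plain std::mutex; fetch_add_sketch,
// cas_strong_sketch, and main() are hypothetical, not part of the patch.
#include <cstring>   // std::memcmp, std::memcpy
#include <iostream>
#include <mutex>

static std::mutex some_mutex;  // one global lock; acquiring it in every
                               // operation serializes them all

template <class T>
T
fetch_add_sketch(T volatile* obj, T operand)  // mirrors __atomic_fetch_add
{
    std::unique_lock<std::mutex> _(some_mutex);
    T r = *obj;
    *obj = r + operand;  // same effect as "*obj += operand;" in the patch
    return r;
}

template <class T>
bool
cas_strong_sketch(T volatile* obj, T* exp, T desr)  // mirrors __atomic_compare_exchange_strong
{
    std::unique_lock<std::mutex> _(some_mutex);
    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0)  // if (*obj == *exp)
    {
        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T));     // *obj = desr;
        return true;
    }
    std::memcpy(exp, const_cast<T*>(obj), sizeof(T));           // *exp = *obj;
    return false;
}

int main()
{
    int volatile counter = 5;
    int old = fetch_add_sketch(&counter, 3);               // returns 5; counter is now 8
    int expected = 8;
    bool ok = cas_strong_sketch(&counter, &expected, 42);  // succeeds; counter is now 42
    std::cout << old << ' ' << counter << ' ' << ok << '\n';  // prints "5 42 1"
}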