From be22ed405a6bddf82f663d80f516b38d1913e6ce Mon Sep 17 00:00:00 2001
From: Howard Hinnant
Date: Mon, 18 Oct 2010 16:40:13 +0000
Subject: [PATCH] Update atomic Design A spec with reference C++
 implementations for the purpose of documenting the semantics of each atomic
 operation.

git-svn-id: https://llvm.org/svn/llvm-project/libcxx/trunk@116713 91177308-0d34-0410-b5e6-96231b3b80d8
---
 www/atomic_design_a.html | 146 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 145 insertions(+), 1 deletion(-)

diff --git a/www/atomic_design_a.html b/www/atomic_design_a.html
index d8cc6c61..f649b2b3 100644
--- a/www/atomic_design_a.html
+++ b/www/atomic_design_a.html
@@ -71,7 +71,7 @@ type __atomic_load(const type* atomic_obj, int mem_ord);
 
 // type must be trivially copyable
 // Behavior is defined for mem_ord = 0, 3, 5
-type __atomic_store(type* atomic_obj, type desired, int mem_ord);
+void __atomic_store(type* atomic_obj, type desired, int mem_ord);
 
 // type must be trivially copyable
 // Behavior is defined for mem_ord = [0 ... 5]
@@ -160,6 +160,150 @@ translate_memory_order(int o)
     return o;
 }
 
+
+

+Below are representative C++ implementations of all of the operations. Their
+purpose is to document the desired semantics of each operation, assuming
+memory_order_seq_cst. This is essentially the code that will be called
+if the front end calls out to compiler-rt.

+ +
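+// Every operation below serializes on a single mutex, "some_mutex", whose
+// declaration is not shown here.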
+template <class T>
+T
+__atomic_load(T const volatile* obj)
+{
+    unique_lock<mutex> _(some_mutex);
+    return *obj;
+}
+
+template <class T>
+void
+__atomic_store(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    *obj = desr;
+}
+
+template <class T>
+T
+__atomic_exchange(T volatile* obj, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj = desr;
+    return r;
+}
+
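+// Note that the comparison is bytewise (memcmp), not T's operator==.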
+template <class T>
+bool
+__atomic_compare_exchange_strong(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
+    return false;
+}
+
+// May spuriously return false (even if *obj == *exp)
+template <class T>
+bool
+__atomic_compare_exchange_weak(T volatile* obj, T* exp, T desr)
+{
+    unique_lock<mutex> _(some_mutex);
+    if (std::memcmp(const_cast<T*>(obj), exp, sizeof(T)) == 0) // if (*obj == *exp)
+    {
+        std::memcpy(const_cast<T*>(obj), &desr, sizeof(T)); // *obj = desr;
+        return true;
+    }
+    std::memcpy(exp, const_cast<T*>(obj), sizeof(T)); // *exp = *obj;
+    return false;
+}
+
+template <class T>
+T
+__atomic_fetch_add(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj += operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_sub(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj -= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_and(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj &= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_or(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj |= operand;
+    return r;
+}
+
+template <class T>
+T
+__atomic_fetch_xor(T volatile* obj, T operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    T r = *obj;
+    *obj ^= operand;
+    return r;
+}
+
+void*
+__atomic_fetch_add(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
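+    // void* has no arithmetic, so step the stored pointer forward by
+    // "operand" bytes through a char* lvalue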
+    (char*&)(*obj) += operand;
+    return r;
+}
+
+void*
+__atomic_fetch_sub(void* volatile* obj, ptrdiff_t operand)
+{
+    unique_lock<mutex> _(some_mutex);
+    void* r = *obj;
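+    // as above, but step the stored pointer backward by "operand" bytes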
+    (char*&)(*obj) -= operand;
+    return r;
+}
+
+void __atomic_thread_fence()
+{
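+    // Locking and unlocking the mutex shared by every atomic operation is
+    // what provides the fence's ordering in this lock-based scheme.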
+    unique_lock<mutex> _(some_mutex);
+}
+
+void __atomic_signal_fence()
+{
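+    // A signal fence only has to order a thread with respect to a signal
+    // handler running in that same thread; taking the lock is conservative.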
+    unique_lock<mutex> _(some_mutex);
+}
+
+
+
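
As an illustration only (not part of the patch, and not libc++'s actual implementation), the reference semantics above can be exercised with a small self-contained program. The functions are renamed here (locked_load, locked_fetch_add) because the double-underscore names are reserved for the implementation and collide with compiler builtins, and some_mutex is given the global definition the snippets above leave implicit:

#include <iostream>
#include <mutex>

std::mutex some_mutex;   // the single lock every operation serializes on

// Renamed copy of __atomic_load from the patch.
template <class T>
T locked_load(T const volatile* obj)
{
    std::unique_lock<std::mutex> _(some_mutex);
    return *obj;
}

// Renamed copy of __atomic_fetch_add from the patch.
template <class T>
T locked_fetch_add(T volatile* obj, T operand)
{
    std::unique_lock<std::mutex> _(some_mutex);
    T r = *obj;
    *obj += operand;
    return r;
}

int main()
{
    volatile int counter = 0;
    locked_fetch_add(&counter, 5);               // returns the old value, 0
    std::cout << locked_load(&counter) << '\n';  // prints 5
}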