diff --git a/www/atomic_design.html b/www/atomic_design.html index 36e73244..87a2f624 100644 --- a/www/atomic_design.html +++ b/www/atomic_design.html @@ -53,6 +53,40 @@ should be identical (and conforming) for all three designs. +

+With any design, the (back end) compiler writer should note: +

+ +
+

+The decision to implement lock-free operations on any given type (or not) is an +ABI-binding decision. One cannot change from treating a type as not lock free, +to lock free (or vice versa) without breaking your ABI. +

+ +

+Example: +

+ +
+TU1.cc
+-----------
+extern atomic<long long> A;
+int foo() { return A.compare_exchange_strong(w, x); }
+
+TU2.cc
+-----------
+extern atomic<long long> A;
+void bar() { A.compare_exchange_strong(y, z); }
+
+
+ +

+If only one of these calls to compare_exchange_strong is +implemented with mutex-locked code, then that mutex-locked code will not be +executed mutually exclusively with the call implemented in a lock-free manner. +

+ diff --git a/www/atomic_design_a.html b/www/atomic_design_a.html index f09d0b36..bcb26271 100644 --- a/www/atomic_design_a.html +++ b/www/atomic_design_a.html @@ -49,6 +49,9 @@ the memory ordering parameter. // In every intrinsic signature below, type* atomic_obj may be a pointer to a // volatile-qualifed type. +// type must be trivially copyable +bool __atomic_is_lock_free(const type* atomic_obj); + // type must be trivially copyable // Behavior is defined for mem_ord = 0, 1, 2, 5 type __atomic_load(const type* atomic_obj, int mem_ord); diff --git a/www/atomic_design_b.html b/www/atomic_design_b.html index 7891050c..b738445b 100644 --- a/www/atomic_design_b.html +++ b/www/atomic_design_b.html @@ -44,18 +44,21 @@ option in the spirit of completeness.

-// type can be any pod
+// type must be trivially copyable
+bool __atomic_is_lock_free(const type* atomic_obj);
+
+// type must be trivially copyable
 type __atomic_load_relaxed(const volatile type* atomic_obj);
 type __atomic_load_consume(const volatile type* atomic_obj);
 type __atomic_load_acquire(const volatile type* atomic_obj);
 type __atomic_load_seq_cst(const volatile type* atomic_obj);
 
-// type can be any pod
+// type must be trivially copyable
 type __atomic_store_relaxed(volatile type* atomic_obj, type desired);
 type __atomic_store_release(volatile type* atomic_obj, type desired);
 type __atomic_store_seq_cst(volatile type* atomic_obj, type desired);
 
-// type can be any pod
+// type must be trivially copyable
 type __atomic_exchange_relaxed(volatile type* atomic_obj, type desired);
 type __atomic_exchange_consume(volatile type* atomic_obj, type desired);
 type __atomic_exchange_acquire(volatile type* atomic_obj, type desired);
@@ -63,7 +66,7 @@ type __atomic_exchange_release(volatile type* atomic_obj, type desired);
 type __atomic_exchange_acq_rel(volatile type* atomic_obj, type desired);
 type __atomic_exchange_seq_cst(volatile type* atomic_obj, type desired);
 
-// type can be any pod
+// type must be trivially copyable
 bool __atomic_compare_exchange_strong_relaxed_relaxed(volatile type* atomic_obj,
                                                       type* expected,
                                                       type desired);
@@ -113,7 +116,7 @@ bool __atomic_compare_exchange_strong_seq_cst_seq_cst(volatile type* atomic_obj,
                                                       type* expected,
                                                       type desired);
 
-// type can be any pod
+// type must be trivially copyable
 bool __atomic_compare_exchange_weak_relaxed_relaxed(volatile type* atomic_obj,
                                                     type* expected,
                                                     type desired);