Unverified Commit 208a6d97 authored by Louis Dionne's avatar Louis Dionne Committed by GitHub
Browse files

[libc++] Fix inconsistency between is_lock_free and is_always_lock_free (#68109)

std::atomic is implemented with the following (confusing!) hierarchy of
types:

     std::atomic<T> : std::__atomic_base<T> { ... };
     std::__atomic_base<T> {
          std::__cxx_atomic_impl<T> __impl;
     };
     std::__cxx_atomic_impl<T> {
          _Atomic(T) __val;
     };

Inside std::__atomic_base, we implement the is_lock_free() and
is_always_lock_free() functions. However, we used to implement them
inconsistently:
- is_always_lock_free() is based on whether __cxx_atomic_impl<T> is
always lock free (using the builtin), which means that we include any
potential padding added by _Atomic(T) into the determination.
- is_lock_free() was based on whether T is lock free (using the
builtin), which meant that we did not take into account any potential
padding added by _Atomic(T).

It is important to note that the padding added by _Atomic(T) can turn a
type that wouldn't otherwise be lock free into a lock free one, for
example by making its size a power of two.

The inconsistency of how the two functions were implemented could lead
to cases where is_always_lock_free() would return true, but
is_lock_free() would then return false. This was the case, for example,
with the following type, which is always lock free on arm64 but was
incorrectly reported as !is_lock_free() before this patch:

     struct Foo { float x[3]; };

This patch switches the determination of is_lock_free() to be based on
__cxx_atomic_impl<T> instead to match how we determine
is_always_lock_free().

rdar://115324353
parent 1196e6dd
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@ struct __atomic_base // false

    _LIBCPP_HIDE_FROM_ABI
    bool is_lock_free() const volatile _NOEXCEPT
        {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
        {return __cxx_atomic_is_lock_free(sizeof(__cxx_atomic_impl<_Tp>));}
    _LIBCPP_HIDE_FROM_ABI
    bool is_lock_free() const _NOEXCEPT
        {return static_cast<__atomic_base const volatile*>(this)->is_lock_free();}
+1 −0
Original line number Diff line number Diff line
@@ -100,6 +100,7 @@ int main(int, char**) {
  CHECK_ALIGNMENT(struct Empty {});
  CHECK_ALIGNMENT(struct OneInt { int i; });
  CHECK_ALIGNMENT(struct IntArr2 { int i[2]; });
  CHECK_ALIGNMENT(struct FloatArr3 { float i[3]; });
  CHECK_ALIGNMENT(struct LLIArr2 { long long int i[2]; });
  CHECK_ALIGNMENT(struct LLIArr4 { long long int i[4]; });
  CHECK_ALIGNMENT(struct LLIArr8 { long long int i[8]; });
+1 −1
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@
template <typename T>
void checkAlwaysLockFree() {
  if (std::atomic<T>::is_always_lock_free) {
    LIBCPP_ASSERT(sizeof(std::atomic<T>) == sizeof(T)); // technically not required, but libc++ does it that way
    assert(std::atomic<T>().is_lock_free());
  }
}
@@ -79,6 +78,7 @@ void run()
    CHECK_ALWAYS_LOCK_FREE(struct Empty {});
    CHECK_ALWAYS_LOCK_FREE(struct OneInt { int i; });
    CHECK_ALWAYS_LOCK_FREE(struct IntArr2 { int i[2]; });
    CHECK_ALWAYS_LOCK_FREE(struct FloatArr3 { float i[3]; });
    CHECK_ALWAYS_LOCK_FREE(struct LLIArr2 { long long int i[2]; });
    CHECK_ALWAYS_LOCK_FREE(struct LLIArr4 { long long int i[4]; });
    CHECK_ALWAYS_LOCK_FREE(struct LLIArr8 { long long int i[8]; });
+8 −2
Original line number Diff line number Diff line
@@ -80,8 +80,14 @@ do_test()
    typedef typename std::remove_pointer<T>::type X;
    A obj(T(0));
    assert(obj == T(0));
    bool b0 = obj.is_lock_free();
    ((void)b0); // mark as unused
    {
        bool lockfree = obj.is_lock_free();
        (void)lockfree;
#if TEST_STD_VER >= 17
        if (A::is_always_lock_free)
            assert(lockfree);
#endif
    }
    obj.store(T(0));
    assert(obj == T(0));
    obj.store(T(1), std::memory_order_release);
+24 −6
Original line number Diff line number Diff line
@@ -61,8 +61,14 @@ int main(int, char**)
    {
        volatile std::atomic<bool> obj(true);
        assert(obj == true);
        bool b0 = obj.is_lock_free();
        (void)b0; // to placate scan-build
        {
            bool lockfree = obj.is_lock_free();
            (void)lockfree;
#if TEST_STD_VER >= 17
            if (std::atomic<bool>::is_always_lock_free)
                assert(lockfree);
#endif
        }
        obj.store(false);
        assert(obj == false);
        obj.store(true, std::memory_order_release);
@@ -112,8 +118,14 @@ int main(int, char**)
    {
        std::atomic<bool> obj(true);
        assert(obj == true);
        bool b0 = obj.is_lock_free();
        (void)b0; // to placate scan-build
        {
            bool lockfree = obj.is_lock_free();
            (void)lockfree;
#if TEST_STD_VER >= 17
            if (std::atomic<bool>::is_always_lock_free)
                assert(lockfree);
#endif
        }
        obj.store(false);
        assert(obj == false);
        obj.store(true, std::memory_order_release);
@@ -163,8 +175,14 @@ int main(int, char**)
    {
        std::atomic_bool obj(true);
        assert(obj == true);
        bool b0 = obj.is_lock_free();
        (void)b0; // to placate scan-build
        {
            bool lockfree = obj.is_lock_free();
            (void)lockfree;
#if TEST_STD_VER >= 17
            if (std::atomic_bool::is_always_lock_free)
                assert(lockfree);
#endif
        }
        obj.store(false);
        assert(obj == false);
        obj.store(true, std::memory_order_release);
Loading