Commit 6fd6cfdf authored by Peter Collingbourne

scudo: Replace a couple of macros with their expansions.

The macros INLINE and COMPILER_CHECK always expand to the same thing (inline
and static_assert respectively). Both expansions are standards-compliant C++
and are what the rest of LLVM uses directly, so let's improve consistency by
replacing the macros with their expansions.

Differential Revision: https://reviews.llvm.org/D70793
parent f30fe16d
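
For readers who have not seen the two macros: their definitions (in scudo's
internal_defs.h) amount to the following. This is a reconstruction for
illustration, not the verbatim header:

// Approximate pre-patch definitions (reconstruction, not verbatim).
#define INLINE inline
#define COMPILER_CHECK(Pred) static_assert(Pred, "")

// So a call site such as
//   COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
// was already textually identical to the static_assert that replaces it.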
+17 −17
@@ -21,12 +21,12 @@ enum memory_order {
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
-COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
-COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
-COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
-COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
-COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
-COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");

struct atomic_u8 {
  typedef u8 Type;
@@ -60,7 +60,7 @@ struct atomic_uptr {
};

template <typename T>
-INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type V;
  __atomic_load(&A->ValDoNotUse, &V, MO);
@@ -68,29 +68,29 @@ INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
}

template <typename T>
-INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}

-INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }

template <typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}

template <typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}

template <typename T>
-INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type R;
@@ -99,7 +99,7 @@ INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
}

template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                           typename T::Type Xchg,
                                           memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
@@ -107,7 +107,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
}

template <typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
                                         typename T::Type Xchg,
                                         memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
@@ -117,17 +117,17 @@ INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
// Clutter-reducing helpers.

template <typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}

template <typename T>
-INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}

template <typename T>
-INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                typename T::Type Cmp,
                                                typename T::Type Xchg) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
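
With the macro gone, the whole file reads as ordinary inline wrappers over
the __atomic builtins. A minimal usage sketch (hypothetical caller, not part
of the patch; assumes scudo also defines an atomic_u32 wrapper alongside the
atomic_u8 and atomic_uptr shown above):

// Hypothetical call sites for the helpers above; not part of the patch.
namespace scudo {

atomic_u32 Counter = {};

void bump() {
  // Valid because memory_order_relaxed is pinned to __ATOMIC_RELAXED above.
  atomic_fetch_add(&Counter, 1U, memory_order_relaxed);
}

u32 current() { return atomic_load_relaxed(&Counter); }

} // namespace scudo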
+1 −1
@@ -37,7 +37,7 @@ enum class Checksum : u8 {
// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
// that do not support hardware CRC32. The checksum itself is 16-bit, which is at
// odds with CRC32, but enough for our needs.
-INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
  for (u8 I = 0; I < sizeof(Data); I++) {
    Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
    Sum = static_cast<u16>(Sum + (Data & 0xff));
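
The visible loop is the classic BSD rotate-and-add checksum step: rotate the
16-bit sum right by one bit, then add the next byte (the hunk cuts off before
the rest of the loop body). The same algorithm over a plain byte buffer, as a
standalone sketch:

#include <cstddef>
#include <cstdint>

// Standalone illustration of the BSD (rotate-and-add) checksum; not scudo code.
uint16_t bsdChecksum(const uint8_t *Data, size_t Len) {
  uint16_t Sum = 0;
  for (size_t I = 0; I < Len; I++) {
    Sum = static_cast<uint16_t>((Sum >> 1) | ((Sum & 1) << 15)); // rotate right
    Sum = static_cast<uint16_t>(Sum + Data[I]);                  // add next byte
  }
  return Sum;
}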
+9 −9
@@ -20,7 +20,7 @@ namespace scudo {

extern Checksum HashAlgorithm;

-INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
  // as opposed to only for crc32_hw.cpp. This means that other hardware
  // specific instructions were likely emitted at other places, and as a result
@@ -71,7 +71,7 @@ struct UnpackedHeader {
  uptr Checksum : 16;
};
typedef atomic_u64 AtomicPackedHeader;
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");

// Those constants are required to silence some -Werror=conversion errors when
// assigning values to the related bitfield variables.
@@ -86,12 +86,12 @@ constexpr uptr getHeaderSize() {
  return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}

-INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
  return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                getHeaderSize());
}

-INLINE
+inline
const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
  return reinterpret_cast<const AtomicPackedHeader *>(
      reinterpret_cast<uptr>(Ptr) - getHeaderSize());
@@ -100,7 +100,7 @@ const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
// We do not need a cryptographically strong hash for the checksum, but a CRC
// type function that can alert us in the event a header is invalid or
// corrupted. Ideally slightly better than a simple xor of all fields.
-static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
                                        UnpackedHeader *Header) {
  UnpackedHeader ZeroChecksumHeader = *Header;
  ZeroChecksumHeader.Checksum = 0;
@@ -110,7 +110,7 @@ static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
                         ARRAY_SIZE(HeaderHolder));
}

-INLINE void storeHeader(u32 Cookie, void *Ptr,
+inline void storeHeader(u32 Cookie, void *Ptr,
                        UnpackedHeader *NewUnpackedHeader) {
  NewUnpackedHeader->Checksum =
      computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
@@ -118,7 +118,7 @@ INLINE void storeHeader(u32 Cookie, void *Ptr,
  atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
}

-INLINE
+inline
void loadHeader(u32 Cookie, const void *Ptr,
                UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
@@ -128,7 +128,7 @@ void loadHeader(u32 Cookie, const void *Ptr,
    reportHeaderCorruption(const_cast<void *>(Ptr));
}

-INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+inline void compareExchangeHeader(u32 Cookie, void *Ptr,
                                  UnpackedHeader *NewUnpackedHeader,
                                  UnpackedHeader *OldUnpackedHeader) {
  NewUnpackedHeader->Checksum =
@@ -141,7 +141,7 @@ INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
    reportHeaderRace(Ptr);
}

-INLINE
+inline
bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
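
The sizeof assertion earlier in this file is what keeps the bit-field layout
honest: UnpackedHeader must pack into exactly one PackedHeader word so that
the relaxed atomic load/store above stays a single 64-bit access. The same
pattern in miniature, with hypothetical field widths that sum to 64:

#include <cstdint>

// Miniature of the packed-header pattern (hypothetical field widths).
typedef uint64_t PackedHeader;
struct UnpackedHeader {
  uint64_t ClassId : 8;
  uint64_t State : 2;
  uint64_t SizeOrUnusedBytes : 38;
  uint64_t Checksum : 16;
};
// Widen any field past 64 bits total and the struct grows, so this fires.
static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");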
+3 −3
@@ -184,7 +184,7 @@ public:
        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());

    // Takes care of extravagantly large sizes as well as integer overflows.
-    COMPILER_CHECK(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment);
+    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
    if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
      if (Options.MayReturnNull)
        return nullptr;
@@ -523,7 +523,7 @@ private:
      reportSanityCheckError("class ID");
  }

-  static INLINE void *getBlockBegin(const void *Ptr,
+  static inline void *getBlockBegin(const void *Ptr,
                                    Chunk::UnpackedHeader *Header) {
    return reinterpret_cast<void *>(
        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
@@ -531,7 +531,7 @@ private:
  }

  // Return the size of a chunk as requested during its allocation.
-  INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
    if (LIKELY(Header->ClassId))
      return SizeOrUnusedBytes;
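
In the first hunk here, the assertion guards the arithmetic that follows it:
because MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment holds at compile
time, any Size that passes the runtime bound check can be padded by up to
MaxAlignment without wrapping. The reasoning with made-up 64-bit constants:

#include <cstdint>

// Made-up constants; the real values come from the allocator's configuration.
constexpr uintptr_t MaxAlignment = uintptr_t(1) << 24;
constexpr uintptr_t MaxAllowedMallocSize = uintptr_t(1) << 40;

// Compile-time guard, as in the patch.
static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");

// Consequence: Size < MaxAllowedMallocSize implies Size + MaxAlignment does
// not overflow uintptr_t, so alignment padding is always safe.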
+15 −15
@@ -19,22 +19,22 @@

namespace scudo {

-template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
-  COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
+  static_assert(sizeof(Dest) == sizeof(Source), "");
  Dest D;
  memcpy(&D, &S, sizeof(D));
  return D;
}

-INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

-INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
  return X & ~(Boundary - 1);
}

-INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+inline constexpr bool isAligned(uptr X, uptr Alignment) {
  return (X & (Alignment - 1)) == 0;
}

@@ -48,14 +48,14 @@ template <class T> void Swap(T &A, T &B) {
  B = Tmp;
}

-INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }

-INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+inline uptr getMostSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}

-INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpToPowerOfTwo(uptr Size) {
  DCHECK(Size);
  if (isPowerOfTwo(Size))
    return Size;
@@ -65,17 +65,17 @@ INLINE uptr roundUpToPowerOfTwo(uptr Size) {
  return 1UL << (Up + 1);
}

-INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+inline uptr getLeastSignificantSetBitIndex(uptr X) {
  DCHECK_NE(X, 0U);
  return static_cast<uptr>(__builtin_ctzl(X));
}

-INLINE uptr getLog2(uptr X) {
+inline uptr getLog2(uptr X) {
  DCHECK(isPowerOfTwo(X));
  return getLeastSignificantSetBitIndex(X);
}

-INLINE u32 getRandomU32(u32 *State) {
+inline u32 getRandomU32(u32 *State) {
  // ANSI C linear congruential PRNG (16-bit output).
  // return (*State = *State * 1103515245 + 12345) >> 16;
  // XorShift (32-bit output).
@@ -85,11 +85,11 @@ INLINE u32 getRandomU32(u32 *State) {
  return *State;
}

-INLINE u32 getRandomModN(u32 *State, u32 N) {
+inline u32 getRandomModN(u32 *State, u32 N) {
  return getRandomU32(State) % N; // [0, N)
}

-template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
  if (N <= 1)
    return;
  u32 State = *RandState;
@@ -100,7 +100,7 @@ template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {

// Hardware specific inlinable functions.

-INLINE void yieldProcessor(u8 Count) {
+inline void yieldProcessor(u8 Count) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__("" ::: "memory");
  for (u8 I = 0; I < Count; I++)
@@ -117,7 +117,7 @@ INLINE void yieldProcessor(u8 Count) {

extern uptr PageSizeCached;
uptr getPageSizeSlow();
-INLINE uptr getPageSizeCached() {
+inline uptr getPageSizeCached() {
  // Bionic uses a hardcoded value.
  if (SCUDO_ANDROID)
    return 4096U;
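
Finally, note that the rounding helpers above are constexpr, so their behavior
can be pinned down with the very construct this patch spells out. Illustrative
checks (assume the helpers are in scope; boundaries must be powers of two, as
at every call site):

// Illustrative compile-time checks of the constexpr helpers above.
static_assert(scudo::roundUpTo(13, 8) == 16, "");
static_assert(scudo::roundDownTo(13, 8) == 8, "");
static_assert(scudo::isAligned(64, 16), "");
static_assert(!scudo::isAligned(65, 16), "");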