Unverified Commit 0af5c066 authored by Florian Hahn's avatar Florian Hahn Committed by GitHub
Browse files

[InstCombine] Don't consider aligned_alloc removable if icmp uses result (#69474)

At the moment, all alloc-like functions are assumed to return non-null
pointers if their return value is only used in a compare. This is based
on being allowed to substitute the allocation function with one that
does not fail to allocate the required memory.

aligned_alloc, however, must also return null if the required alignment
cannot be satisfied, so the same reasoning as above cannot be applied
to it.

This patch adds a bail-out for aligned_alloc calls to
isAllocSiteRemovable.
parent 44d4b30c
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -2430,6 +2430,26 @@ static bool isAllocSiteRemovable(Instruction *AI,
        unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
          return false;

        // Do not fold compares to aligned_alloc calls, as they may have to
        // return null in case the required alignment cannot be satisfied,
        // unless we can prove that both alignment and size are valid.
        auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
          // Check if alignment and size of a call to aligned_alloc is valid,
          // that is alignment is a power-of-2 and the size is a multiple of the
          // alignment.
          const APInt *Alignment;
          const APInt *Size;
          return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
                 match(CB->getArgOperand(1), m_APInt(Size)) &&
                 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
        };
        auto *CB = dyn_cast<CallBase>(AI);
        LibFunc TheLibFunc;
        if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
            TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
            !AlignmentAndSizeKnownValid(CB))
          return false;
        Users.emplace_back(I);
        continue;
      }
+11 −5
Original line number Diff line number Diff line
@@ -26,9 +26,11 @@ define i32 @dead_aligned_alloc(i32 %size, i32 %alignment, i8 %value) {
  ret i32 0
}

define i1 @aligned_alloc_pointer_only_used_by_cmp(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp(
; CHECK-NEXT:    ret i1 true
define i1 @aligned_alloc_only_pointe(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_only_pointe(
; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call ptr @aligned_alloc(i32 [[ALIGNMENT:%.*]], i32 [[SIZE:%.*]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %aligned_allocation = tail call ptr @aligned_alloc(i32 %alignment, i32 %size)
  %cmp = icmp ne ptr %aligned_allocation, null
@@ -46,7 +48,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_and_value_known_ok(i

define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(
; CHECK-NEXT:    ret i1 true
; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(32) ptr @aligned_alloc(i32 3, i32 32)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %aligned_allocation = tail call ptr @aligned_alloc(i32 3, i32 32)
  %cmp = icmp ne ptr %aligned_allocation, null
@@ -55,7 +59,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %s

define i1 @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(
; CHECK-NEXT:    ret i1 true
; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(31) ptr @aligned_alloc(i32 8, i32 31)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %aligned_allocation = tail call ptr @aligned_alloc(i32 8, i32 31)
  %cmp = icmp ne ptr %aligned_allocation, null