Commit 9fe57006 authored by Antonio Frighetto's avatar Antonio Frighetto
Browse files

[AArch64] Add support for v8.4a `ldapur`/`stlur`

AArch64 backend now features v8.4a atomic Load-Acquire
RCpc and Store-Release register unscaled support.
parent a8799719
Loading
Loading
Loading
Loading
+11 −7
Original line number Diff line number Diff line
@@ -997,6 +997,15 @@ static bool isWorthFoldingADDlow(SDValue N) {
  return true;
}

/// Check if the immediate offset is valid as a scaled immediate.
///
/// \param Offset byte offset from the base register.
/// \param Range  number of encodable values in the immediate field
///               (e.g. 0x1000 for a 12-bit unsigned immediate).
/// \param Size   access size in bytes; expected to be a power of two,
///               since the bound is computed by shifting with Log2_32.
/// \returns true iff \p Offset is non-negative, a multiple of \p Size,
///          and lies in [0, Range * Size), i.e. it can be encoded as an
///          unsigned immediate scaled by the access size.
static bool isValidAsScaledImmediate(int64_t Offset, unsigned Range,
                                     unsigned Size) {
  // Size-aligned (low bits clear), non-negative, and within the scaled
  // encodable range once divided by the access size.
  return (Offset & (Size - 1)) == 0 && Offset >= 0 &&
         Offset < (Range << Log2_32(Size));
}

/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
/// immediate" address.  The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
@@ -1092,7 +1101,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
-      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
+      if (isValidAsScaledImmediate(RHSC, 0x1000, Size)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
@@ -1130,10 +1139,6 @@ bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
-    // If the offset is valid as a scaled immediate, don't match here.
-    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
-        RHSC < (0x1000 << Log2_32(Size)))
-      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
@@ -1312,11 +1317,10 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
  //     LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
-    unsigned Scale = Log2_32(Size);
    // Skip the immediate can be selected by load/store addressing mode.
    // Also skip the immediate can be encoded by a single ADD (SUB is also
    // checked by using -ImmOff).
-    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
+    if (isValidAsScaledImmediate(ImmOff, 0x1000, Size) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

+31 −0
Original line number Diff line number Diff line
@@ -573,3 +573,34 @@ let Predicates = [HasRCPC3, HasNEON] in {
                (i64 (bitconvert (v1f64 VecListOne64:$Vt)))),
            (STL1 (SUBREG_TO_REG (i64 0), VecListOne64:$Vt, dsub), (i64 0), GPR64sp:$Rn)>;
}

// v8.4a FEAT_LRCPC2 patterns
// Select the v8.4a unscaled-immediate forms (LDAPUR* / STLUR*) when the
// address is a base register plus a signed 9-bit byte offset (simm9), so
// an acquire load / release store with a small offset needs no separate
// address computation.
let Predicates = [HasRCPC_IMMO] in {
  // Load-Acquire RCpc Register unscaled loads
  // 8/16-bit loads match the any/zero-extending atomic load fragments;
  // the result lands in a 32-bit GPR.
  def : Pat<(acquiring_load<atomic_load_az_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (LDAPURBi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_az_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (LDAPURHi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (LDAPURi GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(acquiring_load<atomic_load_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (LDAPURXi GPR64sp:$Rn, simm9:$offset)>;

  // Store-Release Register unscaled stores
  // Mirrors the loads above: 8/16/32-bit values come from a 32-bit GPR,
  // 64-bit values from a 64-bit GPR.
  def : Pat<(releasing_store<atomic_store_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STLURBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STLURHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STLURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(releasing_store<atomic_store_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
          (STLURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;
}
+1 −1
Original line number Diff line number Diff line
@@ -94,7 +94,7 @@ def HasTLB_RMI : Predicate<"Subtarget->hasTLB_RMI()">,
def HasFlagM         : Predicate<"Subtarget->hasFlagM()">,
                       AssemblerPredicateWithAll<(all_of FeatureFlagM), "flagm">;

-def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPCImm()">,
+def HasRCPC_IMMO      : Predicate<"Subtarget->hasRCPC_IMMO()">,
                       AssemblerPredicateWithAll<(all_of FeatureRCPC_IMMO), "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
+0 −3
Original line number Diff line number Diff line
@@ -7397,9 +7397,6 @@ AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
    return std::nullopt;
  RHSC = RHSOp1.getCImm()->getSExtValue();

-  // If the offset is valid as a scaled immediate, don't match here.
-  if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
-    return std::nullopt;
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
+10 −20
Original line number Diff line number Diff line
@@ -36,8 +36,7 @@ define i8 @load_atomic_i8_aligned_monotonic_const(ptr readonly %ptr) {

define i8 @load_atomic_i8_aligned_acquire(ptr %ptr) {
; CHECK-LABEL: load_atomic_i8_aligned_acquire:
-; CHECK:    add x8, x0, #4
-; CHECK:    ldaprb w0, [x8]
+; CHECK:    ldapurb w0, [x0, #4]
    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
    %r = load atomic i8, ptr %gep acquire, align 1
    ret i8 %r
@@ -45,8 +44,7 @@ define i8 @load_atomic_i8_aligned_acquire(ptr %ptr) {

define i8 @load_atomic_i8_aligned_acquire_const(ptr readonly %ptr) {
; CHECK-LABEL: load_atomic_i8_aligned_acquire_const:
-; CHECK:    add x8, x0, #4
-; CHECK:    ldaprb w0, [x8]
+; CHECK:    ldapurb w0, [x0, #4]
    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
    %r = load atomic i8, ptr %gep acquire, align 1
    ret i8 %r
@@ -104,8 +102,7 @@ define i16 @load_atomic_i16_aligned_monotonic_const(ptr readonly %ptr) {

define i16 @load_atomic_i16_aligned_acquire(ptr %ptr) {
; CHECK-LABEL: load_atomic_i16_aligned_acquire:
-; CHECK:    add x8, x0, #8
-; CHECK:    ldaprh w0, [x8]
+; CHECK:    ldapurh w0, [x0, #8]
    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
    %r = load atomic i16, ptr %gep acquire, align 2
    ret i16 %r
@@ -113,8 +110,7 @@ define i16 @load_atomic_i16_aligned_acquire(ptr %ptr) {

define i16 @load_atomic_i16_aligned_acquire_const(ptr readonly %ptr) {
; CHECK-LABEL: load_atomic_i16_aligned_acquire_const:
-; CHECK:    add x8, x0, #8
-; CHECK:    ldaprh w0, [x8]
+; CHECK:    ldapurh w0, [x0, #8]
    %gep = getelementptr inbounds i16, ptr %ptr, i32 4
    %r = load atomic i16, ptr %gep acquire, align 2
    ret i16 %r
@@ -172,8 +168,7 @@ define i32 @load_atomic_i32_aligned_monotonic_const(ptr readonly %ptr) {

define i32 @load_atomic_i32_aligned_acquire(ptr %ptr) {
; CHECK-LABEL: load_atomic_i32_aligned_acquire:
-; CHECK:    add x8, x0, #16
-; CHECK:    ldapr w0, [x8]
+; CHECK:    ldapur w0, [x0, #16]
    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
    %r = load atomic i32, ptr %gep acquire, align 4
    ret i32 %r
@@ -181,8 +176,7 @@ define i32 @load_atomic_i32_aligned_acquire(ptr %ptr) {

define i32 @load_atomic_i32_aligned_acquire_const(ptr readonly %ptr) {
; CHECK-LABEL: load_atomic_i32_aligned_acquire_const:
-; CHECK:    add x8, x0, #16
-; CHECK:    ldapr w0, [x8]
+; CHECK:    ldapur w0, [x0, #16]
    %gep = getelementptr inbounds i32, ptr %ptr, i32 4
    %r = load atomic i32, ptr %gep acquire, align 4
    ret i32 %r
@@ -240,8 +234,7 @@ define i64 @load_atomic_i64_aligned_monotonic_const(ptr readonly %ptr) {

define i64 @load_atomic_i64_aligned_acquire(ptr %ptr) {
; CHECK-LABEL: load_atomic_i64_aligned_acquire:
-; CHECK:    add x8, x0, #32
-; CHECK:    ldapr x0, [x8]
+; CHECK:    ldapur x0, [x0, #32]
    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
    %r = load atomic i64, ptr %gep acquire, align 8
    ret i64 %r
@@ -249,8 +242,7 @@ define i64 @load_atomic_i64_aligned_acquire(ptr %ptr) {

define i64 @load_atomic_i64_aligned_acquire_const(ptr readonly %ptr) {
; CHECK-LABEL: load_atomic_i64_aligned_acquire_const:
-; CHECK:    add x8, x0, #32
-; CHECK:    ldapr x0, [x8]
+; CHECK:    ldapur x0, [x0, #32]
    %gep = getelementptr inbounds i64, ptr %ptr, i32 4
    %r = load atomic i64, ptr %gep acquire, align 8
    ret i64 %r
@@ -376,8 +368,7 @@ define i8 @load_atomic_i8_unaligned_monotonic_const(ptr readonly %ptr) {

define i8 @load_atomic_i8_unaligned_acquire(ptr %ptr) {
; CHECK-LABEL: load_atomic_i8_unaligned_acquire:
-; CHECK:    add x8, x0, #4
-; CHECK:    ldaprb w0, [x8]
+; CHECK:    ldapurb w0, [x0, #4]
    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
    %r = load atomic i8, ptr %gep acquire, align 1
    ret i8 %r
@@ -385,8 +376,7 @@ define i8 @load_atomic_i8_unaligned_acquire(ptr %ptr) {

define i8 @load_atomic_i8_unaligned_acquire_const(ptr readonly %ptr) {
; CHECK-LABEL: load_atomic_i8_unaligned_acquire_const:
-; CHECK:    add x8, x0, #4
-; CHECK:    ldaprb w0, [x8]
+; CHECK:    ldapurb w0, [x0, #4]
    %gep = getelementptr inbounds i8, ptr %ptr, i32 4
    %r = load atomic i8, ptr %gep acquire, align 1
    ret i8 %r
Loading