Commit 810127f6 authored by Simon Tatham

[ARM,MVE] Add the `vsbciq` intrinsics.

Summary:
These are exactly parallel to the existing `vadciq` intrinsics, which
we implemented last year as part of the original MVE intrinsics
framework setup.

Just like VADC/VADCI, the MVE VSBC/VSBCI instructions deliver two
outputs, both of which the intrinsic exposes: a modified vector
register and a carry flag. So they have to be instruction-selected in
C++ rather than Tablegen. However, in this case, that's trivial: the
same C++ isel routine we already have for VADC works unchanged, and
all we have to do is pass it different instruction ids.
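
As a usage sketch (not part of this commit; it assumes an MVE-enabled
target and <arm_mve.h>, and the 256-bit layout and helper name are
invented for illustration): the carry pointer lets whole-vector
subtractions be chained into wider ones, with `vsbciq` starting the
borrow chain from a fixed carry-in and `vsbcq` consuming and updating
the stored carry.

#include <arm_mve.h>

/* Subtract one 256-bit value from another, each held as two uint32x4_t
   halves with the least significant half first. */
void sub256(const uint32x4_t a[2], const uint32x4_t b[2], uint32x4_t r[2]) {
    unsigned carry;
    r[0] = vsbciq_u32(a[0], b[0], &carry); /* fixed carry-in, writes carry-out */
    r[1] = vsbcq_u32(a[1], b[1], &carry);  /* borrow-in from the low half */
}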

Reviewers: MarkMurrayARM, dmgreen, miyuki, ostannard

Reviewed By: miyuki

Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D75444
parent 9284abd0
+24 −20
@@ -1139,28 +1139,32 @@ defm sqrshr: ScalarSaturatingShiftReg<s32, s64>;
 def lsll: LongScalarShift<u64, (args s32:$sh), (IRInt<"lsll"> $lo, $hi, $sh)>;
 def asrl: LongScalarShift<s64, (args s32:$sh), (IRInt<"asrl"> $lo, $hi, $sh)>;
 
+multiclass vadcsbc {
+  def q: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+      (seq (IRInt<NAME, [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def iq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
+      (seq (IRInt<NAME, [Vector]> $a, $b, 0):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def q_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+                                   Ptr<uint>:$carry, Predicate:$pred),
+      (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+               (shl (load $carry), 29), $pred):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+  def iq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
+                                    Ptr<uint>:$carry, Predicate:$pred),
+      (seq (IRInt<NAME # "_predicated", [Vector, Predicate]> $inactive, $a, $b,
+               0, $pred):$pair,
+           (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
+           (xval $pair, 0))>;
+}
 let params = T.Int32 in {
-def vadcq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
-    (seq (IRInt<"vadc", [Vector]> $a, $b, (shl (load $carry), 29)):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadciq: Intrinsic<Vector, (args Vector:$a, Vector:$b, Ptr<uint>:$carry),
-    (seq (IRInt<"vadc", [Vector]> $a, $b, 0):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadcq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
-                                 Ptr<uint>:$carry, Predicate:$pred),
-    (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
-             (shl (load $carry), 29), $pred):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
-def vadciq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
-                                  Ptr<uint>:$carry, Predicate:$pred),
-    (seq (IRInt<"vadc_predicated", [Vector, Predicate]> $inactive, $a, $b,
-             0, $pred):$pair,
-         (store (and 1, (lshr (xval $pair, 1), 29)), $carry),
-         (xval $pair, 0))>;
+  defm vadc: vadcsbc;
+  defm vsbc: vadcsbc;
 }
 
 multiclass VectorComplexAddPred<dag not_halving, dag angle> {
   def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b),
+160 −0
@@ -87,3 +87,163 @@ int32x4_t test_vadcq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigne
    return vadcq_m_s32(inactive, a, b, carry, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbciq_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 1
// CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 29
// CHECK-NEXT:    [[TMP3:%.*]] = and i32 1, [[TMP2]]
// CHECK-NEXT:    store i32 [[TMP3]], i32* [[CARRY_OUT:%.*]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP4]]
//
int32x4_t test_vsbciq_s32(int32x4_t a, int32x4_t b, unsigned *carry_out) {
#ifdef POLYMORPHIC
  return vsbciq(a, b, carry_out);
#else  /* POLYMORPHIC */
  return vsbciq_s32(a, b, carry_out);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbciq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 1
// CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 29
// CHECK-NEXT:    [[TMP3:%.*]] = and i32 1, [[TMP2]]
// CHECK-NEXT:    store i32 [[TMP3]], i32* [[CARRY_OUT:%.*]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP0]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP4]]
//
uint32x4_t test_vsbciq_u32(uint32x4_t a, uint32x4_t b, unsigned *carry_out) {
#ifdef POLYMORPHIC
  return vsbciq(a, b, carry_out);
#else  /* POLYMORPHIC */
  return vsbciq_u32(a, b, carry_out);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbcq_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]])
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
//
int32x4_t test_vsbcq_s32(int32x4_t a, int32x4_t b, unsigned *carry) {
#ifdef POLYMORPHIC
  return vsbcq(a, b, carry);
#else  /* POLYMORPHIC */
  return vsbcq_s32(a, b, carry);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbcq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]])
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
//
uint32x4_t test_vsbcq_u32(uint32x4_t a, uint32x4_t b, unsigned *carry) {
#ifdef POLYMORPHIC
  return vsbcq(a, b, carry);
#else  /* POLYMORPHIC */
  return vsbcq_u32(a, b, carry);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbciq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY_OUT:%.*]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
//
int32x4_t test_vsbciq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry_out, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vsbciq_m(inactive, a, b, carry_out, p);
#else  /* POLYMORPHIC */
  return vsbciq_m_s32(inactive, a, b, carry_out, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbciq_m_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = lshr i32 [[TMP3]], 29
// CHECK-NEXT:    [[TMP5:%.*]] = and i32 1, [[TMP4]]
// CHECK-NEXT:    store i32 [[TMP5]], i32* [[CARRY_OUT:%.*]], align 4
// CHECK-NEXT:    [[TMP6:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP2]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP6]]
//
uint32x4_t test_vsbciq_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry_out, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vsbciq_m(inactive, a, b, carry_out, p);
#else  /* POLYMORPHIC */
  return vsbciq_m_u32(inactive, a, b, carry_out, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbcq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]], <4 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 1
// CHECK-NEXT:    [[TMP6:%.*]] = lshr i32 [[TMP5]], 29
// CHECK-NEXT:    [[TMP7:%.*]] = and i32 1, [[TMP6]]
// CHECK-NEXT:    store i32 [[TMP7]], i32* [[CARRY]], align 4
// CHECK-NEXT:    [[TMP8:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP8]]
//
int32x4_t test_vsbcq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, unsigned *carry, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vsbcq_m(inactive, a, b, carry, p);
#else  /* POLYMORPHIC */
  return vsbcq_m_s32(inactive, a, b, carry, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vsbcq_m_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CARRY:%.*]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 29
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 [[TMP1]], <4 x i1> [[TMP3]])
// CHECK-NEXT:    [[TMP5:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 1
// CHECK-NEXT:    [[TMP6:%.*]] = lshr i32 [[TMP5]], 29
// CHECK-NEXT:    [[TMP7:%.*]] = and i32 1, [[TMP6]]
// CHECK-NEXT:    store i32 [[TMP7]], i32* [[CARRY]], align 4
// CHECK-NEXT:    [[TMP8:%.*]] = extractvalue { <4 x i32>, i32 } [[TMP4]], 0
// CHECK-NEXT:    ret <4 x i32> [[TMP8]]
//
uint32x4_t test_vsbcq_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, unsigned *carry, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vsbcq_m(inactive, a, b, carry, p);
#else  /* POLYMORPHIC */
  return vsbcq_m_u32(inactive, a, b, carry, p);
#endif /* POLYMORPHIC */
}
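
The shifts by 29 that appear in every one of these checks come from the
FPSCR layout: the carry flag is bit 29 of the 32-bit status word (N=31,
Z=30, C=29, V=28), and the underlying IR intrinsic traffics in whole
FPSCR values. A minimal sketch of the packing the front end wraps around
each call (helper names invented):

/* Convert between a 0/1 carry value and its FPSCR bit position. */
static unsigned carry_to_fpscr(unsigned carry) { return carry << 29; }
static unsigned fpscr_to_carry(unsigned fpscr) { return (fpscr >> 29) & 1; }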
+7 −0
@@ -1020,10 +1020,17 @@ def int_arm_mve_vabd: Intrinsic<
 def int_arm_mve_vadc: Intrinsic<
    [llvm_anyvector_ty, llvm_i32_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
 def int_arm_mve_vadc_predicated: Intrinsic<
    [llvm_anyvector_ty, llvm_i32_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
     llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+def int_arm_mve_vsbc_predicated: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+    llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
 def int_arm_mve_vmulh: Intrinsic<
    [llvm_anyvector_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
+5 −0
@@ -4588,6 +4588,11 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
       SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true,
                         IntNo == Intrinsic::arm_mve_vadc_predicated);
       return;
+    case Intrinsic::arm_mve_vsbc:
+    case Intrinsic::arm_mve_vsbc_predicated:
+      SelectMVE_VADCSBC(N, ARM::MVE_VSBC, ARM::MVE_VSBCI, true,
+                        IntNo == Intrinsic::arm_mve_vsbc_predicated);
+      return;
 
     case Intrinsic::arm_mve_vmlldava:
     case Intrinsic::arm_mve_vmlldava_predicated: {
+184 −0
@@ -96,3 +96,187 @@ entry:
  %8 = extractvalue { <4 x i32>, i32 } %4, 0
  ret <4 x i32> %8
}

declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32>, <4 x i32>, i32)

define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
; CHECK-LABEL: test_vsbciq_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vsbci.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0)
  %1 = extractvalue { <4 x i32>, i32 } %0, 1
  %2 = lshr i32 %1, 29
  %3 = and i32 %2, 1
  store i32 %3, i32* %carry_out, align 4
  %4 = extractvalue { <4 x i32>, i32 } %0, 0
  ret <4 x i32> %4
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out) {
; CHECK-LABEL: test_vsbciq_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vsbci.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0)
  %1 = extractvalue { <4 x i32>, i32 } %0, 1
  %2 = lshr i32 %1, 29
  %3 = and i32 %2, 1
  store i32 %3, i32* %carry_out, align 4
  %4 = extractvalue { <4 x i32>, i32 } %0, 0
  ret <4 x i32> %4
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_s32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
; CHECK-LABEL: test_vsbcq_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    lsls r1, r1, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vsbc.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_u32(<4 x i32> %a, <4 x i32> %b, i32* nocapture %carry) {
; CHECK-LABEL: test_vsbcq_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r1, [r0]
; CHECK-NEXT:    lsls r1, r1, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vsbc.i32 q0, q0, q1
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

declare { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>)

define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
; CHECK-LABEL: test_vsbciq_m_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vsbcit.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry_out, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry_out, i16 zeroext %p) {
; CHECK-LABEL: test_vsbciq_m_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vsbcit.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = zext i16 %p to i32
  %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1)
  %3 = extractvalue { <4 x i32>, i32 } %2, 1
  %4 = lshr i32 %3, 29
  %5 = and i32 %4, 1
  store i32 %5, i32* %carry_out, align 4
  %6 = extractvalue { <4 x i32>, i32 } %2, 0
  ret <4 x i32> %6
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
; CHECK-LABEL: test_vsbcq_m_s32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r2, [r0]
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    lsls r1, r2, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vsbct.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = zext i16 %p to i32
  %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
  %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3)
  %5 = extractvalue { <4 x i32>, i32 } %4, 1
  %6 = lshr i32 %5, 29
  %7 = and i32 %6, 1
  store i32 %7, i32* %carry, align 4
  %8 = extractvalue { <4 x i32>, i32 } %4, 0
  ret <4 x i32> %8
}

define arm_aapcs_vfpcc <4 x i32> @test_vsbcq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* nocapture %carry, i16 zeroext %p) {
; CHECK-LABEL: test_vsbcq_m_u32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    ldr r2, [r0]
; CHECK-NEXT:    vmsr p0, r1
; CHECK-NEXT:    lsls r1, r2, #29
; CHECK-NEXT:    vmsr fpscr_nzcvqc, r1
; CHECK-NEXT:    vpst
; CHECK-NEXT:    vsbct.i32 q0, q1, q2
; CHECK-NEXT:    vmrs r1, fpscr_nzcvqc
; CHECK-NEXT:    ubfx r1, r1, #29, #1
; CHECK-NEXT:    str r1, [r0]
; CHECK-NEXT:    bx lr
entry:
  %0 = load i32, i32* %carry, align 4
  %1 = shl i32 %0, 29
  %2 = zext i16 %p to i32
  %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2)
  %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3)
  %5 = extractvalue { <4 x i32>, i32 } %4, 1
  %6 = lshr i32 %5, 29
  %7 = and i32 %6, 1
  store i32 %7, i32* %carry, align 4
  %8 = extractvalue { <4 x i32>, i32 } %4, 0
  ret <4 x i32> %8
}
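
Finally, a hedged usage sketch for the predicated forms exercised above
(assumes an MVE target and <arm_mve.h>; the wrapper is invented for
illustration): per the usual MVE `_m` convention, result lanes whose
predicate bit is clear are taken from `inactive`, and the carry pointer
is read and updated exactly as in the unpredicated case.

#include <arm_mve.h>

int32x4_t masked_sub_with_borrow(int32x4_t inactive, int32x4_t a,
                                 int32x4_t b, unsigned *carry,
                                 mve_pred16_t p) {
    /* Lanes with a clear bit in p are filled from `inactive`. */
    return vsbcq_m_s32(inactive, a, b, carry, p);
}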