Commit fe3bb8ec authored by Kerry McLaughlin's avatar Kerry McLaughlin
Browse files

[AArch64][SVE] Add ImmArg property to intrinsics with immediates

Summary:
Several SVE intrinsics with immediate arguments (including those
added by D70253 & D70437) do not use the ImmArg property.
This patch adds ImmArg<Op> where required and changes
the appropriate patterns which match the immediates.

Reviewers: efriedma, sdesmalen, andwar, rengolin

Reviewed By: efriedma

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D72612
parent 01ad4c83
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -794,7 +794,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<2>]>;

  class AdvSIMD_3VectorArgIndexed_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
@@ -802,7 +802,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<3>]>;

  class AdvSIMD_Pred1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
@@ -894,7 +894,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<2>]>;

  class AdvSIMD_SVE_ShiftWide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
@@ -932,7 +932,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                 LLVMMatchType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<3>]>;

  class AdvSIMD_SVE_EXPA_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
@@ -1012,7 +1012,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<3>]>;

  class AdvSIMD_SVE_PTEST_Intrinsic
    : Intrinsic<[llvm_i1_ty],
@@ -1039,7 +1039,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<3>]>;

  class SVE2_1VectorArg_Narrowing_Intrinsic
    : Intrinsic<[LLVMSubdivide2VectorType<0>],
+51 −25
Original line number Diff line number Diff line
@@ -647,6 +647,13 @@ def tvecshiftR32 : Operand<i32>, TImmLeaf<i32, [{
  let DecoderMethod = "DecodeVecShiftR32Imm";
  let ParserMatchClass = Imm1_32Operand;
}
// tvecshiftR64 - vector shift-right immediate for 64-bit elements, valid
// range [1, 64]. Uses TImmLeaf (rather than ImmLeaf) so it matches timm
// nodes, as produced for intrinsic operands carrying the ImmArg property.
def tvecshiftR64 : Operand<i32>, TImmLeaf<i32, [{
  return (((uint32_t)Imm) > 0) && (((uint32_t)Imm) < 65);
}]> {
  let EncoderMethod = "getVecShiftR64OpValue";
  let DecoderMethod = "DecodeVecShiftR64Imm";
  let ParserMatchClass = Imm1_64Operand;
}

def Imm0_1Operand : AsmImmRange<0, 1>;
def Imm0_7Operand : AsmImmRange<0, 7>;
@@ -683,7 +690,6 @@ def vecshiftL64 : Operand<i32>, ImmLeaf<i32, [{
  let ParserMatchClass = Imm0_63Operand;
}


// Crazy immediate formats used by 32-bit and 64-bit logical immediate
// instructions for splatting repeating bit patterns across the immediate.
def logical_imm32_XFORM : SDNodeXForm<imm, [{
@@ -832,7 +838,7 @@ def imm0_7 : Operand<i64>, ImmLeaf<i64, [{
}

// imm32_0_7 predicate - True if the 32-bit immediate is in the range [0,7]
// TImmLeaf so ImmArg (timm) intrinsic operands can be matched directly.
def imm32_0_7 : Operand<i32>, TImmLeaf<i32, [{
  return ((uint32_t)Imm) < 8;
}]> {
  let ParserMatchClass = Imm0_7Operand;
@@ -1091,29 +1097,44 @@ class AsmVectorIndex<int Min, int Max, string NamePrefix=""> : AsmOperandClass {
  let RenderMethod = "addVectorIndexOperands";
}

// AsmVectorIndexOpnd - base operand for vector lane indices. The immediate
// predicate is intentionally not part of this class; VectorIndex (below)
// attaches either an ImmLeaf or a TImmLeaf variant on top of it.
class AsmVectorIndexOpnd<ValueType ty, AsmOperandClass mc>
    : Operand<ty> {
  let ParserMatchClass = mc;
  let PrintMethod = "printVectorIndex";
}

// VectorIndex - emits two operands sharing one predicate: the plain ImmLeaf
// form (matches ISD immediates) and a "_timm"-suffixed TImmLeaf form
// (matches target constants, i.e. ImmArg intrinsic operands).
multiclass VectorIndex<ValueType ty, AsmOperandClass mc, code pred> {
  def "" : AsmVectorIndexOpnd<ty, mc>, ImmLeaf<ty, pred>;
  def _timm : AsmVectorIndexOpnd<ty, mc>, TImmLeaf<ty, pred>;
}

// Assembler operand classes for each vector lane-index width.
def VectorIndex1Operand : AsmVectorIndex<1, 1>;
def VectorIndexBOperand : AsmVectorIndex<0, 15>;
def VectorIndexHOperand : AsmVectorIndex<0, 7>;
def VectorIndexSOperand : AsmVectorIndex<0, 3>;
def VectorIndexDOperand : AsmVectorIndex<0, 1>;

// Lane-index operands. Each defm expands to the plain operand plus a
// "_timm" variant (see VectorIndex above) for matching ImmArg operands.
defm VectorIndex1 : VectorIndex<i64, VectorIndex1Operand,
                                [{ return ((uint64_t)Imm) == 1; }]>;
defm VectorIndexB : VectorIndex<i64, VectorIndexBOperand,
                                [{ return ((uint64_t)Imm) < 16; }]>;
defm VectorIndexH : VectorIndex<i64, VectorIndexHOperand,
                                [{ return ((uint64_t)Imm) < 8; }]>;
defm VectorIndexS : VectorIndex<i64, VectorIndexSOperand,
                                [{ return ((uint64_t)Imm) < 4; }]>;
defm VectorIndexD : VectorIndex<i64, VectorIndexDOperand,
                                [{ return ((uint64_t)Imm) < 2; }]>;

// 32-bit (i32) forms of the same lane indices.
defm VectorIndex132b : VectorIndex<i32, VectorIndex1Operand,
                                   [{ return ((uint64_t)Imm) == 1; }]>;
defm VectorIndexB32b : VectorIndex<i32, VectorIndexBOperand,
                                   [{ return ((uint64_t)Imm) < 16; }]>;
defm VectorIndexH32b : VectorIndex<i32, VectorIndexHOperand,
                                   [{ return ((uint64_t)Imm) < 8; }]>;
defm VectorIndexS32b : VectorIndex<i32, VectorIndexSOperand,
                                   [{ return ((uint64_t)Imm) < 4; }]>;
defm VectorIndexD32b : VectorIndex<i32, VectorIndexDOperand,
                                   [{ return ((uint64_t)Imm) < 2; }]>;

def SVEVectorIndexExtDupBOperand : AsmVectorIndex<0, 63, "SVE">;
def SVEVectorIndexExtDupHOperand : AsmVectorIndex<0, 31, "SVE">;
@@ -1121,16 +1142,21 @@ def SVEVectorIndexExtDupSOperand : AsmVectorIndex<0, 15, "SVE">;
def SVEVectorIndexExtDupDOperand : AsmVectorIndex<0, 7, "SVE">;
def SVEVectorIndexExtDupQOperand : AsmVectorIndex<0, 3, "SVE">;

// SVE extended-DUP element indices; defm provides both plain and "_timm"
// operand forms (see VectorIndex above).
defm sve_elm_idx_extdup_b
  : VectorIndex<i64, SVEVectorIndexExtDupBOperand,
                [{ return ((uint64_t)Imm) < 64; }]>;
defm sve_elm_idx_extdup_h
  : VectorIndex<i64, SVEVectorIndexExtDupHOperand,
                [{ return ((uint64_t)Imm) < 32; }]>;
defm sve_elm_idx_extdup_s
  : VectorIndex<i64, SVEVectorIndexExtDupSOperand,
                [{ return ((uint64_t)Imm) < 16; }]>;
defm sve_elm_idx_extdup_d
  : VectorIndex<i64, SVEVectorIndexExtDupDOperand,
                [{ return ((uint64_t)Imm) < 8; }]>;
defm sve_elm_idx_extdup_q
  : VectorIndex<i64, SVEVectorIndexExtDupQOperand,
                [{ return ((uint64_t)Imm) < 4; }]>;

// 8-bit immediate for AdvSIMD where 64-bit values of the form:
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
+27 −27
Original line number Diff line number Diff line
@@ -1646,12 +1646,12 @@ multiclass sve_fp_fma_by_indexed_elem<bit opc, string asm,
    let Inst{19-16} = Zm;
  }

  // Use the _timm operands: the index arrives as a timm node because the
  // intrinsic's index operand now carries the ImmArg property.
  def : Pat<(nxv8f16 (op nxv8f16:$Op1, nxv8f16:$Op2, nxv8f16:$Op3, (i32 VectorIndexH32b_timm:$idx))),
            (!cast<Instruction>(NAME # _H) $Op1, $Op2, $Op3, VectorIndexH32b_timm:$idx)>;
  def : Pat<(nxv4f32 (op nxv4f32:$Op1, nxv4f32:$Op2, nxv4f32:$Op3, (i32 VectorIndexS32b_timm:$idx))),
            (!cast<Instruction>(NAME # _S) $Op1, $Op2, $Op3, VectorIndexS32b_timm:$idx)>;
  def : Pat<(nxv2f64 (op nxv2f64:$Op1, nxv2f64:$Op2, nxv2f64:$Op3, (i32 VectorIndexD32b_timm:$idx))),
            (!cast<Instruction>(NAME # _D) $Op1, $Op2, $Op3, VectorIndexD32b_timm:$idx)>;
}


@@ -1694,12 +1694,12 @@ multiclass sve_fp_fmul_by_indexed_elem<string asm, SDPatternOperator op> {
    let Inst{19-16} = Zm;
  }

  // Index is a target constant (ImmArg), so match the _timm operand forms.
  def : Pat<(nxv8f16 (op nxv8f16:$Op1, nxv8f16:$Op2, (i32 VectorIndexH32b_timm:$idx))),
            (!cast<Instruction>(NAME # _H) $Op1, $Op2, VectorIndexH32b_timm:$idx)>;
  def : Pat<(nxv4f32 (op nxv4f32:$Op1, nxv4f32:$Op2, (i32 VectorIndexS32b_timm:$idx))),
            (!cast<Instruction>(NAME # _S) $Op1, $Op2, VectorIndexS32b_timm:$idx)>;
  def : Pat<(nxv2f64 (op nxv2f64:$Op1, nxv2f64:$Op2, (i32 VectorIndexD32b_timm:$idx))),
            (!cast<Instruction>(NAME # _D) $Op1, $Op2, VectorIndexD32b_timm:$idx)>;
}

//===----------------------------------------------------------------------===//
@@ -1785,10 +1785,10 @@ multiclass sve_fp_fcmla_by_indexed_elem<string asm, SDPatternOperator op> {
    let Inst{19-16} = Zm;
  }

  // Index is a target constant (ImmArg), so match the _timm operand forms.
  // Note: _H uses the S-width index and _S the D-width index (paired
  // complex elements halve the number of addressable lanes).
  def : Pat<(nxv8f16 (op nxv8f16:$Op1, nxv8f16:$Op2, nxv8f16:$Op3, (i32 VectorIndexS32b_timm:$idx), (i32 complexrotateop:$imm))),
            (!cast<Instruction>(NAME # _H) $Op1, $Op2, $Op3, VectorIndexS32b_timm:$idx, complexrotateop:$imm)>;
  def : Pat<(nxv4f32 (op nxv4f32:$Op1, nxv4f32:$Op2, nxv4f32:$Op3, (i32 VectorIndexD32b_timm:$idx), (i32 complexrotateop:$imm))),
            (!cast<Instruction>(NAME # _S) $Op1, $Op2, $Op3, VectorIndexD32b_timm:$idx, complexrotateop:$imm)>;
}

//===----------------------------------------------------------------------===//
@@ -1949,7 +1949,7 @@ class sve2_fp_mla_long_by_indexed_elem<bits<2> opc, string asm>
multiclass sve2_fp_mla_long_by_indexed_elem<bits<2> opc, string asm,
                                            SDPatternOperator op> {
  def NAME : sve2_fp_mla_long_by_indexed_elem<opc, asm>;
  // Index is a target constant (ImmArg), so match the _timm operand form.
  def : SVE_4_Op_Imm_Pat<nxv4f32, op, nxv4f32, nxv8f16, nxv8f16, i32, VectorIndexH32b_timm, !cast<Instruction>(NAME)>;
}

//===----------------------------------------------------------------------===//
@@ -2479,23 +2479,23 @@ class sve_intx_dot_by_indexed_elem<bit sz, bit U, string asm,

// Indexed dot-product instructions (S and D element sizes). The lane index
// is an ImmArg intrinsic operand, so the _timm operand forms are used both
// for the instruction operands and in the selection patterns.
multiclass sve_intx_dot_by_indexed_elem<bit opc, string asm,
                                        SDPatternOperator op> {
  def _S : sve_intx_dot_by_indexed_elem<0b0, opc, asm, ZPR32, ZPR8, ZPR3b8, VectorIndexS32b_timm> {
    bits<2> iop;
    bits<3> Zm;
    let Inst{20-19} = iop;
    let Inst{18-16} = Zm;
  }
  def _D : sve_intx_dot_by_indexed_elem<0b1, opc, asm, ZPR64, ZPR16, ZPR4b16, VectorIndexD32b_timm> {
    bits<1> iop;
    bits<4> Zm;
    let Inst{20} = iop;
    let Inst{19-16} = Zm;
  }

  def : Pat<(nxv4i32 (op nxv4i32:$Op1, nxv16i8:$Op2, nxv16i8:$Op3, (i32 VectorIndexS32b_timm:$idx))),
            (!cast<Instruction>(NAME # _S) $Op1, $Op2, $Op3, VectorIndexS32b_timm:$idx)>;
  def : Pat<(nxv2i64 (op nxv2i64:$Op1, nxv8i16:$Op2, nxv8i16:$Op3, (i32 VectorIndexD32b_timm:$idx))),
            (!cast<Instruction>(NAME # _D) $Op1, $Op2, $Op3, VectorIndexD32b_timm:$idx)>;
}

//===----------------------------------------------------------------------===//
@@ -4406,10 +4406,10 @@ multiclass sve_int_bin_pred_shift_imm_right<bits<4> opc, string asm,
    let Inst{9-8} = imm{4-3};
  }

  // Shift amounts are target constants (ImmArg), so match the TImmLeaf
  // tvecshiftR* operands rather than the ImmLeaf vecshiftR* ones.
  def : SVE_3_Op_Imm_Pat<nxv16i8, op, nxv16i1, nxv16i8, i32, tvecshiftR8,  !cast<Instruction>(NAME # _B)>;
  def : SVE_3_Op_Imm_Pat<nxv8i16, op, nxv8i1,  nxv8i16, i32, tvecshiftR16, !cast<Instruction>(NAME # _H)>;
  def : SVE_3_Op_Imm_Pat<nxv4i32, op, nxv4i1,  nxv4i32, i32, tvecshiftR32, !cast<Instruction>(NAME # _S)>;
  def : SVE_3_Op_Imm_Pat<nxv2i64, op, nxv2i1,  nxv2i64, i32, tvecshiftR64, !cast<Instruction>(NAME # _D)>;
}

class sve_int_bin_pred_shift<bits<2> sz8_64, bit wide, bits<3> opc,