Commit c41d2b5a authored by Andrzej Warzynski

[AArch64][SVE2] Add intrinsics for binary narrowing operations

Summary:
This patch adds the following intrinsics for the binary narrowing add
and sub operations (a short usage sketch follows the list):
  * @llvm.aarch64.sve.addhnb
  * @llvm.aarch64.sve.addhnt
  * @llvm.aarch64.sve.raddhnb
  * @llvm.aarch64.sve.raddhnt
  * @llvm.aarch64.sve.subhnb
  * @llvm.aarch64.sve.subhnt
  * @llvm.aarch64.sve.rsubhnb
  * @llvm.aarch64.sve.rsubhnt
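
The bottom ("b") variants take the two wide source vectors directly, while
the top ("t") variants additionally take the existing narrowed result vector
that the new elements are merged into. A minimal IR sketch, mirroring the
tests added in this patch (value names are illustrative only):

  ; Narrow the high half of each i16 sum into the even (bottom) i8 lanes.
  %bot = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                  <vscale x 8 x i16> %b)
  ; Narrow into the odd (top) i8 lanes, merging with the bottom lanes already in %bot.
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %bot,
                                                                  <vscale x 8 x i16> %c,
                                                                  <vscale x 8 x i16> %d)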

Reviewers: sdesmalen, rengolin, efriedma

Reviewed By: sdesmalen, efriedma

Subscribers: tschuett, kristof.beyls, hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71424
parent 7f4f07dd
llvm/include/llvm/IR/IntrinsicsAArch64.td  +26 −0
@@ -998,6 +998,17 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
                [LLVMSubdivide2VectorType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;
  class SVE2_2VectorArg_Narrowing_Intrinsic
      : Intrinsic<
            [LLVMSubdivide2VectorType<0>],
            [llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  class SVE2_Merged2VectorArg_Narrowing_Intrinsic
      : Intrinsic<
            [LLVMSubdivide2VectorType<0>],
            [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  // NOTE: There is no relationship between these intrinsics beyond an attempt
  // to reuse currently identical class definitions.
@@ -1504,4 +1515,19 @@ def int_aarch64_sve_sqxtunb : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtunt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;

//
// SVE2 - Binary narrowing DSP operations
//
def int_aarch64_sve_addhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_addhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_raddhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_raddhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_subhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_subhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_rsubhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_rsubhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
}
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td  +8 −8
@@ -1446,16 +1446,16 @@ let Predicates = [HasSVE2] in {
  defm UQRSHRNT_ZZI  : sve2_int_bin_shift_imm_right_narrow_top<0b111, "uqrshrnt">;

  // SVE2 integer add/subtract narrow high part (bottom)
-  defm ADDHNB_ZZZ  : sve2_int_addsub_narrow_high_bottom<0b00, "addhnb">;
-  defm RADDHNB_ZZZ : sve2_int_addsub_narrow_high_bottom<0b01, "raddhnb">;
-  defm SUBHNB_ZZZ  : sve2_int_addsub_narrow_high_bottom<0b10, "subhnb">;
-  defm RSUBHNB_ZZZ : sve2_int_addsub_narrow_high_bottom<0b11, "rsubhnb">;
+  defm ADDHNB_ZZZ  : sve2_int_addsub_narrow_high_bottom<0b00, "addhnb",  int_aarch64_sve_addhnb>;
+  defm RADDHNB_ZZZ : sve2_int_addsub_narrow_high_bottom<0b01, "raddhnb", int_aarch64_sve_raddhnb>;
+  defm SUBHNB_ZZZ  : sve2_int_addsub_narrow_high_bottom<0b10, "subhnb",  int_aarch64_sve_subhnb>;
+  defm RSUBHNB_ZZZ : sve2_int_addsub_narrow_high_bottom<0b11, "rsubhnb", int_aarch64_sve_rsubhnb>;

  // SVE2 integer add/subtract narrow high part (top)
-  defm ADDHNT_ZZZ  : sve2_int_addsub_narrow_high_top<0b00, "addhnt">;
-  defm RADDHNT_ZZZ : sve2_int_addsub_narrow_high_top<0b01, "raddhnt">;
-  defm SUBHNT_ZZZ  : sve2_int_addsub_narrow_high_top<0b10, "subhnt">;
-  defm RSUBHNT_ZZZ : sve2_int_addsub_narrow_high_top<0b11, "rsubhnt">;
+  defm ADDHNT_ZZZ  : sve2_int_addsub_narrow_high_top<0b00, "addhnt",  int_aarch64_sve_addhnt>;
+  defm RADDHNT_ZZZ : sve2_int_addsub_narrow_high_top<0b01, "raddhnt", int_aarch64_sve_raddhnt>;
+  defm SUBHNT_ZZZ  : sve2_int_addsub_narrow_high_top<0b10, "subhnt",  int_aarch64_sve_subhnt>;
+  defm RSUBHNT_ZZZ : sve2_int_addsub_narrow_high_top<0b11, "rsubhnt", int_aarch64_sve_rsubhnt>;

  // SVE2 saturating extract narrow (bottom)
  defm SQXTNB_ZZ  : sve2_int_sat_extract_narrow_bottom<0b00, "sqxtnb",  int_aarch64_sve_sqxtnb>;
llvm/lib/Target/AArch64/SVEInstrFormats.td  +12 −2
@@ -3005,10 +3005,15 @@ class sve2_int_addsub_narrow_high_bottom<bits<2> sz, bits<2> opc, string asm,
  let Inst{4-0}   = Zd;
}

-multiclass sve2_int_addsub_narrow_high_bottom<bits<2> opc, string asm> {
+multiclass sve2_int_addsub_narrow_high_bottom<bits<2> opc, string asm,
+                                              SDPatternOperator op> {
  def _B : sve2_int_addsub_narrow_high_bottom<0b01, opc, asm, ZPR8, ZPR16>;
  def _H : sve2_int_addsub_narrow_high_bottom<0b10, opc, asm, ZPR16, ZPR32>;
  def _S : sve2_int_addsub_narrow_high_bottom<0b11, opc, asm, ZPR32, ZPR64>;

  def : SVE_2_Op_Pat<nxv16i8, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _B)>;
  def : SVE_2_Op_Pat<nxv8i16, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _H)>;
  def : SVE_2_Op_Pat<nxv4i32, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _S)>;
}

class sve2_int_addsub_narrow_high_top<bits<2> sz, bits<2> opc, string asm,
@@ -3031,10 +3036,15 @@ class sve2_int_addsub_narrow_high_top<bits<2> sz, bits<2> opc, string asm,
  let Constraints = "$Zd = $_Zd";
}

-multiclass sve2_int_addsub_narrow_high_top<bits<2> opc, string asm> {
+multiclass sve2_int_addsub_narrow_high_top<bits<2> opc, string asm,
+                                           SDPatternOperator op> {
  def _B : sve2_int_addsub_narrow_high_top<0b01, opc, asm, ZPR8, ZPR16>;
  def _H : sve2_int_addsub_narrow_high_top<0b10, opc, asm, ZPR16, ZPR32>;
  def _S : sve2_int_addsub_narrow_high_top<0b11, opc, asm, ZPR32, ZPR64>;

  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _B)>;
  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _H)>;
  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _S)>;
}

class sve2_int_sat_extract_narrow_bottom<bits<3> tsz8_64, bits<2> opc, string asm,
New test in llvm/test/CodeGen/AArch64 (SVE2 binary narrowing add/sub intrinsics)  +278 −0
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; ADDHNB

define <vscale x 16 x i8> @addhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: addhnb_h:
; CHECK: addhnb z0.b, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                  <vscale x 8 x i16> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @addhnb_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: addhnb_s:
; CHECK: addhnb z0.h, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 4 x i32> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @addhnb_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: addhnb_d:
; CHECK: addhnb z0.s, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

; ADDHNT

define <vscale x 16 x i8> @addhnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: addhnt_h:
; CHECK: addhnt z0.b, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @addhnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: addhnt_s:
; CHECK: addhnt z0.h, z1.s, z2.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @addhnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: addhnt_d:
; CHECK: addhnt z0.s, z1.d, z2.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                  <vscale x 2 x i64> %b,
                                                                  <vscale x 2 x i64> %c)
  ret <vscale x 4 x i32> %out
}

; RADDHNB

define <vscale x 16 x i8> @raddhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: raddhnb_h:
; CHECK: raddhnb z0.b, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.raddhnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @raddhnb_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: raddhnb_s:
; CHECK: raddhnb z0.h, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.raddhnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @raddhnb_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: raddhnb_d:
; CHECK: raddhnb z0.s, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.raddhnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

; RADDHNT

define <vscale x 16 x i8> @raddhnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: raddhnt_h:
; CHECK: raddhnt z0.b, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.raddhnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   <vscale x 8 x i16> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @raddhnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: raddhnt_s:
; CHECK: raddhnt z0.h, z1.s, z2.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.raddhnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   <vscale x 4 x i32> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @raddhnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: raddhnt_d:
; CHECK: raddhnt z0.s, z1.d, z2.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.raddhnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   <vscale x 2 x i64> %c)
  ret <vscale x 4 x i32> %out
}

; RSUBHNB

define <vscale x 16 x i8> @rsubhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: rsubhnb_h:
; CHECK: rsubhnb z0.b, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.rsubhnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @rsubhnb_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: rsubhnb_s:
; CHECK: rsubhnb z0.h, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.rsubhnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @rsubhnb_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: rsubhnb_d:
; CHECK: rsubhnb z0.s, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.rsubhnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

; RSUBHNT

define <vscale x 16 x i8> @rsubhnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: rsubhnt_h:
; CHECK: rsubhnt z0.b, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.rsubhnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   <vscale x 8 x i16> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @rsubhnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: rsubhnt_s:
; CHECK: rsubhnt z0.h, z1.s, z2.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.rsubhnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   <vscale x 4 x i32> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @rsubhnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: rsubhnt_d:
; CHECK: rsubhnt z0.s, z1.d, z2.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.rsubhnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   <vscale x 2 x i64> %c)
  ret <vscale x 4 x i32> %out
}

; SUBHNB

define <vscale x 16 x i8> @subhnb_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subhnb_h:
; CHECK: subhnb z0.b, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subhnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                  <vscale x 8 x i16> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subhnb_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subhnb_s:
; CHECK: subhnb z0.h, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subhnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                  <vscale x 4 x i32> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subhnb_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subhnb_d:
; CHECK: subhnb z0.s, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subhnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

; SUBHNT

define <vscale x 16 x i8> @subhnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: subhnt_h:
; CHECK: subhnt z0.b, z1.h, z2.h
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subhnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                  <vscale x 8 x i16> %b,
                                                                  <vscale x 8 x i16> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subhnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: subhnt_s:
; CHECK: subhnt z0.h, z1.s, z2.s
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subhnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                  <vscale x 4 x i32> %b,
                                                                  <vscale x 4 x i32> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subhnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: subhnt_d:
; CHECK: subhnt z0.s, z1.d, z2.d
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subhnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                  <vscale x 2 x i64> %b,
                                                                  <vscale x 2 x i64> %c)
  ret <vscale x 4 x i32> %out
}


declare <vscale x 16 x i8> @llvm.aarch64.sve.addhnb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.addhnb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.addhnb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.addhnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.addhnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.addhnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.raddhnb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.raddhnb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.raddhnb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.raddhnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.raddhnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.raddhnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.subhnb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.subhnb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.subhnb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.subhnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.subhnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.subhnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.rsubhnb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.rsubhnb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.rsubhnb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.rsubhnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.rsubhnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.rsubhnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, <vscale x 2 x i64>)