Commit be6070c2 authored by Craig Topper

[RISCV] Use FP ABI for some RVV intrinsic tests. NFC

Removes moves from GPR to FPR and improves f64 tests on RV32.

Differential Revision: https://reviews.llvm.org/D117969
parent 85e42db1
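
For context, every hunk below makes the same mechanical change; the following is a minimal before/after sketch taken from the first vfadd test (an illustration, not part of the diff itself). Under the default lp64 ABI the scalar FP operand is passed in the integer register a0 and the vector length in a1, so llc has to move the scalar into a temporary FP register first; under lp64d the scalar arrives directly in fa0 and the vector length shifts down to a0:

    # lp64 (before): scalar operand in a0, VL in a1
    fmv.h.x ft0, a0
    vsetvli zero, a1, e16, mf4, ta, mu
    vfadd.vf v8, v8, ft0

    # lp64d (after): scalar operand in fa0, VL in a0
    vsetvli zero, a0, e16, mf4, ta, mu
    vfadd.vf v8, v8, fa0
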
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh \
; RUN: -mattr=+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: -target-abi=lp64d < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
@@ -688,9 +688,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
@@ -712,9 +711,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
@@ -735,9 +733,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
@@ -759,9 +756,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
@@ -782,9 +778,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
@@ -806,9 +801,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
@@ -829,9 +823,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
@@ -853,9 +846,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
@@ -876,9 +868,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
@@ -900,9 +891,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
@@ -923,9 +913,8 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
@@ -947,9 +936,8 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
@@ -970,9 +958,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
@@ -994,9 +981,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
@@ -1017,9 +1003,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
@@ -1041,9 +1026,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
@@ -1064,9 +1048,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
@@ -1088,9 +1071,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
@@ -1111,9 +1093,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
@@ -1135,9 +1116,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
@@ -1158,9 +1138,8 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
@@ -1182,9 +1161,8 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
@@ -1205,9 +1183,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
@@ -1229,9 +1206,8 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
@@ -1252,9 +1228,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
@@ -1276,9 +1251,8 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
@@ -1299,9 +1273,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
@@ -1323,9 +1296,8 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
@@ -1346,9 +1318,8 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
@@ -1370,9 +1341,8 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.d.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vfadd.vf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: -target-abi=lp64d < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
@@ -687,9 +687,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfdiv_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16(
@@ -711,9 +710,8 @@ declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfdiv_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.f16(
@@ -734,9 +732,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfdiv_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16(
@@ -758,9 +755,8 @@ declare <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfdiv_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfdiv.mask.nxv2f16.f16(
@@ -781,9 +777,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfdiv_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16(
@@ -805,9 +800,8 @@ declare <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfdiv_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfdiv.mask.nxv4f16.f16(
@@ -828,9 +822,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfdiv_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.f16(
@@ -852,9 +845,8 @@ declare <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfdiv_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfdiv.mask.nxv8f16.f16(
@@ -875,9 +867,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfdiv_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfdiv.nxv16f16.f16(
@@ -899,9 +890,8 @@ declare <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfdiv_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfdiv.mask.nxv16f16.f16(
@@ -922,9 +912,8 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfdiv_vf_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfdiv.nxv32f16.f16(
@@ -946,9 +935,8 @@ declare <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfdiv_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.h.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v16, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v16, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfdiv.mask.nxv32f16.f16(
@@ -969,9 +957,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfdiv_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32(
@@ -993,9 +980,8 @@ declare <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfdiv_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32(
@@ -1016,9 +1002,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfdiv_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32(
@@ -1040,9 +1025,8 @@ declare <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfdiv_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v9, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32(
@@ -1063,9 +1047,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfdiv_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32(
@@ -1087,9 +1070,8 @@ declare <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfdiv_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v10, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v10, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32(
@@ -1110,9 +1092,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfdiv_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v8, ft0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32(
@@ -1134,9 +1115,8 @@ declare <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfdiv_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fmv.w.x ft0, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vfdiv.vf v8, v12, ft0, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfdiv.vf v8, v12, fa0, v0.t
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32(
@@ -1157,9 +1137,8 @@ declare <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfdiv_vf_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {