Commit 85e42db1 authored by Craig Topper's avatar Craig Topper
Browse files

[RISCV] Merge some rvv intrinsic test cases that only differ by XLen type.

Instead of having separate tests for i32 XLen and i64 XLen, use sed to
replace iXLen with i32/i64 before running llc.

This change updates tests for intrinsics that operate exclusively
on mask values. It removes over 4000 lines worth of test content.
More merging will come in future changes.

Differential Revision: https://reviews.llvm.org/D117968
parent 3cf15af2
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
; Unmasked vcpop on <vscale x 1 x i1> with i64 XLen: expect a single
; vsetvli (e8, mf8) followed by vcpop.m on v0.
declare i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 1 x i1>: the source mask is saved to v9 and the
; second mask operand (arriving in v8 per the CHECKs) is moved into v0 so the
; masked form vcpop.m ... v0.t can be used.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 2 x i1> with i64 XLen: LMUL steps to mf4.
declare i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 2 x i1>: same v9/v0 mask shuffle as nxv1i1,
; with LMUL mf4.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 4 x i1> with i64 XLen: LMUL steps to mf2.
declare i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 4 x i1>: same v9/v0 mask shuffle, LMUL mf2.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 8 x i1> with i64 XLen: LMUL steps to m1.
declare i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 8 x i1>: same v9/v0 mask shuffle, LMUL m1.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 16 x i1> with i64 XLen: LMUL steps to m2.
declare i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 16 x i1>: same v9/v0 mask shuffle, LMUL m2.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 32 x i1> with i64 XLen: LMUL steps to m4.
declare i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 32 x i1>: same v9/v0 mask shuffle, LMUL m4.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret i64 %a
}
; Unmasked vcpop on <vscale x 64 x i1> with i64 XLen: largest mask type,
; LMUL m8.
declare i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vcpop_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1> %0,
i64 %1)
ret i64 %a
}
; Masked vcpop on <vscale x 64 x i1>: same v9/v0 mask shuffle, LMUL m8.
declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
%a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret i64 %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; This region was a corrupted diff capture: the old i32-only RUN lines and
; declarations were interleaved line-by-line with the new XLen-parameterized
; ones. Reconstructed as the merged test: written once with iXLen and
; specialized to i32/i64 via sed before running llc.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; Unmasked vcpop on <vscale x 1 x i1>: expect vsetvli (e8, mf8) + vcpop.m.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
  <vscale x 1 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv1i1(
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
; Masked vcpop on <vscale x 1 x i1>. Reconstructed from the corrupted diff
; capture; the "vmv1r.v v0, v8" CHECK line was hidden inside a collapsed diff
; hunk and is restored here to match the pre-merge i64 version of this test.
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vcpop.m a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)
  ret iXLen %a
}
; Unmasked vcpop on <vscale x 2 x i1> (LMUL mf4). Reconstructed from the
; corrupted diff capture by keeping only the new iXLen lines.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
  <vscale x 2 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv2i1(
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
; Masked vcpop on <vscale x 2 x i1> (LMUL mf4). Reconstructed from the
; corrupted diff capture; "vmv1r.v v0, v8" restored from the collapsed hunk,
; matching the pre-merge i64 version.
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vcpop.m a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    iXLen %2)
  ret iXLen %a
}
; Unmasked vcpop on <vscale x 4 x i1> (LMUL mf2). Reconstructed from the
; corrupted diff capture by keeping only the new iXLen lines.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
  <vscale x 4 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv4i1(
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
; Masked vcpop on <vscale x 4 x i1> (LMUL mf2). Reconstructed from the
; corrupted diff capture; "vmv1r.v v0, v8" restored from the collapsed hunk,
; matching the pre-merge i64 version.
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vcpop.m a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    iXLen %2)
  ret iXLen %a
}
; Unmasked vcpop on <vscale x 8 x i1> (LMUL m1). Reconstructed from the
; corrupted diff capture by keeping only the new iXLen lines.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
  <vscale x 8 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv8i1(
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
; Masked vcpop on <vscale x 8 x i1> (LMUL m1). Reconstructed from the
; corrupted diff capture; "vmv1r.v v0, v8" restored from the collapsed hunk,
; matching the pre-merge i64 version.
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vcpop.m a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    iXLen %2)
  ret iXLen %a
}
; Unmasked vcpop on <vscale x 16 x i1> (LMUL m2). Reconstructed from the
; corrupted diff capture by keeping only the new iXLen lines.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
  <vscale x 16 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv16i1(
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
; Masked vcpop on <vscale x 16 x i1> (LMUL m2). Reconstructed from the
; corrupted diff capture; "vmv1r.v v0, v8" restored from the collapsed hunk,
; matching the pre-merge i64 version.
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vcpop.m a0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.mask.iXLen.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    iXLen %2)
  ret iXLen %a
}
; Unmasked vcpop on <vscale x 32 x i1> (LMUL m4). Reconstructed from the
; corrupted diff capture by keeping only the new iXLen lines.
declare iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
  <vscale x 32 x i1>,
  iXLen);

define iXLen @intrinsic_vcpop_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vcpop.m a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call iXLen @llvm.riscv.vcpop.iXLen.nxv32i1(
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret iXLen %a
}
declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
declare iXLen @llvm.riscv.vcpop.mask.iXLen.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
iXLen);
define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1:
define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
......@@ -233,39 +235,39 @@ define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x