Commit a048bf87 authored by Mark Murray's avatar Mark Murray
Browse files

[ARM][MVE][Intrinsics] Add MVE VAND/VORR/VORN/VEOR/VBIC intrinsics. Add unit tests.

Summary: Add MVE VAND/VORR/VORN/VEOR/VBIC intrinsics. Add unit tests.

Reviewers: simon_tatham, ostannard, dmgreen

Subscribers: kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D70547
parent e8a8dbe9
Loading
Loading
Loading
Loading
+41 −0
Original line number Diff line number Diff line
@@ -28,9 +28,23 @@ foreach n = [ 2, 4 ] in {
                              "Intrinsic::arm_mve_vld"#n#"q":$IRIntr)>;
}

// Unpredicated bitwise op for floating-point vector types: bitcast both
// operands to the same-width unsigned-integer vector type (UVector),
// apply the integer bitwise IRBuilder op, and bitcast the result back
// to the original Vector type.
multiclass bit_op_fp<IRBuilder bitop> {
def "": Intrinsic<Vector, (args Vector:$a, Vector:$b),
   (bitcast (bitop (bitcast $a, UVector), (bitcast $b, UVector)), Vector)>;
}

// Same as bit_op_fp, but bitwise-inverts the second operand before
// applying the op (used for vbicq: a & ~b, and vornq: a | ~b).
multiclass bit_op_fp_with_inv<IRBuilder bitop> {
def "": Intrinsic<Vector, (args Vector:$a, Vector:$b),
   (bitcast (bitop (bitcast $a, UVector), (not (bitcast $b, UVector))), Vector)>;
}

// Unpredicated integer arithmetic and bitwise intrinsics; each maps
// directly onto the corresponding plain IR operation. vbicq and vornq
// invert their second operand before the and/or.
let params = T.Int in {
def vaddq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (add $a, $b)>;
def vandq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (and $a, $b)>;
def vbicq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (and $a, (not $b))>;
def veorq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (xor $a, $b)>;
def vornq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (or $a, (not $b))>;
def vorrq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (or $a, $b)>;
def vsubq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (sub $a, $b)>;
def vmulq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (mul $a, $b)>;
}
@@ -38,17 +52,39 @@ def vmulq: Intrinsic<Vector, (args Vector:$a, Vector:$b), (mul $a, $b)>;
// Unpredicated floating-point intrinsics. The arithmetic ops use the
// fadd/fsub/fmul IR operations directly; the bitwise ops go through
// bit_op_fp / bit_op_fp_with_inv, which bitcast via UVector. Each def
// is exposed under the same user-facing name as its integer
// counterpart via NameOverride.
let params = T.Float in {
def vaddqf: Intrinsic<Vector, (args Vector:$a, Vector:$b), (fadd $a, $b)>,
            NameOverride<"vaddq">;
defm vandqf: bit_op_fp<and>, NameOverride<"vandq">;
defm vbicqf: bit_op_fp_with_inv<and>, NameOverride<"vbicq">;
defm veorqf: bit_op_fp<xor>, NameOverride<"veorq">;
defm vornqf: bit_op_fp_with_inv<or>, NameOverride<"vornq">;
defm vorrqf: bit_op_fp<or>, NameOverride<"vorrq">;
def vsubqf: Intrinsic<Vector, (args Vector:$a, Vector:$b), (fsub $a, $b)>,
            NameOverride<"vsubq">;
def vmulqf: Intrinsic<Vector, (args Vector:$a, Vector:$b), (fmul $a, $b)>,
            NameOverride<"vmulq">;
}

// The bitcasting below does not overcomplicate the IR: although Vector
// and UVector may be different vector types at the C level (i.e.
// vectors of same-size signed/unsigned ints), once lowered to IR they
// are just bit vectors with no sign at all, so the bitcasts are
// automatically elided by IRBuilder.
// Predicated bitwise op shared between integer and floating-point
// parameter types: bitcasts both operands and the inactive-lanes
// fallback to UVector, calls the named target intrinsic (specialized
// on [UVector, Predicate]), and bitcasts the result back to Vector.
multiclass predicated_bit_op_fp<string int_op> {
def "": Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
                                Predicate:$pred),
    (bitcast (IRInt<int_op, [UVector, Predicate]>
                    (bitcast $a, UVector),
                    (bitcast $b, UVector),
                    $pred,
                    (bitcast $inactive, UVector)), Vector)>;
}

// Plain intrinsics
// vabdq has no plain-IR equivalent, so it maps onto the
// target-specific llvm.arm.mve.vabd intrinsic.
let params = T.Usual in {
def vabdq: Intrinsic<Vector, (args Vector:$a, Vector:$b),
                             (IRInt<"vabd", [Vector]> $a, $b)>;
}

// Predicated intrinsics
let params = T.Usual in {
def vabdq_m: Intrinsic<
    Vector, (args Vector:$inactive, Vector:$a, Vector:$b, Predicate:$pred),
@@ -62,6 +98,11 @@ def vsubq_m: Intrinsic<
def vmulq_m: Intrinsic<
    Vector, (args Vector:$inactive, Vector:$a, Vector:$b, Predicate:$pred),
    (IRInt<"mul_predicated", [Vector, Predicate]> $a, $b, $pred, $inactive)>;
// Predicated bitwise intrinsics, instantiated for all parameter types
// in this let-block via the predicated_bit_op_fp multiclass; each one
// names the target intrinsic it lowers to.
defm vandq_m: predicated_bit_op_fp<"and_predicated">;
defm vbicq_m: predicated_bit_op_fp<"bic_predicated">;
defm veorq_m: predicated_bit_op_fp<"eor_predicated">;
defm vornq_m: predicated_bit_op_fp<"orn_predicated">;
defm vorrq_m: predicated_bit_op_fp<"orr_predicated">;
}

let params = T.Int in {
+2 −0
Original line number Diff line number Diff line
@@ -59,8 +59,10 @@ class CGHelperFn<string func> : IRBuilderBase {
}
// One-to-one wrappers around the corresponding IRBuilder::Create* call
// names, used by the codegen patterns in the intrinsic definitions.
def add: IRBuilder<"CreateAdd">;
def mul: IRBuilder<"CreateMul">;
def not: IRBuilder<"CreateNot">;
def or: IRBuilder<"CreateOr">;
def and: IRBuilder<"CreateAnd">;
def xor: IRBuilder<"CreateXor">;
def sub: IRBuilder<"CreateSub">;
def shl: IRBuilder<"CreateShl">;
def lshr: IRBuilder<"CreateLShr">;
+72 −0
Original line number Diff line number Diff line
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>

// Unpredicated integer vandq: lowers to a plain IR 'and'.
// CHECK-LABEL: @test_vandq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = and <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
    return vandq(a, b);
#else /* POLYMORPHIC */
    return vandq_u32(a, b);
#endif /* POLYMORPHIC */
}

// Unpredicated float vandq: operands are bitcast to <4 x i32>, and'ed,
// and the result bitcast back to <4 x float>.
// CHECK-LABEL: @test_vandq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP3]]
//
float32x4_t test_vandq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vandq(a, b);
#else /* POLYMORPHIC */
    return vandq_f32(a, b);
#endif /* POLYMORPHIC */
}

// Predicated integer vandq_m: the i16 predicate mask is zero-extended,
// converted to a <16 x i1> vector predicate, and passed with the
// inactive-lanes fallback to llvm.arm.mve.and.predicated.
// CHECK-LABEL: @test_vandq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.and.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vandq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vandq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vandq_m_s8(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// Predicated float vandq_m: operands and inactive value are bitcast to
// integer vectors around the call to llvm.arm.mve.and.predicated, and
// the result bitcast back to <8 x half>.
// CHECK-LABEL: @test_vandq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[A:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i16> @llvm.arm.mve.and.predicated.v8i16.v8i1(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i1> [[TMP3]], <8 x i16> [[TMP4]])
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <8 x half>
// CHECK-NEXT:    ret <8 x half> [[TMP6]]
//
float16x8_t test_vandq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vandq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vandq_m_f16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}
+74 −0
Original line number Diff line number Diff line
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>

// Unpredicated integer vbicq (a & ~b): lowers to xor-with-all-ones
// followed by 'and'.
// CHECK-LABEL: @test_vbicq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[B:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[TMP1:%.*]] = and <4 x i32> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_u32(a, b);
#endif /* POLYMORPHIC */
}

// Unpredicated float vbicq: operands are bitcast to <4 x i32>, the
// second operand is inverted, then and'ed, and the result bitcast
// back to <4 x float>.
// CHECK-LABEL: @test_vbicq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[TMP3:%.*]] = and <4 x i32> [[TMP0]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP4]]
//
float32x4_t test_vbicq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_f32(a, b);
#endif /* POLYMORPHIC */
}

// Predicated integer vbicq_m: predicate mask is converted to a
// <16 x i1> vector predicate and passed with the inactive-lanes
// fallback to llvm.arm.mve.bic.predicated.
// CHECK-LABEL: @test_vbicq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.bic.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vbicq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_s8(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// Predicated float vbicq_m: operands and inactive value are bitcast
// to integer vectors around llvm.arm.mve.bic.predicated, and the
// result bitcast back to <8 x half>.
// CHECK-LABEL: @test_vbicq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[A:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i1> [[TMP3]], <8 x i16> [[TMP4]])
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <8 x half>
// CHECK-NEXT:    ret <8 x half> [[TMP6]]
//
float16x8_t test_vbicq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_f16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}
+72 −0
Original line number Diff line number Diff line
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s

#include <arm_mve.h>

// Unpredicated integer veorq: lowers to a plain IR 'xor'.
// CHECK-LABEL: @test_veorq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
//
uint32x4_t test_veorq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
    return veorq(a, b);
#else /* POLYMORPHIC */
    return veorq_u32(a, b);
#endif /* POLYMORPHIC */
}

// Unpredicated float veorq: operands are bitcast to <4 x i32>, xor'ed,
// and the result bitcast back to <4 x float>.
// CHECK-LABEL: @test_veorq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP0]], [[TMP1]]
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP3]]
//
float32x4_t test_veorq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return veorq(a, b);
#else /* POLYMORPHIC */
    return veorq_f32(a, b);
#endif /* POLYMORPHIC */
}

// Predicated integer veorq_m: predicate mask is converted to a
// <16 x i1> vector predicate and passed with the inactive-lanes
// fallback to llvm.arm.mve.eor.predicated.
// CHECK-LABEL: @test_veorq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.eor.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_veorq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return veorq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return veorq_m_s8(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// Predicated float veorq_m: operands and inactive value are bitcast
// to integer vectors around llvm.arm.mve.eor.predicated, and the
// result bitcast back to <8 x half>.
// CHECK-LABEL: @test_veorq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[A:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i16> @llvm.arm.mve.eor.predicated.v8i16.v8i1(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i1> [[TMP3]], <8 x i16> [[TMP4]])
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <8 x half>
// CHECK-NEXT:    ret <8 x half> [[TMP6]]
//
float16x8_t test_veorq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return veorq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return veorq_m_f16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}
Loading