Commit 8f7d3430 authored by Pavel Iliin's avatar Pavel Iliin
Browse files

[ARM][NFC] More detailed vbsl checks in ARM & Thumb2 tests.

parent a7e9c5a3
Loading
Loading
Loading
Loading
+74 −22
Original line number Diff line number Diff line
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-post-ra -mtriple=armv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=SOFT
; RUN: llc < %s -disable-post-ra -mtriple=armv7-gnueabi -float-abi=hard -mcpu=cortex-a8 | FileCheck %s -check-prefix=HARD

; rdar://8984306
define float @test1(float %x, float %y) nounwind {
; copysign(float x, float y): soft-float ABI does it in GPRs (lsr+bfi on the
; sign bit); hard-float ABI selects the sign via NEON vbsl with a 0x80000000 mask.
; SOFT-LABEL: test1:
; SOFT:       @ %bb.0: @ %entry
; SOFT-NEXT:    lsr r1, r1, #31
; SOFT-NEXT:    bfi r0, r1, #31, #1
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test1:
; HARD:       @ %bb.0: @ %entry
; HARD-NEXT:    vmov.f32 s4, s1
; HARD-NEXT:    @ kill: def $s0 killed $s0 def $d0
; HARD-NEXT:    vmov.i32 d1, #0x80000000
; HARD-NEXT:    vbsl d1, d2, d0
; HARD-NEXT:    vmov.f32 s0, s2
; HARD-NEXT:    bx lr
entry:
  %0 = tail call float @copysignf(float %x, float %y) nounwind readnone
  ret float %0
}

define double @test2(double %x, double %y) nounwind {
; copysign(double x, double y): soft-float works on the high GPR of the pair;
; hard-float builds the f64 sign mask via vmov.i32 + vshl.i64, then vbsl.
; SOFT-LABEL: test2:
; SOFT:       @ %bb.0: @ %entry
; SOFT-NEXT:    lsr r2, r3, #31
; SOFT-NEXT:    bfi r1, r2, #31, #1
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test2:
; HARD:       @ %bb.0: @ %entry
; HARD-NEXT:    vmov.i32 d16, #0x80000000
; HARD-NEXT:    vshl.i64 d16, d16, #32
; HARD-NEXT:    vbsl d16, d1, d0
; HARD-NEXT:    vorr d0, d16, d16
; HARD-NEXT:    bx lr
entry:
  %0 = tail call double @copysign(double %x, double %y) nounwind readnone
  ret double %0
}

define double @test3(double %x, double %y, double %z) nounwind {
entry:
; SOFT-LABEL: test3:
; SOFT: vmov.i32 [[REG3:(d[0-9]+)]], #0x80000000
; SOFT: vshl.i64 [[REG3]], [[REG3]], #32
; SOFT: vbsl [[REG3]],
; SOFT:       @ %bb.0: @ %entry
; SOFT-NEXT:    vmov d16, r2, r3
; SOFT-NEXT:    vmov d17, r0, r1
; SOFT-NEXT:    vmul.f64 d16, d17, d16
; SOFT-NEXT:    vmov.i32 d17, #0x80000000
; SOFT-NEXT:    vshl.i64 d17, d17, #32
; SOFT-NEXT:    vldr d18, [sp]
; SOFT-NEXT:    vbsl d17, d18, d16
; SOFT-NEXT:    vmov r0, r1, d17
; SOFT-NEXT:    bx lr
;
; HARD-LABEL: test3:
; HARD:       @ %bb.0: @ %entry
; HARD-NEXT:    vmul.f64 d16, d0, d1
; HARD-NEXT:    vmov.i32 d17, #0x80000000
; HARD-NEXT:    vshl.i64 d0, d17, #32
; HARD-NEXT:    vbsl d0, d2, d16
; HARD-NEXT:    bx lr
entry:
  %0 = fmul double %x, %y
  %1 = tail call double @copysign(double %0, double %z) nounwind readnone
  ret double %1
@@ -42,12 +72,34 @@ entry:

; rdar://9287902
define float @test4() nounwind {
entry:
; SOFT-LABEL: test4:
; SOFT: vmov [[REG7:(d[0-9]+)]], r0, r1
; SOFT: vmov.i32 [[REG6:(d[0-9]+)]], #0x80000000
; SOFT: vshr.u64 [[REG7]], [[REG7]], #32
; SOFT: vbsl [[REG6]], [[REG7]], 
; SOFT:       @ %bb.0: @ %entry
; SOFT-NEXT:    push {lr}
; SOFT-NEXT:    bl _bar
; SOFT-NEXT:    vmov d16, r0, r1
; SOFT-NEXT:    vcvt.f32.f64 s0, d16
; SOFT-NEXT:    vmov.i32 d17, #0x80000000
; SOFT-NEXT:    vshr.u64 d16, d16, #32
; SOFT-NEXT:    vmov.f32 d18, #5.000000e-01
; SOFT-NEXT:    vbsl d17, d16, d18
; SOFT-NEXT:    vadd.f32 d0, d0, d17
; SOFT-NEXT:    vmov r0, s0
; SOFT-NEXT:    pop {lr}
;
; HARD-LABEL: test4:
; HARD:       @ %bb.0: @ %entry
; HARD-NEXT:    .save {r11, lr}
; HARD-NEXT:    push {r11, lr}
; HARD-NEXT:    bl bar
; HARD-NEXT:    vmov d16, r0, r1
; HARD-NEXT:    vcvt.f32.f64 s0, d16
; HARD-NEXT:    vmov.i32 d1, #0x80000000
; HARD-NEXT:    vshr.u64 d16, d16, #32
; HARD-NEXT:    vmov.f32 s4, #5.000000e-01
; HARD-NEXT:    vbsl d1, d16, d2
; HARD-NEXT:    vadd.f32 s0, s0, s2
; HARD-NEXT:    pop {r11, pc}
entry:
  %0 = tail call double (...) @bar() nounwind
  %1 = fptrunc double %0 to float
  %2 = tail call float @copysignf(float 5.000000e-01, float %1) nounwind readnone
+36 −10
Original line number Diff line number Diff line
@@ -424,7 +424,7 @@ declare half @llvm.fmuladd.f16(half %a, half %b, half %c) #0
; CHECK-FP16: vsqrt.f32
; CHECK-FP16: vcvtb.f16.f32
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vsqrt.f32
; CHECK-LIBCALL-VFP: vsqrt.f32
; CHECK-NOVFP: bl sqrtf
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_sqrt(half* %p) #0 {
@@ -700,18 +700,44 @@ define void @test_maximum(half* %p) #0 {
}

; CHECK-FP16-LABEL: test_copysign:
; CHECK-FP16: vcvtb.f32.f16
; CHECK-FP16: vcvtb.f32.f16
; CHECK-FP16: vbsl
; CHECK-FP16: vcvtb.f16.f32
; CHECK-FP16:         ldrh r2, [r0]
; CHECK-FP16-NEXT:    vmov.i32 d0, #0x80000000
; CHECK-FP16-NEXT:    ldrh r1, [r1]
; CHECK-FP16-NEXT:    vmov s2, r2
; CHECK-FP16-NEXT:    vmov s4, r1
; CHECK-FP16-NEXT:    vcvtb.f32.f16 s2, s2
; CHECK-FP16-NEXT:    vcvtb.f32.f16 s4, s4
; CHECK-FP16-NEXT:    vbsl d0, d2, d1
; CHECK-FP16-NEXT:    vcvtb.f16.f32 s0, s0
; CHECK-FP16-NEXT:    vmov r1, s0
; CHECK-FP16-NEXT:    strh r1, [r0]
; CHECK-FP16-NEXT:    bx lr

; CHECK-LIBCALL-LABEL: test_copysign:
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vbsl
; CHECK-LIBCALL-VFP:         .fnstart
; CHECK-LIBCALL-VFP-NEXT:    .save {r4, r5, r11, lr}
; CHECK-LIBCALL-VFP-NEXT:    push {r4, r5, r11, lr}
; CHECK-LIBCALL-VFP-NEXT:    .vsave {d8, d9}
; CHECK-LIBCALL-VFP-NEXT:    vpush {d8, d9}
; CHECK-LIBCALL-VFP-NEXT:    mov r5, r0
; CHECK-LIBCALL-VFP-NEXT:    ldrh r0, [r0]
; CHECK-LIBCALL-VFP-NEXT:    mov r4, r1
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL-VFP:         ldrh r1, [r4]
; CHECK-LIBCALL-VFP-NEXT:    vmov s18, r0
; CHECK-LIBCALL-VFP-NEXT:    vmov.i32 d8, #0x80000000
; CHECK-LIBCALL-VFP-NEXT:    mov r0, r1
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL-VFP:         vmov s0, r0
; CHECK-LIBCALL-VFP-NEXT:    vbsl d8, d0, d9
; CHECK-LIBCALL-VFP-NEXT:    vmov r0, s16
; CHECK-LIBCALL: bl __aeabi_f2h
; CHECK-LIBCALL-VFP:         strh r0, [r5]
; CHECK-LIBCALL-VFP-NEXT:    vpop {d8, d9}
; CHECK-LIBCALL-VFP-NEXT:    pop {r4, r5, r11, pc}
; CHECK-NOVFP: and
; CHECK-NOVFP: bic
; CHECK-NOVFP: orr
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_copysign(half* %p, half* %q) #0 {
  %a = load half, half* %p, align 2
  %b = load half, half* %q, align 2
@@ -820,7 +846,7 @@ define void @test_round(half* %p) {
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vmla.f32
; CHECK-LIBCALL-VFP: vmla.f32
; CHECK-NOVFP: bl __aeabi_fmul
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_fmuladd(half* %p, half* %q, half* %r) #0 {
+70 −34
Original line number Diff line number Diff line
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+neon | FileCheck %s

define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: v_bsli8:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i8 d16, #0x3
; CHECK-NEXT:    vldr d17, [r2]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d16, d18, d17
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = load <8 x i8>, <8 x i8>* %C
@@ -16,9 +21,13 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {

define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: v_bsli16:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i16 d16, #0x3
; CHECK-NEXT:    vldr d17, [r2]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d16, d18, d17
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = load <4 x i16>, <4 x i16>* %C
@@ -30,9 +39,13 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind

define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: v_bsli32:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 d16, #0x3
; CHECK-NEXT:    vldr d17, [r2]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d16, d18, d17
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = load <2 x i32>, <2 x i32>* %C
@@ -44,10 +57,13 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind

define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
; CHECK-LABEL: v_bsli64:
;CHECK: vldr
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d17, [r2]
; CHECK-NEXT:    vldr d16, LCPI3_0
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d16, d18, d17
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <1 x i64>, <1 x i64>* %A
	%tmp2 = load <1 x i64>, <1 x i64>* %B
	%tmp3 = load <1 x i64>, <1 x i64>* %C
@@ -59,9 +75,14 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind

define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
; CHECK-LABEL: v_bslQi8:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
; CHECK-NEXT:    vmov.i8 q9, #0x3
; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = load <16 x i8>, <16 x i8>* %B
	%tmp3 = load <16 x i8>, <16 x i8>* %C
@@ -73,9 +94,14 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind

define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
; CHECK-LABEL: v_bslQi16:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
; CHECK-NEXT:    vmov.i16 q9, #0x3
; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = load <8 x i16>, <8 x i16>* %B
	%tmp3 = load <8 x i16>, <8 x i16>* %C
@@ -87,9 +113,14 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin

define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
; CHECK-LABEL: v_bslQi32:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
; CHECK-NEXT:    vmov.i32 q9, #0x3
; CHECK-NEXT:    vld1.32 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = load <4 x i32>, <4 x i32>* %B
	%tmp3 = load <4 x i32>, <4 x i32>* %C
@@ -101,10 +132,15 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin

define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
; CHECK-LABEL: v_bslQi64:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vld1.64
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.32 {d16, d17}, [r2]
; CHECK-NEXT:    vld1.32 {d18, d19}, [r0]
; CHECK-NEXT:    adr r0, LCPI7_0
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0:128]
; CHECK-NEXT:    vbsl q10, q9, q8
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x i64>, <2 x i64>* %A
	%tmp2 = load <2 x i64>, <2 x i64>* %B
	%tmp3 = load <2 x i64>, <2 x i64>* %C
+177 −28
Original line number Diff line number Diff line
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s

; rdar://12471808

define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: v_bsli8:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r2]
; CHECK-NEXT:    vldr d17, [r1]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i8>, <8 x i8>* %A
	%tmp2 = load <8 x i8>, <8 x i8>* %B
	%tmp3 = load <8 x i8>, <8 x i8>* %C
@@ -17,7 +24,13 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {

define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: v_bsli16:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r2]
; CHECK-NEXT:    vldr d17, [r1]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i16>, <4 x i16>* %A
	%tmp2 = load <4 x i16>, <4 x i16>* %B
	%tmp3 = load <4 x i16>, <4 x i16>* %C
@@ -30,7 +43,13 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind

define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: v_bsli32:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r2]
; CHECK-NEXT:    vldr d17, [r1]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x i32>, <2 x i32>* %A
	%tmp2 = load <2 x i32>, <2 x i32>* %B
	%tmp3 = load <2 x i32>, <2 x i32>* %C
@@ -43,7 +62,13 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind

define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
; CHECK-LABEL: v_bsli64:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r2]
; CHECK-NEXT:    vldr d17, [r1]
; CHECK-NEXT:    vldr d18, [r0]
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <1 x i64>, <1 x i64>* %A
	%tmp2 = load <1 x i64>, <1 x i64>* %B
	%tmp3 = load <1 x i64>, <1 x i64>* %C
@@ -56,7 +81,14 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind

define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
; CHECK-LABEL: v_bslQi8:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q10, q9, q8
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <16 x i8>, <16 x i8>* %A
	%tmp2 = load <16 x i8>, <16 x i8>* %B
	%tmp3 = load <16 x i8>, <16 x i8>* %C
@@ -69,7 +101,14 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind

define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
; CHECK-LABEL: v_bslQi16:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q10, q9, q8
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <8 x i16>, <8 x i16>* %A
	%tmp2 = load <8 x i16>, <8 x i16>* %B
	%tmp3 = load <8 x i16>, <8 x i16>* %C
@@ -82,7 +121,14 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin

define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
; CHECK-LABEL: v_bslQi32:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q10, q9, q8
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <4 x i32>, <4 x i32>* %A
	%tmp2 = load <4 x i32>, <4 x i32>* %B
	%tmp3 = load <4 x i32>, <4 x i32>* %C
@@ -95,7 +141,14 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin

define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
; CHECK-LABEL: v_bslQi64:
;CHECK: vbsl
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r2]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q10, q9, q8
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
	%tmp1 = load <2 x i64>, <2 x i64>* %A
	%tmp2 = load <2 x i64>, <2 x i64>* %B
	%tmp3 = load <2 x i64>, <2 x i64>* %C
@@ -108,84 +161,180 @@ define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwin

define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp {
; llvm.arm.neon.vbsl on a 64-bit vector lowers to a single d-register vbsl.
; CHECK-LABEL: f1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind
  ret <8 x i8> %vbsl.i
}

define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
; Same as f1 for the <4 x i16> element type: one d-register vbsl.
; CHECK-LABEL: f2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind
  ret <4 x i16> %vbsl3.i
}

define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
; Same as f1 for the <2 x i32> element type: one d-register vbsl.
; CHECK-LABEL: f3:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind
  ret <2 x i32> %vbsl3.i
}

define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone optsize ssp {
; Same as f1 for the <2 x float> element type: one d-register vbsl.
; CHECK-LABEL: f4:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl4.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
  ret <2 x float> %vbsl4.i
}

define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp {
; llvm.arm.neon.vbsl on a 128-bit vector lowers to a single q-register vbsl.
; CHECK-LABEL: g1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind
  ret <16 x i8> %vbsl.i
}

define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp {
; Same as g1 for the <8 x i16> element type: one q-register vbsl.
; CHECK-LABEL: g2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind
  ret <8 x i16> %vbsl3.i
}

define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
; Same as g1 for the <4 x i32> element type: one q-register vbsl.
; CHECK-LABEL: g3:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind
  ret <4 x i32> %vbsl3.i
}

define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone optsize ssp {
; Same as g1 for the <4 x float> element type: one q-register vbsl.
; CHECK-LABEL: g4:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl4.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind
  ret <4 x float> %vbsl4.i
}

define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
; <1 x i64> vbsl still uses the 64-bit d-register form.
; CHECK-LABEL: test_vbsl_s64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
  ret <1 x i64> %vbsl3.i
}

define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
; Unsigned variant of test_vbsl_s64 — identical lowering (vbsl is bitwise).
; CHECK-LABEL: test_vbsl_u64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [sp]
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    vbsl d18, d17, d16
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
  ret <1 x i64> %vbsl3.i
}

define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
; <2 x i64> vbsl uses the 128-bit q-register form.
; CHECK-LABEL: test_vbslq_s64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
  ret <2 x i64> %vbsl3.i
}

define <2 x i64> @test_vbslq_u64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
; Unsigned variant of test_vbslq_s64 — identical lowering (vbsl is bitwise).
; CHECK-LABEL: test_vbslq_u64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    add r12, sp, #16
; CHECK-NEXT:    vmov d19, r2, r3
; CHECK-NEXT:    vld1.64 {d16, d17}, [r12]
; CHECK-NEXT:    vmov d18, r0, r1
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0]
; CHECK-NEXT:    vbsl q9, q10, q8
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    vmov r2, r3, d19
; CHECK-NEXT:    mov pc, lr
  %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
  ret <2 x i64> %vbsl3.i
}
+417 −17

File changed.

Preview size limit exceeded, changes collapsed.

Loading