Commit 7e9747b5 authored by Simon Pilgrim

[X86][F16C] Remove cvtph2ps intrinsics and use generic half2float conversion (PR37554)

This removes everything but int_x86_avx512_mask_vcvtph2ps_512, which provides the SAE variant, but even that can use the generic fpext if the rounding control is the default.
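As a rough illustration (not part of the patch itself), the sketch below shows how the cvtph2ps family behaves after this change. It assumes clang with -mf16c -mavx512f -mavx512vl; the function names are made up for this example, and the IR in the comments paraphrases the updated CHECK lines below rather than quoting exact compiler output.

#include <immintrin.h>

// Default rounding: the cvtph2ps builtins now lower to a plain bitcast + fpext,
//   %h = bitcast <8 x i16> %x to <8 x half>
//   %f = fpext <8 x half> %h to <8 x float>
// instead of a call to an llvm.x86.* cvtph2ps intrinsic.
__m256 widen_default(__m128i h16) {
  return _mm256_cvtph_ps(h16);
}

// Masked forms additionally emit a select on the mask vector, matching the
// "select <8 x i1> ..." CHECK lines in the updated tests.
__m256 widen_masked(__m256 src, __mmask8 m, __m128i h16) {
  return _mm256_mask_cvtph_ps(src, m, h16);
}

// Only the 512-bit form called with a non-default rounding/SAE argument keeps
// the llvm.x86.avx512.mask.vcvtph2ps.512 intrinsic call.
__m512 widen_sae(__m256i h16) {
  return _mm512_cvt_roundph_ps(h16, _MM_FROUND_NO_EXC);
}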

Differential Revision: https://reviews.llvm.org/D75162
parent 777e97cc
+48 −0
@@ -10327,6 +10327,46 @@ Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
  return EmitX86CpuIs(CPUStr);
}
// Convert F16 halves to floats.
static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
                                       ArrayRef<Value *> Ops,
                                       llvm::Type *DstTy) {
  assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
         "Unknown cvtph2ps intrinsic");
  // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
  if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
    Intrinsic::ID IID = Intrinsic::x86_avx512_mask_vcvtph2ps_512;
    Function *F =
        CGF.CGM.getIntrinsic(IID, {DstTy, Ops[0]->getType(), Ops[1]->getType(),
                                   Ops[2]->getType(), Ops[3]->getType()});
    return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
  }
  unsigned NumDstElts = DstTy->getVectorNumElements();
  Value *Src = Ops[0];
  // Extract the subvector.
  if (NumDstElts != Src->getType()->getVectorNumElements()) {
    assert(NumDstElts == 4 && "Unexpected vector size");
    uint32_t ShuffleMask[4] = {0, 1, 2, 3};
    Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
                                          ShuffleMask);
  }
  // Bitcast from vXi16 to vXf16.
  llvm::Type *HalfTy = llvm::VectorType::get(
      llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
  Src = CGF.Builder.CreateBitCast(Src, HalfTy);
  // Perform the fp-extension.
  Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
  if (Ops.size() >= 3)
    Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
  return Res;
}
// Convert a BF16 to a float.
static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
                                        const CallExpr *E,
@@ -12531,6 +12571,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
  case X86::BI__builtin_ia32_cmpordsd:
    return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
  // f16c half2float intrinsics
  case X86::BI__builtin_ia32_vcvtph2ps:
  case X86::BI__builtin_ia32_vcvtph2ps256:
  case X86::BI__builtin_ia32_vcvtph2ps_mask:
  case X86::BI__builtin_ia32_vcvtph2ps256_mask:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
  // AVX512 bf16 intrinsics
  case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
    Ops[2] = getMaskVecValue(*this, Ops[2],
+14 −3
@@ -171,21 +171,32 @@ __m128 test_mm_maskz_sqrt_ss(__mmask8 __U, __m128 __A, __m128 __B){
__m512 test_mm512_cvtph_ps (__m256i __A)
{
  // COMMON-LABEL: test_mm512_cvtph_ps 
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
  return _mm512_cvtph_ps (__A);
}

__m512 test_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
{
  // COMMON-LABEL: test_mm512_mask_cvtph_ps 
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
  return _mm512_mask_cvtph_ps (__W,__U,__A);
}

__m512 test_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
{
  // COMMON-LABEL: test_mm512_maskz_cvtph_ps 
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
  return _mm512_maskz_cvtph_ps (__U,__A);
}
+11 −3
@@ -9463,21 +9463,29 @@ __m256 test_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
__m512 test_mm512_cvtph_ps (__m256i __A)
{
  // CHECK-LABEL: @test_mm512_cvtph_ps 
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
  return _mm512_cvtph_ps (__A);
}

__m512 test_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
{
  // CHECK-LABEL: @test_mm512_mask_cvtph_ps 
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
  // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
  return _mm512_mask_cvtph_ps (__W,__U,__A);
}

__m512 test_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
{
  // CHECK-LABEL: @test_mm512_maskz_cvtph_ps 
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
  // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
  return _mm512_maskz_cvtph_ps (__U,__A);
}

+22 −4
@@ -8,25 +8,43 @@

__m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm_mask_cvtph_ps
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.128
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict") 
  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_mask_cvtph_ps(__W, __U, __A);
}

__m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm_maskz_cvtph_ps
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.128
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict") 
  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_maskz_cvtph_ps(__U, __A);
}

__m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm256_mask_cvtph_ps
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.256
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict") 
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_mask_cvtph_ps(__W, __U, __A);
}

__m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // COMMON-LABEL: @test_mm256_maskz_cvtph_ps
  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.256
  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict") 
  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_maskz_cvtph_ps(__U, __A);
}

+18 −4
@@ -9692,25 +9692,39 @@ __m256 test_mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) {

__m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
  // CHECK-LABEL: @test_mm_mask_cvtph_ps
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.128
  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_mask_cvtph_ps(__W, __U, __A);
}

__m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // CHECK-LABEL: @test_mm_maskz_cvtph_ps
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.128
  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
  return _mm_maskz_cvtph_ps(__U, __A);
}

__m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
  // CHECK-LABEL: @test_mm256_mask_cvtph_ps
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.256
  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
  // CHECK: fpext <8 x half> %{{.*}} to <8 x float>
  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_mask_cvtph_ps(__W, __U, __A);
}

__m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
  // CHECK-LABEL: @test_mm256_maskz_cvtph_ps
  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.256
  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
  // CHECK: fpext <8 x half> %{{.*}} to <8 x float>
  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
  return _mm256_maskz_cvtph_ps(__U, __A);
}
