Commit cddf73ef authored by Tom Stellard

Merging r343373:

------------------------------------------------------------------------
r343373 | rksimon | 2018-09-29 06:25:22 -0700 (Sat, 29 Sep 2018) | 3 lines

[X86][SSE] Fixed issue with v2i64 variable shifts on 32-bit targets

The shift amount might have been peeked through an extract_subvector, altering the number of vector elements in the 'Amt' variable - so we were incorrectly calculating the ratio when peeking through bitcasts, resulting in splats being incorrectly detected.
------------------------------------------------------------------------

llvm-svn: 344810
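To make the miscomputed ratio concrete: on a 32-bit target, the <i64 33, i64 63, i64 33, i64 63> shift amounts seen in the test below are built from i32 halves as the 8-element vector [33,0,63,0,33,0,63,0]. Below is a minimal standalone sketch of the two ratio computations (plain C++, not the LLVM API; the 8 x i32 BUILD_VECTOR reaching a v2i64 shift via the extract_subvector peek-through is an assumed scenario consistent with the description above):

#include <cassert>

int main() {
  const unsigned VTNumElts = 2;      // shifting a v2i64
  const unsigned AmtScalarBits = 32; // i32 amount elements
  const unsigned AmtNumElts = 8;     // assumption: Amt widened to 8 x i32
                                     // by the extract_subvector peek-through

  // Old: ratio derived from the two element counts -- wrong once Amt
  // no longer has one 64-bit lane's worth of elements per lane of VT.
  unsigned OldRatio = AmtNumElts / VTNumElts; // 4
  // New (this fix): i32 elements per 64-bit lane, independent of Amt's length.
  unsigned NewRatio = 64 / AmtScalarBits;     // 2

  assert(OldRatio == 4 && NewRatio == 2);
  return 0;
}

With a group size of 4, the splat check compares [33,0,63,0] against the identical second half of the BUILD_VECTOR, so the non-uniform per-lane amounts 33 and 63 are misread as a splat.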
parent 80adf409
lib/Target/X86/X86ISelLowering.cpp: +3 −4
@@ -23315,12 +23315,11 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
   if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
       Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
     Amt = Amt.getOperand(0);
-    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
-                     VT.getVectorNumElements();
+    unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
     std::vector<SDValue> Vals(Ratio);
     for (unsigned i = 0; i != Ratio; ++i)
       Vals[i] = Amt.getOperand(i);
-    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
+    for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
       for (unsigned j = 0; j != Ratio; ++j)
         if (Vals[j] != Amt.getOperand(i + j))
           return SDValue();
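As a sanity check on the loop above, here is a self-contained sketch of its comparison logic (plain C++ with ints standing in for the SDValue operands; isPeriodicSplat is a hypothetical helper, not an LLVM function), showing that the old group size of 4 accepts the non-uniform amounts from the test below while the corrected group size of 2 rejects them:

#include <cstdio>
#include <vector>

// True if Amt repeats with period Ratio -- the same check the lowering
// performs before treating the shift amount as a single splatted value.
static bool isPeriodicSplat(const std::vector<int> &Amt, unsigned Ratio) {
  std::vector<int> Vals(Amt.begin(), Amt.begin() + Ratio);
  for (size_t i = Ratio, e = Amt.size(); i != e; i += Ratio)
    for (unsigned j = 0; j != Ratio; ++j)
      if (Vals[j] != Amt[i + j])
        return false;
  return true;
}

int main() {
  // i32 expansion of <i64 33, i64 63, i64 33, i64 63> on a 32-bit target.
  std::vector<int> Amt = {33, 0, 63, 0, 33, 0, 63, 0};

  printf("Ratio 4: %d\n", isPeriodicSplat(Amt, 4)); // 1: misdetected splat
  printf("Ratio 2: %d\n", isPeriodicSplat(Amt, 2)); // 0: correctly rejected
  return 0;
}

Once the splat path is rejected, the lowering falls back to per-lane shifts, which is exactly the codegen change visible in the test diff below.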
test/CodeGen/X86/known-signbits-vector.ll: +18 −11
@@ -381,19 +381,26 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:    andl $-16, %esp
 ; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
+; X32-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
+; X32-NEXT:    vpsrlq %xmm3, %xmm4, %xmm5
+; X32-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
+; X32-NEXT:    vpsrlq %xmm6, %xmm4, %xmm4
+; X32-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
+; X32-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; X32-NEXT:    vpsrlq %xmm6, %xmm5, %xmm7
+; X32-NEXT:    vpsrlq %xmm3, %xmm5, %xmm5
+; X32-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT:    vpsrlq %xmm6, %xmm2, %xmm6
+; X32-NEXT:    vpsrlq %xmm3, %xmm2, %xmm2
+; X32-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
 ; X32-NEXT:    vpmovsxdq 16(%ebp), %xmm3
+; X32-NEXT:    vpxor %xmm4, %xmm5, %xmm5
+; X32-NEXT:    vpsubq %xmm4, %xmm5, %xmm5
+; X32-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; X32-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
 ; X32-NEXT:    vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT:    vmovdqa {{.*#+}} xmm5 = [33,0,63,0]
-; X32-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,2147483648,0,2147483648]
-; X32-NEXT:    vpsrlq %xmm5, %xmm6, %xmm6
-; X32-NEXT:    vextractf128 $1, %ymm2, %xmm7
-; X32-NEXT:    vpsrlq %xmm5, %xmm7, %xmm7
-; X32-NEXT:    vpxor %xmm6, %xmm7, %xmm7
-; X32-NEXT:    vpsubq %xmm6, %xmm7, %xmm7
-; X32-NEXT:    vpsrlq %xmm5, %xmm2, %xmm2
-; X32-NEXT:    vpxor %xmm6, %xmm2, %xmm2
-; X32-NEXT:    vpsubq %xmm6, %xmm2, %xmm2
-; X32-NEXT:    vinsertf128 $1, %xmm7, %ymm2, %ymm2
+; X32-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
 ; X32-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; X32-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; X32-NEXT:    vextractf128 $1, %ymm0, %xmm5