Commit bfbfd1ca authored by Simon Pilgrim

[X86] combineLoad - try to reuse existing constant pool entries for smaller vector constant data

If we already have a YMM/ZMM constant whose lower bits match a smaller XMM/YMM constant, then ensure we reuse the same constant pool entry.

This extends the similar combines we already have that reuse VBROADCAST_LOAD/SUBV_BROADCAST_LOAD constant loads.

This is mainly a canonicalization, but it should make it easier to merge constant loads in a future commit (related to both #70947 and better X86FixupVectorConstantsPass usage for #71078).
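For illustration only, here is a minimal, hypothetical IR example (not taken from the patch or its test updates) of the kind of pattern this targets when compiled for x86-64 with AVX: the <2 x i64> constant <0, 1> matches the lower 128 bits of the <4 x i64> constant <0, 1, 2, 3>, so the narrower XMM constant load can reuse the wider 256-bit constant pool entry instead of emitting a separate 128-bit entry.

; Hypothetical example: both vector adds need a constant pool load under AVX;
; the narrow constant is the low half of the wide one, so one entry can serve both.
define <2 x i64> @reuse_low_constant_bits(<2 x i64> %a, <4 x i64> %b, ptr %p) {
  %wide = add <4 x i64> %b, <i64 0, i64 1, i64 2, i64 3>
  store <4 x i64> %wide, ptr %p
  %narrow = add <2 x i64> %a, <i64 0, i64 1>
  ret <2 x i64> %narrow
}

The function name and arguments above are placeholders; the f4xi64_i128 test changes below exercise the same idea.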
parent a4051932
+11 −6
@@ -49796,8 +49796,8 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
    }
  }
-  // If we also broadcast this to a wider type, then just extract the lowest
-  // subvector.
+  // If we also load/broadcast this to a wider type, then just extract the
+  // lowest subvector.
  if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
      (RegVT.is128BitVector() || RegVT.is256BitVector())) {
    SDValue Ptr = Ld->getBasePtr();
@@ -49805,8 +49805,9 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
    for (SDNode *User : Chain->uses()) {
      if (User != N &&
          (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
-           User->getOpcode() == X86ISD::VBROADCAST_LOAD) &&
-          cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
+           User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
+           ISD::isNormalLoad(User)) &&
+          cast<MemSDNode>(User)->getChain() == Chain &&
          !User->hasAnyUseOfValue(1) &&
          User->getValueSizeInBits(0).getFixedValue() >
              RegVT.getFixedSizeInBits()) {
@@ -49819,9 +49820,13 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
          Extract = DAG.getBitcast(RegVT, Extract);
          return DCI.CombineTo(N, Extract, SDValue(User, 1));
        }
-        if (User->getOpcode() == X86ISD::VBROADCAST_LOAD &&
+        if ((User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
+             (ISD::isNormalLoad(User) &&
+              cast<LoadSDNode>(User)->getBasePtr() != Ptr)) &&
            getTargetConstantFromBasePtr(Ptr)) {
-          // See if we are loading a constant that has also been broadcast.
+          // See if we are loading a constant that has also been broadcast or
+          // we are loading a constant that also matches in the lower
+          // bits of a longer constant (but from a different constant pool ptr).
          APInt Undefs, UserUndefs;
          SmallVector<APInt> Bits, UserBits;
          if (getTargetConstantBitsFromNode(SDValue(N, 0), 8, Undefs, Bits) &&
+2 −2
@@ -1400,7 +1400,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-64-LABEL: f4xi64_i128:
; AVX-64:       # %bb.0:
; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1]
; AVX-64-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
; AVX-64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1535,7 +1535,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,3]
; AVX-64-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1]
; AVX-64-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+3 −3
@@ -2157,7 +2157,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7,8,9,10],ymm11[11],ymm10[12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm8, %ymm11, %ymm8
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
@@ -2329,7 +2329,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,5,1,u,4,u,u,u>
; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm12, %ymm11
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm10, %ymm12, %ymm10
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm6[2],ymm4[3,4,5],ymm6[6],ymm4[7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
@@ -2496,7 +2496,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7,8,9,10],ymm12[11],ymm11[12,13,14,15]
; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm8, %ymm12, %ymm8
; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm13
+1 −1
@@ -1685,7 +1685,7 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX2-ONLY-NEXT:    # ymm10 = mem[0,1,0,1]
; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm7, %ymm8, %ymm7
; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0>
; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm6, %ymm7, %ymm6
; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm7
; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[1,6,11]
+7 −8
@@ -1238,13 +1238,12 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[4,5,6,7]
; AVX512F-NEXT:    vmovdqa (%rdx), %ymm6
; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <5,5,u,6,6,u,7,7>
-; AVX512F-NEXT:    vpermd %ymm7, %ymm9, %ymm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-NEXT:    vpandn %ymm9, %ymm10, %ymm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
-; AVX512F-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm7, %ymm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <5,5,u,6,6,u,7,7>
+; AVX512F-NEXT:    vpermd %ymm7, %ymm11, %ymm7
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
@@ -1259,7 +1258,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
-; AVX512F-NEXT:    vpshufb %ymm10, %ymm6, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm6, %ymm1
; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
; AVX512F-NEXT:    vpermd %ymm6, %ymm2, %ymm2
; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]