//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
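  // HVX vectorization is opt-in: it requires both HVX support in the
  // subtarget and the -hexagon-autohvx flag.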
  return ST.useHVXOps() && HexagonAutoHVX;
}

bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
  assert(VecTy->isVectorTy());
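  // HVX registers hold fixed-size vectors, so scalable vectors can never
  // be mapped onto them.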
  if (isa<ScalableVectorType>(VecTy))
    return false;
  // Avoid types like <2 x i32*>.
  if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
    return false;
  EVT VecVT = EVT::getEVT(VecTy);
  if (!VecVT.isSimple() || VecVT.getSizeInBits() <= 64)
    return false;
  if (ST.isHVXVectorType(VecVT.getSimpleVT()))
    return true;
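  // A type that is not a native HVX vector may still be handled profitably
  // if the target would widen it to a full HVX vector.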
  auto Action = TLI.getPreferredVectorAction(VecVT.getSimpleVT());
  return Action == TargetLoweringBase::TypeWidenVector;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  UP.Runtime = UP.Partial = true;
  // Only try to peel innermost loops with small runtime trip counts.
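  // A zero result from getSmallConstantTripCount means the exact trip count
  // is not known at compile time; peel only when the known maximum is small.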
  if (L && L->empty() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    UP.PeelCount = 2;
  }
}

bool HexagonTTIImpl::shouldFavorPostInc() const {
  return true;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
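  // Hexagon has 32 general purpose scalar registers; the 32 HVX vector
  // registers are only reported when HVX vectorization is enabled.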
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 0;
}

unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
  return Vector ? getMinVectorRegisterBitWidth() : 32;
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
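  // An HVX vector register is ST.getVectorLength() bytes wide (64 or 128
  // depending on the HVX mode).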
  return useHVX() ? ST.getVectorLength()*8 : 0;
}

unsigned HexagonTTIImpl::getMinimumVF(unsigned ElemWidth) const {
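  // The minimum VF is the number of ElemWidth-bit lanes that fill one HVX
  // register, e.g. a 128-byte register gives a VF of 64 for 16-bit elements
  // (1024 / 16).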
  return (8 * ST.getVectorLength()) / ElemWidth;
}

unsigned HexagonTTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                  const APInt &DemandedElts,
                                                  bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

unsigned HexagonTTIImpl::getOperandsScalarizationOverhead(
      ArrayRef<const Value*> Args, unsigned VF) {
  return BaseT::getOperandsScalarizationOverhead(Args, VF);
}

unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys, TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

unsigned
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<int, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
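  // Hexagon's addressing modes absorb the address computation, so it is
  // treated as free here.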
  return 0;
}

unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         MaybeAlign Alignment,
                                         unsigned AddressSpace,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
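      // A whole multiple of the register width costs one load per register,
      // e.g. a 2048-bit vector on 1024-bit HVX registers costs two loads.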
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
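      // The factor of 3 is a heuristic: each aligned chunk needs the load
      // itself plus the work of inserting it into the wider HVX vector.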
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}

unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode,
      Type *Src, unsigned Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
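  // All shuffles are modeled with a flat unit cost.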
  return 1;
}

unsigned HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask,
    unsigned Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode,
      Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      unsigned Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond,
      bool UseMaskForGaps) {
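  // An interleaved group that uses every member and needs no masking is
  // equivalent to one wide access, so cost it as a plain memory operation.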
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
      Type *CondTy, TTI::TargetCostKind CostKind, const Instruction *I) {
  if (ValTy->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
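    // Penalize floating-point vector compares by FloatFactor per element.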
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
}

unsigned HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  if (Ty->isVectorTy()) {
    std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
      Type *SrcTy, TTI::TargetCostKind CostKind, const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
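    // Casts involving floating point are charged FloatFactor per FP lane
    // on each side of the conversion.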
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
    unsigned Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}

unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
      unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

int
HexagonTTIImpl::getUserCost(const User *U,
                            ArrayRef<const Value *> Operands,
                            TTI::TargetCostKind CostKind) {
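  // Hexagon's narrow loads sign- or zero-extend to 32 bits in the load
  // itself, so an integer extension fed by a single-use load costs nothing.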
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}