Commit 50aeeed8 authored by Lei Zhang's avatar Lei Zhang
Browse files

[mlir][spirv] Use spv.entry_point_abi in GPU to SPIR-V conversions

We have spv.entry_point_abi for specifying the local workgroup size.
It should be decorated onto input gpu.func ops to drive the SPIR-V
CodeGen to generate the proper SPIR-V module execution mode. Compared
to using command-line options for specifying the configuration, using
attributes also has the benefits that 1) we are now able to use
different local workgroup sizes for different entry points and 2) the
tests contain the configuration directly.

Differential Revision: https://reviews.llvm.org/D74012
parent 9559834a
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -17,13 +17,13 @@

namespace mlir {
class SPIRVTypeConverter;

/// Appends to a pattern list additional patterns for translating GPU Ops to
/// SPIR-V ops. Needs the workgroup size as input since SPIR-V/Vulkan requires
/// the workgroup size to be statically specified.
/// SPIR-V ops. For a gpu.func to be converted, it should have a
/// spv.entry_point_abi attribute.
void populateGPUToSPIRVPatterns(MLIRContext *context,
                                SPIRVTypeConverter &typeConverter,
                                OwningRewritePatternList &patterns,
                                ArrayRef<int64_t> workGroupSize);
                                OwningRewritePatternList &patterns);
} // namespace mlir

#endif // MLIR_CONVERSION_GPUTOSPIRV_CONVERTGPUTOSPIRV_H
+3 −4
Original line number Diff line number Diff line
@@ -22,10 +22,9 @@ namespace mlir {
class ModuleOp;
template <typename T> class OpPassBase;

/// Pass to convert GPU Ops to SPIR-V ops.  Needs the workgroup size as input
/// since SPIR-V/Vulkan requires the workgroup size to be statically specified.
std::unique_ptr<OpPassBase<ModuleOp>>
createConvertGPUToSPIRVPass(ArrayRef<int64_t> workGroupSize);
/// Pass to convert GPU Ops to SPIR-V ops. For a gpu.func to be converted, it
/// should have a spv.entry_point_abi attribute.
std::unique_ptr<OpPassBase<ModuleOp>> createConvertGPUToSPIRVPass();

} // namespace mlir
#endif // MLIR_CONVERSION_GPUTOSPIRV_CONVERTGPUTOSPIRVPASS_H
+9 −5
Original line number Diff line number Diff line
@@ -112,6 +112,15 @@ StringRef getEntryPointABIAttrName();
EntryPointABIAttr getEntryPointABIAttr(ArrayRef<int32_t> localSize,
                                       MLIRContext *context);

/// Queries the entry point ABI on the nearest function-like op containing the
/// given `op`. Returns null attribute if not found.
EntryPointABIAttr lookupEntryPointABI(Operation *op);

/// Queries the local workgroup size from entry point ABI on the nearest
/// function-like op containing the given `op`. Returns null attribute if not
/// found.
DenseIntElementsAttr lookupLocalWorkGroupSize(Operation *op);

/// Returns a default resource limits attribute that uses numbers from
/// "Table 46. Required Limits" of the Vulkan spec.
ResourceLimitsAttr getDefaultResourceLimits(MLIRContext *context);
@@ -128,11 +137,6 @@ TargetEnvAttr getDefaultTargetEnv(MLIRContext *context);
/// extensions) if not provided.
TargetEnvAttr lookupTargetEnvOrDefault(Operation *op);

/// Queries the local workgroup size from entry point ABI on the nearest
/// function-like op containing the given `op`. Returns null attribute if not
/// found.
DenseIntElementsAttr lookupLocalWorkGroupSize(Operation *op);

} // namespace spirv
} // namespace mlir

+15 −25
Original line number Diff line number Diff line
@@ -82,16 +82,9 @@ public:
};

/// Pattern to convert a kernel function in GPU dialect within a spv.module.
class KernelFnConversion final : public SPIRVOpLowering<gpu::GPUFuncOp> {
class GPUFuncOpConversion final : public SPIRVOpLowering<gpu::GPUFuncOp> {
public:
  KernelFnConversion(MLIRContext *context, SPIRVTypeConverter &converter,
                     ArrayRef<int64_t> workGroupSize,
                     PatternBenefit benefit = 1)
      : SPIRVOpLowering<gpu::GPUFuncOp>(context, converter, benefit) {
    auto config = workGroupSize.take_front(3);
    workGroupSizeAsInt32.assign(config.begin(), config.end());
    workGroupSizeAsInt32.resize(3, 1);
  }
  using SPIRVOpLowering<gpu::GPUFuncOp>::SPIRVOpLowering;

  PatternMatchResult
  matchAndRewrite(gpu::GPUFuncOp funcOp, ArrayRef<Value> operands,
@@ -352,13 +345,11 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, SPIRVTypeConverter &typeConverter,
  return newFuncOp;
}

PatternMatchResult
KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp,
                                    ArrayRef<Value> operands,
PatternMatchResult GPUFuncOpConversion::matchAndRewrite(
    gpu::GPUFuncOp funcOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (!gpu::GPUDialect::isKernel(funcOp)) {
  if (!gpu::GPUDialect::isKernel(funcOp))
    return matchFailure();
  }

  SmallVector<spirv::InterfaceVarABIAttr, 4> argABI;
  for (auto argNum : llvm::seq<unsigned>(0, funcOp.getNumArguments())) {
@@ -366,14 +357,15 @@ KernelFnConversion::matchAndRewrite(gpu::GPUFuncOp funcOp,
        0, argNum, spirv::StorageClass::StorageBuffer, rewriter.getContext()));
  }

  auto context = rewriter.getContext();
  auto entryPointAttr =
      spirv::getEntryPointABIAttr(workGroupSizeAsInt32, context);
  auto entryPointAttr = spirv::lookupEntryPointABI(funcOp);
  if (!entryPointAttr) {
    funcOp.emitRemark("match failure: missing 'spv.entry_point_abi' attribute");
    return matchFailure();
  }
  FuncOp newFuncOp = lowerAsEntryFunction(funcOp, typeConverter, rewriter,
                                          entryPointAttr, argABI);
  if (!newFuncOp) {
  if (!newFuncOp)
    return matchFailure();
  }
  newFuncOp.removeAttr(Identifier::get(gpu::GPUDialect::getKernelFuncAttrName(),
                                       rewriter.getContext()));
  return matchSuccess();
@@ -429,13 +421,11 @@ namespace {

void mlir::populateGPUToSPIRVPatterns(MLIRContext *context,
                                      SPIRVTypeConverter &typeConverter,
                                      OwningRewritePatternList &patterns,
                                      ArrayRef<int64_t> workGroupSize) {
                                      OwningRewritePatternList &patterns) {
  populateWithGenerated(context, &patterns);
  patterns.insert<KernelFnConversion>(context, typeConverter, workGroupSize);
  patterns.insert<
      ForOpConversion, GPUModuleConversion, GPUReturnOpConversion,
      IfOpConversion,
      ForOpConversion, GPUFuncOpConversion, GPUModuleConversion,
      GPUReturnOpConversion, IfOpConversion,
      LaunchConfigConversion<gpu::BlockIdOp, spirv::BuiltIn::WorkgroupId>,
      LaunchConfigConversion<gpu::GridDimOp, spirv::BuiltIn::NumWorkgroups>,
      LaunchConfigConversion<gpu::ThreadIdOp,
+7 −24
Original line number Diff line number Diff line
@@ -24,33 +24,17 @@
using namespace mlir;

namespace {
/// Pass to lower GPU Dialect to SPIR-V. The pass only converts those functions
/// that have the "gpu.kernel" attribute, i.e. those functions that are
/// referenced in gpu::LaunchKernelOp operations. For each such function
/// Pass to lower GPU Dialect to SPIR-V. The pass only converts the gpu.func ops
/// inside gpu.module ops, i.e., the functions that are referenced in
/// gpu.launch_func ops. For each such function
///
/// 1) Create a spirv::ModuleOp, and clone the function into spirv::ModuleOp
/// (the original function is still needed by the gpu::LaunchKernelOp, so cannot
/// replace it).
///
/// 2) Lower the body of the spirv::ModuleOp.
class GPUToSPIRVPass : public ModulePass<GPUToSPIRVPass> {
public:
  GPUToSPIRVPass() = default;
  GPUToSPIRVPass(const GPUToSPIRVPass &) {}
  GPUToSPIRVPass(ArrayRef<int64_t> workGroupSize) {
    this->workGroupSize = workGroupSize;
  }

struct GPUToSPIRVPass : public ModulePass<GPUToSPIRVPass> {
  void runOnModule() override;

private:
  /// Command line option to specify the workgroup size.
  ListOption<int64_t> workGroupSize{
      *this, "workgroup-size",
      llvm::cl::desc(
          "Workgroup Sizes in the SPIR-V module for x, followed by y, followed "
          "by z dimension of the dispatch (others will be ignored)"),
      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
};
} // namespace

@@ -70,7 +54,7 @@ void GPUToSPIRVPass::runOnModule() {

  SPIRVTypeConverter typeConverter;
  OwningRewritePatternList patterns;
  populateGPUToSPIRVPatterns(context, typeConverter, patterns, workGroupSize);
  populateGPUToSPIRVPatterns(context, typeConverter, patterns);
  populateStandardToSPIRVPatterns(context, typeConverter, patterns);

  std::unique_ptr<ConversionTarget> target = spirv::SPIRVConversionTarget::get(
@@ -84,9 +68,8 @@ void GPUToSPIRVPass::runOnModule() {
  }
}

std::unique_ptr<OpPassBase<ModuleOp>>
mlir::createConvertGPUToSPIRVPass(ArrayRef<int64_t> workGroupSize) {
  return std::make_unique<GPUToSPIRVPass>(workGroupSize);
std::unique_ptr<OpPassBase<ModuleOp>> mlir::createConvertGPUToSPIRVPass() {
  return std::make_unique<GPUToSPIRVPass>();
}

static PassRegistration<GPUToSPIRVPass>
Loading