Commit c0a26a35 authored by Nicolas Vasilache

Revert "[mlir][Linalg] Add tensor support to Linalg EDSC Builders"

This reverts commit 89e19e8e.

This committed a stale version of the diff https://reviews.llvm.org/D72863. I apologize, reverting.
parent 89e19e8e
+18 −116
@@ -110,14 +110,11 @@ struct StructuredIndexed {

  operator Value() const /* implicit */ { return value; }
  ArrayRef<AffineExpr> getExprs() { return exprs; }
-  Type getType() { return value.getType(); }

private:
  StructuredIndexed(Value v, ArrayRef<AffineExpr> indexings)
      : value(v), exprs(indexings.begin(), indexings.end()) {
-    assert((v.getType().isa<MemRefType>() ||
-            v.getType().isa<RankedTensorType>()) &&
-           "MemRef or RankedTensor expected");
+    assert(v.getType().isa<MemRefType>() && "MemRefType expected");
  }
  StructuredIndexed(ValueHandle v, ArrayRef<AffineExpr> indexings)
      : StructuredIndexed(v.getValue(), indexings) {}
@@ -128,21 +125,9 @@ private:

inline void defaultRegionBuilder(ArrayRef<BlockArgument> args) {}

-/// Build a `linalg.generic` op with the specified inputs, outputs and region.
-///
-/// `otherValues` and `otherAttributes` may be passed and will be appended as
-/// operands and attributes respectively.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
Operation *makeGenericLinalgOp(
    ArrayRef<IterType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
-    ArrayRef<StructuredIndexed> outputs, ArrayRef<Type> resultTensorTypes = {},
+    ArrayRef<StructuredIndexed> outputs,
    function_ref<void(ArrayRef<BlockArgument>)> regionBuilder =
        defaultRegionBuilder,
    ArrayRef<Value> otherValues = {}, ArrayRef<Attribute> otherAttributes = {});
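For orientation, a minimal sketch of how this buffer-only entry point is driven after the revert. The scaffolding is hypothetical (a FuncOp `f` whose three arguments are 2-D memrefs, plus the ScopedContext idiom used by the EDSC tests); only the makeGenericLinalgOp call itself is the API declared above:

    // Hedged sketch: `f` and buildPointwiseAdd are hypothetical.
    void buildPointwiseAdd(FuncOp f) {
      using namespace mlir;
      using namespace mlir::edsc;
      using namespace mlir::edsc::intrinsics;
      OpBuilder builder(f.getBody());
      ScopedContext scope(builder, f.getLoc());
      AffineExpr i, j;
      bindDims(ScopedContext::getContext(), i, j);
      ValueHandle vA(f.getArgument(0)), vB(f.getArgument(1)), vC(f.getArgument(2));
      StructuredIndexed A(vA), B(vB), C(vC);
      // C(i, j) = A(i, j) + B(i, j); both iterators parallel, C updated in place.
      makeGenericLinalgOp({IterType::Parallel, IterType::Parallel},
                          /*inputs=*/{A({i, j}), B({i, j})},
                          /*outputs=*/{C({i, j})},
                          [](ArrayRef<BlockArgument> args) {
                            using edsc::op::operator+;
                            ValueHandle a(args[0]), b(args[1]);
                            linalg_yield(a + b);
                          });
    }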
@@ -182,77 +167,32 @@ void macRegionBuilder(ArrayRef<BlockArgument> args);
/// with in-place semantics and parallelism.

/// Unary pointwise operation (with broadcast) entry point.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
using UnaryPointwiseOpBuilder = function_ref<Value(ValueHandle)>;
Operation *linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
-                            StructuredIndexed I, StructuredIndexed O,
-                            ArrayRef<Type> resultTensorTypes = {});
+                            StructuredIndexed I, StructuredIndexed O);

/// Build a linalg.pointwise with all `parallel` iterators and a region that
/// computes `O = tanh(I)`. The client is responsible for specifying the proper
/// indexings when creating the StructuredIndexed.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
-Operation *linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O,
-                                 ArrayRef<Type> resultTensorTypes = {});
+Operation *linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O);

/// Binary pointwise operation (with broadcast) entry point.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
using BinaryPointwiseOpBuilder = function_ref<Value(ValueHandle, ValueHandle)>;
Operation *linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
                            StructuredIndexed I1, StructuredIndexed I2,
-                            StructuredIndexed O,
-                            ArrayRef<Type> resultTensorTypes = {});
+                            StructuredIndexed O);

/// Build a linalg.pointwise with all `parallel` iterators and a region that
/// computes `O = I1 + I2`. The client is responsible for specifying the proper
/// indexings when creating the StructuredIndexed.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
Operation *linalg_pointwise_add(StructuredIndexed I1, StructuredIndexed I2,
-                                StructuredIndexed O,
-                                ArrayRef<Type> resultTensorTypes = {});
+                                StructuredIndexed O);

/// Build a linalg.pointwise with all `parallel` iterators and a region that
/// computes `O = max(I1, I2)`. The client is responsible for specifying the
/// proper indexings when creating the StructuredIndexed.
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
Operation *linalg_pointwise_max(StructuredIndexed I1, StructuredIndexed I2,
-                                StructuredIndexed O,
-                                ArrayRef<Type> resultTensorTypes = {});
+                                StructuredIndexed O);

// TODO(ntv): Implement more useful pointwise operations on a per-need basis.
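As a combined usage sketch for this family (hedged: `vA`, `vB`, `vO` are hypothetical ValueHandles over memrefs, and an EDSC ScopedContext is active):

    AffineExpr i, j;
    bindDims(ScopedContext::getContext(), i, j);
    StructuredIndexed A(vA), B(vB), O(vO);
    // O(i, j) = A(i, j) + B(j); indexing B with {j} alone expresses the broadcast.
    linalg_pointwise_add(A({i, j}), B({j}), O({i, j}));
    // O(i, j) = tanh(A(i, j)); unary, same all-parallel iteration space.
    linalg_pointwise_tanh(A({i, j}), O({i, j}));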

@@ -263,23 +203,11 @@ Operation *linalg_pointwise_max(StructuredIndexed I1, StructuredIndexed I2,
///    |
///    |  C(m, n) += A(m, k) * B(k, n)
/// ```
-///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
-Operation *linalg_matmul(ValueHandle vA, ValueHandle vB, ValueHandle vC,
-                         ArrayRef<Type> resultTensorTypes = {});
+Operation *linalg_matmul(ValueHandle vA, ValueHandle vB, ValueHandle vC);

-template <typename Container>
-Operation *linalg_matmul(Container values,
-                         ArrayRef<Type> resultTensorTypes = {}) {
+template <typename Container> Operation *linalg_matmul(Container values) {
  assert(values.size() == 3 && "Expected exactly 3 values");
-  assert(resultTensorTypes.size() <= 1 && "Expected at most 1 result tensor");
-  return linalg_matmul(values[0], values[1], values[2], resultTensorTypes);
+  return linalg_matmul(values[0], values[1], values[2]);
}
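A matching call-site sketch (same hypothetical memref-backed handles; after the revert all three operands are buffers and C is updated in place):

    // Emits a linalg.generic computing C(m, n) += A(m, k) * B(k, n).
    linalg_matmul(vA, vB, vC);
    // Equivalent through the container overload, which asserts exactly 3 values.
    SmallVector<ValueHandle, 3> abc{vA, vB, vC};
    linalg_matmul(abc);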

/// Build a linalg.generic, under the current ScopedContext, at the current
@@ -303,28 +231,16 @@ Operation *linalg_matmul(Container values,
///
/// For now `...` must be empty (i.e. only 2-D convolutions are supported).
///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
-//
// TODO(ntv) Extend convolution rank with some template magic.
Operation *linalg_conv_nhwc(ValueHandle vI, ValueHandle vW, ValueHandle vO,
-                            ArrayRef<Type> resultTensorTypes = {},
                            ArrayRef<int> strides = {},
                            ArrayRef<int> dilations = {});

template <typename Container>
-Operation *
-linalg_conv_nhwc(Container values, ArrayRef<Type> resultTensorTypes = {},
-                 ArrayRef<int> strides = {}, ArrayRef<int> dilations = {}) {
+Operation *linalg_conv_nhwc(Container values, ArrayRef<int> strides = {},
+                            ArrayRef<int> dilations = {}) {
  assert(values.size() == 3 && "Expected exactly 3 values");
-  assert(resultTensorTypes.size() <= 1 && "Expected at most 1 result tensor");
-  return linalg_conv_nhwc(values[0], values[1], values[2], resultTensorTypes,
-                          strides, dilations);
+  return linalg_conv_nhwc(values[0], values[1], values[2], strides, dilations);
}
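Call-site sketch (hypothetical handles: `vI` an NHWC input memref, `vW` an HWCF kernel memref, `vO` an NHWC output memref):

    // Empty strides/dilations default to 1 along each dimension.
    linalg_conv_nhwc(vI, vW, vO);
    // Strided 2-D variant.
    linalg_conv_nhwc(vI, vW, vO, /*strides=*/{2, 2}, /*dilations=*/{1, 1});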

/// Build a linalg.generic, under the current ScopedContext, at the current
@@ -333,7 +249,7 @@ linalg_conv_nhwc(Container values, ArrayRef<Type> resultTensorTypes = {},
///    (batch, dm, c, [h, w, ...], [kh, kw, ...]) =
///    |  (par, par, par, [par, par, ...], [red, red, ...])
///    |
-///    | O(batch, [h, w, ...], c * depthMultiplier) +=
+///    | O(batch, [h, w, ...], c * depth_multiplier) +=
///    |   I(batch,
///    |     [
///    |       stride[0] * h + dilations[0] * kh,
@@ -341,40 +257,26 @@ linalg_conv_nhwc(Container values, ArrayRef<Type> resultTensorTypes = {},
///          ],
///    |     c)
///    |   *
-///    |   W([kh, kw, ...], c, depthMultiplier)
+///    |   W([kh, kw, ...], c, depth_multiplier)
/// ```
/// If `dilations` or `strides` are left empty, the default value of `1` is used
/// along each relevant dimension.
///
/// For now `...` must be empty (i.e. only 2-D convolutions are supported).
///
-/// This accepts both buffers and tensors as `inputs` but only buffers as
-/// `outputs`. Output tensors can be specified with `resultTensorTypes`, in
-/// which case, the canonical identity indexing_map is assumed.
-//
-// TODO(ntv) In the future we may want to relax this identity assumption (e.g.
-// for automatic differentiation purposes). In that case we will want to make
-// StructuredIndexed work with ValueHandle to encode type or value.
-//
// TODO(ntv) Extend convolution rank with some template magic.
Operation *linalg_dilated_conv_nhwc(ValueHandle vI, ValueHandle vW,
-                                    ValueHandle vO,
-                                    ArrayRef<Type> resultTensorTypes = {},
-                                    int depthMultiplier = 1,
+                                    ValueHandle vO, int depth_multiplier = 1,
                                    ArrayRef<int> strides = {},
                                    ArrayRef<int> dilations = {});

template <typename Container>
-Operation *linalg_dilated_conv_nhwc(Container values,
-                                    ArrayRef<Type> resultTensorTypes = {},
-                                    int depthMultiplier = 1,
+Operation *linalg_dilated_conv_nhwc(Container values, int depth_multiplier,
                                    ArrayRef<int> strides = {},
                                    ArrayRef<int> dilations = {}) {
  assert(values.size() == 3 && "Expected exactly 3 values");
-  assert(resultTensorTypes.size() <= 1 && "Expected at most 1 result tensor");
  return linalg_dilated_conv_nhwc(values[0], values[1], values[2],
-                                  resultTensorTypes, depthMultiplier, strides,
-                                  dilations);
+                                  depth_multiplier, strides, dilations);
}

} // namespace ops
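And a sketch for the depthwise variant under the same assumptions; `depth_multiplier` expands the channel dimension of O exactly as in the formula above:

    // O(b, h, w, c * 2 + dm) accumulates I(...) * W(kh, kw, c, dm) for dm in [0, 2).
    linalg_dilated_conv_nhwc(vI, vW, vO, /*depth_multiplier=*/2,
                             /*strides=*/{1, 1}, /*dilations=*/{2, 2});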
+41 −91
@@ -128,20 +128,16 @@ static void getMaxDimIndex(ArrayRef<StructuredIndexed> structuredIndices,

Operation *mlir::edsc::makeGenericLinalgOp(
    ArrayRef<IterType> iteratorTypes, ArrayRef<StructuredIndexed> inputs,
-    ArrayRef<StructuredIndexed> outputBuffers, ArrayRef<Type> resultTensorTypes,
+    ArrayRef<StructuredIndexed> outputs,
    function_ref<void(ArrayRef<BlockArgument>)> regionBuilder,
    ArrayRef<Value> otherValues, ArrayRef<Attribute> otherAttributes) {
-  assert(
-      llvm::all_of(llvm::make_range(outputBuffers.begin(), outputBuffers.end()),
-                   [](Value v) { return v.getType().isa<MemRefType>(); }) &&
-      "output operands must all be buffers.");
  auto &builder = edsc::ScopedContext::getBuilder();
  auto *ctx = builder.getContext();
  unsigned nInputs = inputs.size();
-  unsigned nOutputs = outputBuffers.size() + resultTensorTypes.size();
+  unsigned nOutputs = outputs.size();
  unsigned maxPos = 0;
  getMaxDimIndex(inputs, maxPos);
-  getMaxDimIndex(outputBuffers, maxPos);
+  getMaxDimIndex(outputs, maxPos);
  // maxPos is 0 indexed, need to turn this into a count (i.e. +1)
  unsigned nDims = maxPos + 1;

@@ -150,7 +146,7 @@ Operation *mlir::edsc::makeGenericLinalgOp(
  for (auto in : inputs)
    maps.push_back(
        AffineMap::get(/*dimCount=*/nDims, /*symbolCount=*/0, in.getExprs()));
-  for (auto out : outputBuffers)
+  for (auto out : outputs)
    maps.push_back(
        AffineMap::get(/*dimCount=*/nDims, /*symbolCount=*/0, out.getExprs()));
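For intuition: with nDims == 2 and exprs {d0, d1}, each push_back above builds the identity indexing map. A hedged standalone equivalent of one such map:

    AffineExpr d0, d1;
    bindDims(ctx, d0, d1);
    // (d0, d1) -> (d0, d1): the view is addressed directly by the loop indices.
    AffineMap id = AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0, {d0, d1});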

@@ -158,7 +154,7 @@ Operation *mlir::edsc::makeGenericLinalgOp(
  SmallVector<Value, 4> values;
  values.reserve(nViews);
  values.append(inputs.begin(), inputs.end());
-  values.append(outputBuffers.begin(), outputBuffers.end());
+  values.append(outputs.begin(), outputs.end());

  auto iteratorStrTypes = functional::map(toString, iteratorTypes);
  // clang-format off
@@ -166,7 +162,7 @@ Operation *mlir::edsc::makeGenericLinalgOp(
      edsc::ScopedContext::getBuilder()
          .create<linalg::GenericOp>(
              edsc::ScopedContext::getLocation(),
-              resultTensorTypes,
+              ArrayRef<Type>{}, // TODO(ntv): support tensors
              values,
              IntegerAttr::get(IntegerType::get(64, ctx), nInputs),
              IntegerAttr::get(IntegerType::get(64, ctx), nOutputs),
@@ -211,8 +207,7 @@ void mlir::edsc::ops::macRegionBuilder(ArrayRef<BlockArgument> args) {

Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
                                             StructuredIndexed I,
-                                             StructuredIndexed O,
-                                             ArrayRef<Type> resultTensorTypes) {
+                                             StructuredIndexed O) {
  SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
                                           edsc::IterType::Parallel);
  auto fun = [&unaryOp](ArrayRef<BlockArgument> args) {
@@ -220,30 +215,22 @@ Operation *mlir::edsc::ops::linalg_pointwise(UnaryPointwiseOpBuilder unaryOp,
    ValueHandle a(args[0]);
    linalg_yield(unaryOp(a));
  };
-
-  // Distinguish between tensor and buffer semantics.
-  if (O.getType().isa<MemRefType>()) {
-    assert(resultTensorTypes.empty());
-    return makeGenericLinalgOp(iterTypes, {I}, {O}, {}, fun);
-  }
-  return makeGenericLinalgOp(iterTypes, {I, O}, {}, resultTensorTypes, fun);
+  return makeGenericLinalgOp(iterTypes, {I}, {O}, fun);
}

-Operation *
-mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I, StructuredIndexed O,
-                                       ArrayRef<Type> resultTensorTypes) {
+Operation *mlir::edsc::ops::linalg_pointwise_tanh(StructuredIndexed I,
+                                                  StructuredIndexed O) {
  using edsc::intrinsics::tanh;
  UnaryPointwiseOpBuilder unOp([](ValueHandle a) -> Value { return tanh(a); });
-  return linalg_pointwise(unOp, I, O, resultTensorTypes);
+  return linalg_pointwise(unOp, I, O);
}

/// Binary pointwise operation (with broadcast) entry point.
Operation *mlir::edsc::ops::linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
                                             StructuredIndexed I1,
                                             StructuredIndexed I2,
-                                             StructuredIndexed O,
-                                             ArrayRef<Type> resultTensorTypes) {
+                                             StructuredIndexed O) {
  SmallVector<edsc::IterType, 4> iterTypes(O.getExprs().size(),
                                           edsc::IterType::Parallel);
  auto fun = [&binaryOp](ArrayRef<BlockArgument> args) {
@@ -251,62 +238,45 @@ Operation *mlir::edsc::ops::linalg_pointwise(BinaryPointwiseOpBuilder binaryOp,
    ValueHandle a(args[0]), b(args[1]);
    linalg_yield(binaryOp(a, b));
  };
-  // Distinguish between tensor and buffer semantics.
-  if (O.getType().isa<MemRefType>()) {
-    assert(resultTensorTypes.empty());
-    return makeGenericLinalgOp(iterTypes, {I1, I2}, {O}, {}, fun);
-  }
-  return makeGenericLinalgOp(iterTypes, {I1, I2, O}, {}, resultTensorTypes,
-                             fun);
+  return makeGenericLinalgOp(iterTypes, {I1, I2}, {O}, fun);
}

-Operation *
-mlir::edsc::ops::linalg_pointwise_add(StructuredIndexed I1,
-                                      StructuredIndexed I2, StructuredIndexed O,
-                                      ArrayRef<Type> resultTensorTypes) {
+Operation *mlir::edsc::ops::linalg_pointwise_add(StructuredIndexed I1,
+                                                 StructuredIndexed I2,
+                                                 StructuredIndexed O) {
  using edsc::op::operator+;
  BinaryPointwiseOpBuilder binOp(
      [](ValueHandle a, ValueHandle b) -> Value { return a + b; });
-  return linalg_pointwise(binOp, I1, I2, O, resultTensorTypes);
+  return linalg_pointwise(binOp, I1, I2, O);
}

-Operation *
-mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1,
-                                      StructuredIndexed I2, StructuredIndexed O,
-                                      ArrayRef<Type> resultTensorTypes) {
+Operation *mlir::edsc::ops::linalg_pointwise_max(StructuredIndexed I1,
+                                                 StructuredIndexed I2,
+                                                 StructuredIndexed O) {
  BinaryPointwiseOpBuilder binOp([](ValueHandle a, ValueHandle b) -> Value {
    using edsc::intrinsics::select;
    using edsc::op::operator>;
    return select(a > b, a, b).getValue();
  });
-  return linalg_pointwise(binOp, I1, I2, O, resultTensorTypes);
+  return linalg_pointwise(binOp, I1, I2, O);
}

Operation *mlir::edsc::ops::linalg_matmul(ValueHandle vA, ValueHandle vB,
-                                          ValueHandle vC,
-                                          ArrayRef<Type> resultTensorTypes) {
+                                          ValueHandle vC) {
  // clang-format off
  AffineExpr m, n, k;
  bindDims(ScopedContext::getContext(), m, n, k);
  StructuredIndexed A(vA), B(vB), C(vC);
-
-  assert(!C.getType().isa<MemRefType>() || resultTensorTypes.empty());
-  StructuredIndexed allIndexed[3]{A({m, k}), B({k, n}), C({m, n})};
-  ArrayRef<StructuredIndexed> inputs =
-      (C.getType().isa<MemRefType>())
-          ? ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 2}
-          : ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 3};
-  ArrayRef<StructuredIndexed> outputs =
-      (C.getType().isa<MemRefType>())
-          ? ArrayRef<StructuredIndexed>{allIndexed + 2, allIndexed + 3}
-          : ArrayRef<StructuredIndexed>{};
  return makeGenericLinalgOp(
-      {IterType::Parallel, IterType::Parallel, IterType::Reduction}, inputs,
-      outputs, resultTensorTypes, macRegionBuilder);
+    {IterType::Parallel, IterType::Parallel, IterType::Reduction},
+    {A({m, k}), B({k, n})},
+    {C({m, n})},
+    macRegionBuilder);
  // clang-format on
}

Operation *mlir::edsc::ops::linalg_conv_nhwc(ValueHandle vI, ValueHandle vW,
                                             ValueHandle vO,
-                                             ArrayRef<Type> resultTensorTypes,
                                             ArrayRef<int> strides,
                                             ArrayRef<int> dilations) {
  MLIRContext *ctx = ScopedContext::getContext();
@@ -324,33 +294,23 @@ Operation *mlir::edsc::ops::linalg_conv_nhwc(ValueHandle vI, ValueHandle vW,
  bindDims(ctx, b, f, h, w, kh, kw, c);
  unsigned numDims = c.cast<AffineDimExpr>().getPosition() + 1;
  StructuredIndexed I(vI), W(vW), O(vO);
-
-  assert(!O.getType().isa<MemRefType>() || resultTensorTypes.empty());
-  // Roundtrip to flattened form to serve as canonicalization and ensure
-  // consistent ordering of subexpressions.
  // clang-format off
-  StructuredIndexed allIndexed[3] = {
+  return makeGenericLinalgOp(
+    {par, par, par, par, red, red, red}, {
      I({b,
+         // Roundtrip to flattened form to serve as canonicalization and ensure
+         // consistent ordering of subexpressions.
         simplifyAffineExpr(s[0] * h + d[0] * kh, numDims, 0),
         simplifyAffineExpr(s[1] * w + d[1] * kw, numDims, 0),
         c}),
-      W({kh, kw, c, f}),
-      O({b, h, w, f})};
+      W({kh, kw, c, f})}, {
+      O({b, h, w, f})},
+    macRegionBuilder);
  // clang-format on
-  auto inputs = (O.getType().isa<MemRefType>())
-                    ? ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 2}
-                    : ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 3};
-  ArrayRef<StructuredIndexed> outputs =
-      (O.getType().isa<MemRefType>())
-          ? ArrayRef<StructuredIndexed>{allIndexed + 2, allIndexed + 3}
-          : ArrayRef<StructuredIndexed>{};
-  return makeGenericLinalgOp({par, par, par, par, red, red, red}, inputs,
-                             outputs, resultTensorTypes, macRegionBuilder);
}

Operation *mlir::edsc::ops::linalg_dilated_conv_nhwc(
-    ValueHandle vI, ValueHandle vW, ValueHandle vO,
-    ArrayRef<Type> resultTensorTypes, int depthMultiplier,
+    ValueHandle vI, ValueHandle vW, ValueHandle vO, int depth_multiplier,
    ArrayRef<int> strides, ArrayRef<int> dilations) {
  MLIRContext *ctx = ScopedContext::getContext();
  // TODO(ntv) some template magic to make everything rank-polymorphic.
@@ -368,26 +328,16 @@ Operation *mlir::edsc::ops::linalg_dilated_conv_nhwc(
  bindDims(ctx, b, dm, c, h, w, kh, kw);
  unsigned numDims = kw.cast<AffineDimExpr>().getPosition() + 1;
  StructuredIndexed I(vI), W(vW), O(vO);
-  // Roundtrip to flattened form to serve as canonicalization and ensure
-  // consistent ordering of subexpressions.
  // clang-format off
-  StructuredIndexed allIndexed[3] = {
+  return makeGenericLinalgOp(
+    {par, par, par, par, par, red, red}, {
      I({b,
+         // Roundtrip to flattened form to serve as canonicalization and ensure
+         // consistent ordering of subexpressions.
         simplifyAffineExpr(s[0] * h + d[0] * kh, numDims, 0),
         simplifyAffineExpr(s[1] * w + d[1] * kw, numDims, 0),
         c}),
-      W({kh, kw, c, dm}),
-      O({b, h, w, simplifyAffineExpr(c * depthMultiplier + dm, numDims, 0)})};
+      W({kh, kw, c, dm})}, {
+      O({b, h, w, simplifyAffineExpr(c * depth_multiplier + dm, numDims, 0)})},
+    macRegionBuilder);
  // clang-format on
-  auto inputs = (O.getType().isa<MemRefType>())
-                    ? ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 2}
-                    : ArrayRef<StructuredIndexed>{allIndexed, allIndexed + 3};
-  ArrayRef<StructuredIndexed> outputs =
-      (O.getType().isa<MemRefType>())
-          ? ArrayRef<StructuredIndexed>{allIndexed + 2, allIndexed + 3}
-          : ArrayRef<StructuredIndexed>{};
-  return makeGenericLinalgOp({par, par, par, par, par, red, red}, inputs,
-                             outputs, resultTensorTypes, macRegionBuilder);
}
+5 −39 (third file changed; diff collapsed in the preview: size limit exceeded)