Commit d891d738 authored by Rahul Joshi's avatar Rahul Joshi
Browse files

[MLIR][NFC] Adopt variadic isa<>

Differential Revision: https://reviews.llvm.org/D82489
parent 47ac4533
......@@ -859,8 +859,7 @@ void mlir::getDependenceComponents(
// Collect all load and store ops in loop nest rooted at 'forOp'.
SmallVector<Operation *, 8> loadAndStoreOpInsts;
forOp.getOperation()->walk([&](Operation *opInst) {
if (isa<AffineReadOpInterface>(opInst) ||
isa<AffineWriteOpInterface>(opInst))
if (isa<AffineReadOpInterface, AffineWriteOpInterface>(opInst))
loadAndStoreOpInsts.push_back(opInst);
});
......
......@@ -291,8 +291,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
// No vectorization across unknown regions.
auto regions = matcher::Op([](Operation &op) -> bool {
return op.getNumRegions() != 0 &&
!(isa<AffineIfOp>(op) || isa<AffineForOp>(op));
return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
});
SmallVector<NestedMatch, 8> regionsMatched;
regions.match(forOp, &regionsMatched);
......
......@@ -145,7 +145,7 @@ NestedPattern For(FilterFunctionType filter, ArrayRef<NestedPattern> nested) {
}
bool isLoadOrStore(Operation &op) {
return isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op);
return isa<AffineLoadOp, AffineStoreOp>(op);
}
} // end namespace matcher
......
......@@ -85,8 +85,7 @@ static void getBackwardSliceImpl(Operation *op,
if (!op)
return;
assert((op->getNumRegions() == 0 || isa<AffineForOp>(op) ||
isa<scf::ForOp>(op)) &&
assert((op->getNumRegions() == 0 || isa<AffineForOp, scf::ForOp>(op)) &&
"unexpected generic op with regions");
// Evaluate whether we should keep this def.
......
......@@ -196,7 +196,7 @@ LogicalResult MemRefRegion::unionBoundingBox(const MemRefRegion &other) {
LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
ComputationSliceState *sliceState,
bool addMemRefDimBounds) {
assert((isa<AffineReadOpInterface>(op) || isa<AffineWriteOpInterface>(op)) &&
assert((isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) &&
"affine read/write op expected");
MemRefAccess access(op);
......
......@@ -141,9 +141,8 @@ bool mlir::isValidDim(Value value) {
// This value has to be a block argument for an op that has the
// `AffineScope` trait or for an affine.for or affine.parallel.
auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
return parentOp &&
(parentOp->hasTrait<OpTrait::AffineScope>() ||
isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp));
return parentOp && (parentOp->hasTrait<OpTrait::AffineScope>() ||
isa<AffineForOp, AffineParallelOp>(parentOp));
}
// Value can be used as a dimension id iff it meets one of the following
......@@ -165,7 +164,7 @@ bool mlir::isValidDim(Value value, Region *region) {
// This value has to be a block argument for an affine.for or an
// affine.parallel.
auto *parentOp = value.cast<BlockArgument>().getOwner()->getParentOp();
return isa<AffineForOp>(parentOp) || isa<AffineParallelOp>(parentOp);
return isa<AffineForOp, AffineParallelOp>(parentOp);
}
// Affine apply operation is ok if all of its operands are ok.
......
......@@ -120,8 +120,7 @@ AffineDataCopyGeneration::runOnBlock(Block *block,
// Get to the first load, store, or for op (that is not a copy nest itself).
auto curBegin =
std::find_if(block->begin(), block->end(), [&](Operation &op) {
return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
isa<AffineForOp>(op)) &&
return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
copyNests.count(&op) == 0;
});
......@@ -171,8 +170,7 @@ AffineDataCopyGeneration::runOnBlock(Block *block,
}
// Get to the next load or store op after 'forOp'.
curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {
return (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op) ||
isa<AffineForOp>(op)) &&
return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
copyNests.count(&op) == 0;
});
it = curBegin;
......
......@@ -63,10 +63,7 @@ areAllOpsInTheBlockListInvariant(Region &blockList, Value indVar,
static bool isMemRefDereferencingOp(Operation &op) {
// TODO(asabne): Support DMA Ops.
if (isa<AffineLoadOp>(op) || isa<AffineStoreOp>(op)) {
return true;
}
return false;
return isa<AffineLoadOp, AffineStoreOp>(op);
}
// Returns true if the individual op is loop invariant.
......
......@@ -93,7 +93,7 @@ void SimplifyAffineStructures::runOnFunction() {
// The simplification of the attribute will likely simplify the op. Try to
// fold / apply canonicalization patterns when we have affine dialect ops.
if (isa<AffineForOp>(op) || isa<AffineIfOp>(op) || isa<AffineApplyOp>(op))
if (isa<AffineForOp, AffineIfOp, AffineApplyOp>(op))
applyOpPatternsAndFold(op, patterns);
});
......
......@@ -561,7 +561,7 @@ makePatterns(const DenseSet<Operation *> &parallelLoops, int vectorRank,
static NestedPattern &vectorTransferPattern() {
static auto pattern = matcher::Op([](Operation &op) {
return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
});
return pattern;
}
......
......@@ -54,7 +54,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
}
static bool isSinkingBeneficiary(Operation *op) {
return isa<ConstantOp>(op) || isa<DimOp>(op);
return isa<ConstantOp, DimOp>(op);
}
LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
......
......@@ -38,7 +38,7 @@ void mlir::linalg::hoistViewAllocOps(FuncOp func) {
while (changed) {
changed = false;
func.walk([&changed](Operation *op) {
if (!isa<AllocOp>(op) && !isa<AllocaOp>(op) && !isa<DeallocOp>(op))
if (!isa<AllocOp, AllocaOp, DeallocOp>(op))
return;
LLVM_DEBUG(DBGS() << "Candidate for hoisting: " << *op << "\n");
......@@ -64,15 +64,14 @@ void mlir::linalg::hoistViewAllocOps(FuncOp func) {
v = op->getResult(0);
}
if (v && !llvm::all_of(v.getUses(), [&](OpOperand &operand) {
return isa<ViewLikeOpInterface>(operand.getOwner()) ||
isa<DeallocOp>(operand.getOwner());
return isa<ViewLikeOpInterface, DeallocOp>(operand.getOwner());
})) {
LLVM_DEBUG(DBGS() << "Found non view-like or dealloc use: bail\n");
return;
}
// Move AllocOp before the loop.
if (isa<AllocOp>(op) || isa<AllocaOp>(op))
if (isa<AllocOp, AllocaOp>(op))
loop.moveOutOfLoop({op});
else // Move DeallocOp outside of the loop.
op->moveAfter(loop);
......
......@@ -37,7 +37,7 @@ LogicalResult mlir::linalg::interchangeGenericLinalgOpPrecondition(
if (interchangeVector.empty())
return failure();
// Transformation applies to generic ops only.
if (!isa<GenericOp>(op) && !isa<IndexedGenericOp>(op))
if (!isa<GenericOp, IndexedGenericOp>(op))
return failure();
LinalgOp linOp = cast<LinalgOp>(op);
// Transformation applies to buffers only.
......
......@@ -76,7 +76,7 @@ LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
for (Type outputTensorType : linalgOp.getOutputTensorTypes())
if (!outputTensorType.cast<ShapedType>().hasStaticShape())
return failure();
if (isa<linalg::MatmulOp>(op) || isa<linalg::FillOp>(op))
if (isa<linalg::MatmulOp, linalg::FillOp>(op))
return success();
auto genericOp = dyn_cast<linalg::GenericOp>(op);
......
......@@ -831,7 +831,7 @@ static LogicalResult verify(YieldOp op) {
auto results = parentOp->getResults();
auto operands = op.getOperands();
if (isa<IfOp>(parentOp) || isa<ForOp>(parentOp)) {
if (isa<IfOp, ForOp>(parentOp)) {
if (parentOp->getNumResults() != op.getNumOperands())
return op.emitOpError() << "parent of yield must have same number of "
"results as the yield operands";
......
......@@ -45,8 +45,7 @@ using namespace mlir::spirv;
static inline bool containsReturn(Region &region) {
return llvm::any_of(region, [](Block &block) {
Operation *terminator = block.getTerminator();
return isa<spirv::ReturnOp>(terminator) ||
isa<spirv::ReturnValueOp>(terminator);
return isa<spirv::ReturnOp, spirv::ReturnValueOp>(terminator);
});
}
......@@ -62,8 +61,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
// Return true here when inlining into spv.func, spv.selection, and
// spv.loop operations.
auto *op = dest->getParentOp();
return isa<spirv::FuncOp>(op) || isa<spirv::SelectionOp>(op) ||
isa<spirv::LoopOp>(op);
return isa<spirv::FuncOp, spirv::SelectionOp, spirv::LoopOp>(op);
}
/// Returns true if the given operation 'op', that is registered to this
......@@ -72,7 +70,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
bool isLegalToInline(Operation *op, Region *dest,
BlockAndValueMapping &) const final {
// TODO(antiagainst): Enable inlining structured control flows with return.
if ((isa<spirv::SelectionOp>(op) || isa<spirv::LoopOp>(op)) &&
if ((isa<spirv::SelectionOp, spirv::LoopOp>(op)) &&
containsReturn(op->getRegion(0)))
return false;
// TODO(antiagainst): we need to filter OpKill here to avoid inlining it to
......
......@@ -22,8 +22,7 @@ using namespace mlir;
//===----------------------------------------------------------------------===//
bool MemoryEffects::Effect::classof(const SideEffects::Effect *effect) {
return isa<Allocate>(effect) || isa<Free>(effect) || isa<Read>(effect) ||
isa<Write>(effect);
return isa<Allocate, Free, Read, Write>(effect);
}
//===----------------------------------------------------------------------===//
......
......@@ -97,7 +97,7 @@ private:
/// Returns true if the given pass is hidden from IR printing.
static bool isHiddenPass(Pass *pass) {
return isa<OpToOpPassAdaptor>(pass) || isa<VerifierPass>(pass);
return isa<OpToOpPassAdaptor, VerifierPass>(pass);
}
static void printIR(Operation *op, bool printModuleScope, raw_ostream &out,
......
......@@ -34,8 +34,7 @@ std::string tblgen::Dialect::getCppClassName() const {
static StringRef getAsStringOrEmpty(const llvm::Record &record,
StringRef fieldName) {
if (auto valueInit = record.getValueInit(fieldName)) {
if (llvm::isa<llvm::CodeInit>(valueInit) ||
llvm::isa<llvm::StringInit>(valueInit))
if (llvm::isa<llvm::CodeInit, llvm::StringInit>(valueInit))
return record.getValueAsString(fieldName);
}
return "";
......
......@@ -558,7 +558,7 @@ StringRef tblgen::Operator::getSummary() const {
bool tblgen::Operator::hasAssemblyFormat() const {
auto *valueInit = def.getValueInit("assemblyFormat");
return isa<llvm::CodeInit>(valueInit) || isa<llvm::StringInit>(valueInit);
return isa<llvm::CodeInit, llvm::StringInit>(valueInit);
}
StringRef tblgen::Operator::getAssemblyFormat() const {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment