[mlir] Strip away lambdas (NFC) (#143280)
We don't need lambdas here. Each of these lambdas merely negates an existing predicate, so we can pass the predicate directly and flip the surrounding query (any_of/all_of/none_of) instead.
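
For illustration only (not part of the diff): the rewrites below all reduce to De Morgan's law. The sketch uses a hypothetical predicate isValid standing in for VectorType::isValidElementType / ShapedType::isDynamic, and LLVM's range helpers from llvm/ADT/STLExtras.h.

  #include "llvm/ADT/STLExtras.h"
  #include <vector>

  // Hypothetical predicate; the real patch uses existing predicates such as
  // VectorType::isValidElementType or isSingletonLT.
  static bool isValid(int x) { return x > 0; }

  bool anyInvalid(const std::vector<int> &xs) {
    // Before: llvm::any_of(xs, [](int x) { return !isValid(x); });
    // After: "some element fails" is exactly "not all elements pass".
    return !llvm::all_of(xs, isValid);
  }

  bool allInvalid(const std::vector<int> &xs) {
    // Before: llvm::all_of(xs, [](int x) { return !isValid(x); });
    // After: "every element fails" is exactly "no element passes".
    return llvm::none_of(xs, isValid);
  }

Passing the predicate by name also keeps the intent visible at the call site, which is why the patch prefers flipping the algorithm over keeping the wrapper lambda.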
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index fe53d03..01cc500 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -402,9 +402,7 @@
return !VectorType::isValidElementType(type);
}))
return true;
- return llvm::any_of(op.getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- });
+ return !llvm::all_of(op.getResultTypes(), VectorType::isValidElementType);
});
SmallVector<NestedMatch, 8> opsMatched;
types.match(forOp, &opsMatched);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index afae84e..8c2db12 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -820,9 +820,8 @@
return failure();
}
- if (llvm::any_of(extractOp->getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(extractOp->getResultTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
@@ -2287,14 +2286,12 @@
})) {
continue;
}
- if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(innerOp.getOperandTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
- if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(innerOp.getResultTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
}
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2196199..34ae83b 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -799,8 +799,7 @@
"before singleton level";
auto *curCOOEnd = std::find_if_not(it, lvlTypes.end(), isSingletonLT);
- if (!std::all_of(it, curCOOEnd,
- [](LevelType i) { return isSingletonLT(i); }))
+ if (!std::all_of(it, curCOOEnd, isSingletonLT))
return emitError() << "expected all singleton lvlTypes "
"following a singleton level";
// We can potentially support mixed SoA/AoS singleton levels.
@@ -833,8 +832,7 @@
it != std::end(lvlTypes)) {
if (it != lvlTypes.end() - 1)
return emitError() << "expected n_out_of_m to be the last level type";
- if (!std::all_of(lvlTypes.begin(), it,
- [](LevelType i) { return isDenseLT(i); }))
+ if (!std::all_of(lvlTypes.begin(), it, isDenseLT))
return emitError() << "expected all dense lvlTypes "
"before a n_out_of_m level";
if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 6e67377..04242ca 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1061,8 +1061,7 @@
void EmptyOp::build(OpBuilder &builder, OperationState &result,
ArrayRef<int64_t> staticShape, Type elementType,
Attribute encoding) {
- assert(all_of(staticShape,
- [](int64_t sz) { return !ShapedType::isDynamic(sz); }) &&
+ assert(none_of(staticShape, ShapedType::isDynamic) &&
"expected only static sizes");
build(builder, result, staticShape, elementType, ValueRange{}, encoding);
}