[mlir][NFC] update `mlir/Dialect` create APIs (33/n) (#150659)
See https://github.com/llvm/llvm-project/pull/147168 for background on this builder-API migration.
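
In short, each `builder.create<SomeOp>(...)` (or `rewriter.create<SomeOp>(...)`)
call becomes a call to the op's static `create` entry point, with the builder
passed as the first argument; results are retrieved exactly as before. A
minimal sketch of the mechanical rewrite, where `SomeOp` is a placeholder
rather than an op touched by this patch:

  // Old builder-first form:
  Value v = rewriter.create<SomeOp>(loc, resultType, operands).getResult();

  // New op-first form; behavior is unchanged (NFC):
  Value v = SomeOp::create(rewriter, loc, resultType, operands).getResult();

Ops built with a body-builder lambda (e.g. `scf::ForOp`, `linalg::GenericOp`)
follow the same pattern, with the lambda passed through unchanged.
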
GitOrigin-RevId: c090ed53fb73f59cf221f5610430af8047758117
diff --git a/lib/Dialect/AMX/IR/AMXDialect.cpp b/lib/Dialect/AMX/IR/AMXDialect.cpp
index 748ff1e..6f3110c 100644
--- a/lib/Dialect/AMX/IR/AMXDialect.cpp
+++ b/lib/Dialect/AMX/IR/AMXDialect.cpp
@@ -96,9 +96,8 @@
MemRefDescriptor memrefDescriptor(base);
auto attr = rewriter.getI64IntegerAttr(bytes);
Value scale = LLVM::ConstantOp::create(rewriter, loc, llvmInt64Type, attr);
- return rewriter
- .create<LLVM::MulOp>(loc, llvmInt64Type, scale,
- memrefDescriptor.stride(rewriter, loc, preLast))
+ return LLVM::MulOp::create(rewriter, loc, llvmInt64Type, scale,
+ memrefDescriptor.stride(rewriter, loc, preLast))
.getResult();
}
// Use direct constant for static stride.
diff --git a/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 994d485..f7b0b87 100644
--- a/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -688,8 +688,8 @@
if (failed(bufferType))
return failure();
ensureToBufferOpIsValid(value, *bufferType);
- return rewriter
- .create<bufferization::ToBufferOp>(value.getLoc(), *bufferType, value)
+ return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
+ *bufferType, value)
.getResult();
}
@@ -772,9 +772,8 @@
   // Default buffer allocation via AllocOp.
if (bufferAlignment != 0)
- return b
- .create<memref::AllocOp>(loc, type, dynShape,
- b.getI64IntegerAttr(bufferAlignment))
+ return memref::AllocOp::create(b, loc, type, dynShape,
+ b.getI64IntegerAttr(bufferAlignment))
.getResult();
return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}
diff --git a/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp b/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
index f0d65b0..e9ad13f 100644
--- a/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
+++ b/lib/Dialect/Bufferization/Transforms/LowerDeallocations.cpp
@@ -482,10 +482,10 @@
// Build the first for loop that computes aliasing with retained
// memrefs.
- Value noRetainAlias =
- builder
- .create<scf::ForOp>(
- loc, c0, toRetainSize, c1, trueValue,
+      Value noRetainAlias =
+          scf::ForOp::create(
+              builder, loc, c0, toRetainSize, c1, trueValue,
[&](OpBuilder &builder, Location loc, Value i,
ValueRange iterArgs) {
Value retainValue = memref::LoadOp::create(
@@ -512,14 +512,14 @@
builder, loc, iterArgs[0], doesntAlias);
scf::YieldOp::create(builder, loc, yieldValue);
})
- .getResult(0);
+ .getResult(0);
// Build the second for loop that adds aliasing with previously
// deallocated memrefs.
- Value noAlias =
- builder
- .create<scf::ForOp>(
- loc, c0, outerIter, c1, noRetainAlias,
+      Value noAlias =
+          scf::ForOp::create(
+              builder, loc, c0, outerIter, c1, noRetainAlias,
[&](OpBuilder &builder, Location loc, Value i,
ValueRange iterArgs) {
Value prevDeallocValue = memref::LoadOp::create(
@@ -531,7 +531,7 @@
builder, loc, iterArgs[0], doesntAlias);
scf::YieldOp::create(builder, loc, yieldValue);
})
- .getResult(0);
+ .getResult(0);
Value shouldDealoc = arith::AndIOp::create(builder, loc, noAlias, cond);
memref::StoreOp::create(builder, loc, shouldDealoc, deallocCondsMemref,
diff --git a/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp b/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
index 64c178d..725fa24 100644
--- a/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
+++ b/lib/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation.cpp
@@ -750,17 +750,16 @@
// Insert a runtime check and only clone if we still don't have ownership at
// runtime.
- Value maybeClone = builder
- .create<scf::IfOp>(
- memref.getLoc(), condition,
- [&](OpBuilder &builder, Location loc) {
- scf::YieldOp::create(builder, loc, newMemref);
- },
- [&](OpBuilder &builder, Location loc) {
- Value clone = bufferization::CloneOp::create(
- builder, loc, newMemref);
- scf::YieldOp::create(builder, loc, clone);
- })
+ Value maybeClone = scf::IfOp::create(
+ builder, memref.getLoc(), condition,
+ [&](OpBuilder &builder, Location loc) {
+ scf::YieldOp::create(builder, loc, newMemref);
+ },
+ [&](OpBuilder &builder, Location loc) {
+ Value clone = bufferization::CloneOp::create(
+ builder, loc, newMemref);
+ scf::YieldOp::create(builder, loc, clone);
+ })
.getResult(0);
Value trueVal = buildBoolValue(builder, memref.getLoc(), true);
state.updateOwnership(maybeClone, trueVal);
diff --git a/lib/Dialect/GPU/Transforms/ShuffleRewriter.cpp b/lib/Dialect/GPU/Transforms/ShuffleRewriter.cpp
index d88f4d5..8e05436 100644
--- a/lib/Dialect/GPU/Transforms/ShuffleRewriter.cpp
+++ b/lib/Dialect/GPU/Transforms/ShuffleRewriter.cpp
@@ -60,14 +60,12 @@
// Shuffle the values.
ValueRange loRes =
- rewriter
- .create<gpu::ShuffleOp>(op.getLoc(), lo, op.getOffset(),
- op.getWidth(), op.getMode())
+ gpu::ShuffleOp::create(rewriter, op.getLoc(), lo, op.getOffset(),
+ op.getWidth(), op.getMode())
.getResults();
ValueRange hiRes =
- rewriter
- .create<gpu::ShuffleOp>(op.getLoc(), hi, op.getOffset(),
- op.getWidth(), op.getMode())
+ gpu::ShuffleOp::create(rewriter, op.getLoc(), hi, op.getOffset(),
+ op.getWidth(), op.getMode())
.getResults();
// Convert lo back to i64.
diff --git a/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp b/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
index b9e2dd5..b45fdf3 100644
--- a/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
+++ b/lib/Dialect/GPU/Transforms/SubgroupReduceLowering.cpp
@@ -197,10 +197,9 @@
// Parallel reduction using butterfly shuffles.
for (unsigned i = ci.clusterStride; i < ci.clusterStride * ci.clusterSize;
i <<= 1) {
- Value shuffled = builder
- .create<gpu::ShuffleOp>(loc, packFn(laneVal), i,
- /*width=*/ci.subgroupSize,
- /*mode=*/gpu::ShuffleMode::XOR)
+ Value shuffled = gpu::ShuffleOp::create(builder, loc, packFn(laneVal), i,
+ /*width=*/ci.subgroupSize,
+ /*mode=*/gpu::ShuffleMode::XOR)
.getShuffleResult();
laneVal = vector::makeArithReduction(builder, loc,
gpu::convertReductionKind(mode),
diff --git a/lib/Dialect/MemRef/Transforms/IndependenceTransforms.cpp b/lib/Dialect/MemRef/Transforms/IndependenceTransforms.cpp
index 66c1aa6..d5e2b97 100644
--- a/lib/Dialect/MemRef/Transforms/IndependenceTransforms.cpp
+++ b/lib/Dialect/MemRef/Transforms/IndependenceTransforms.cpp
@@ -56,9 +56,8 @@
// Create a memref::SubViewOp.
SmallVector<OpFoldResult> offsets(newSizes.size(), b.getIndexAttr(0));
SmallVector<OpFoldResult> strides(newSizes.size(), b.getIndexAttr(1));
- return b
- .create<SubViewOp>(loc, newAllocaOp, offsets, allocaOp.getMixedSizes(),
- strides)
+ return SubViewOp::create(b, loc, newAllocaOp, offsets,
+ allocaOp.getMixedSizes(), strides)
.getResult();
}
diff --git a/lib/Dialect/MemRef/Transforms/RuntimeOpVerification.cpp b/lib/Dialect/MemRef/Transforms/RuntimeOpVerification.cpp
index 1f03e9a..d3a77c0 100644
--- a/lib/Dialect/MemRef/Transforms/RuntimeOpVerification.cpp
+++ b/lib/Dialect/MemRef/Transforms/RuntimeOpVerification.cpp
@@ -185,9 +185,8 @@
int64_t dim) -> Value {
return type.isDynamicDim(dim)
? DimOp::create(builder, loc, memRef, dim).getResult()
- : builder
- .create<arith::ConstantIndexOp>(loc,
- type.getDimSize(dim))
+ : arith::ConstantIndexOp::create(builder, loc,
+ type.getDimSize(dim))
.getResult();
};
Value sourceDim = getDimSize(copyOp.getSource(), rankedSourceType, i);
diff --git a/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp b/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp
index 58cd160..9e37bc5 100644
--- a/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp
+++ b/lib/Dialect/Quant/Transforms/LowerQuantOps.cpp
@@ -148,16 +148,14 @@
auto axisValue = arith::ConstantIndexOp::create(builder, loc, axis);
auto axisNextValue = arith::ConstantIndexOp::create(builder, loc, axis + 1);
auto shapeLeft =
- builder
- .create<shape::SplitAtOp>(loc, TypeRange{shapeType, shapeType},
- inputShape, axisValue)
+ shape::SplitAtOp::create(builder, loc, TypeRange{shapeType, shapeType},
+ inputShape, axisValue)
.getResult(0);
auto sizeLeft =
shape::NumElementsOp::create(builder, loc, indexType, shapeLeft);
auto shapeRight =
- builder
- .create<shape::SplitAtOp>(loc, TypeRange{shapeType, shapeType},
- inputShape, axisNextValue)
+ shape::SplitAtOp::create(builder, loc, TypeRange{shapeType, shapeType},
+ inputShape, axisNextValue)
.getResult(1);
auto sizeRight =
shape::NumElementsOp::create(builder, loc, indexType, shapeRight);
@@ -557,25 +555,24 @@
SmallVector<AffineMap> indexingMaps{
builder.getMultiDimIdentityMap(inputRank), channelAxisAffineMap,
channelAxisAffineMap, builder.getMultiDimIdentityMap(inputRank)};
- auto result = builder
- .create<linalg::GenericOp>(
- loc,
- init.getType(), // resultType
- ValueRange{input, scales, zeroPoints}, // inputs
- ValueRange{init}, // outputs
- indexingMaps, iteratorTypes,
- [&](OpBuilder &builder, Location loc, ValueRange args) {
- assert(args.size() == 4);
- auto input = args[0];
- auto scale = args[1];
- auto zeroPoint = args[2];
+ auto result = linalg::GenericOp::create(
+ builder, loc,
+ init.getType(), // resultType
+ ValueRange{input, scales, zeroPoints}, // inputs
+ ValueRange{init}, // outputs
+ indexingMaps, iteratorTypes,
+ [&](OpBuilder &builder, Location loc, ValueRange args) {
+ assert(args.size() == 4);
+ auto input = args[0];
+ auto scale = args[1];
+ auto zeroPoint = args[2];

-                          auto result =
-                              convertRanked(builder, loc, op, input, {}, scale,
-                                            zeroPoint, quantizedType);
+        auto result =
+            convertRanked(builder, loc, op, input, {}, scale,
+                          zeroPoint, quantizedType);

-                          linalg::YieldOp::create(builder, loc, result);
-                        })
+        linalg::YieldOp::create(builder, loc, result);
+      })
.getResult(0);
return result;
@@ -660,25 +657,24 @@
SmallVector<AffineMap> indexingMaps{
builder.getMultiDimIdentityMap(inputRank), affineMap, affineMap,
builder.getMultiDimIdentityMap(inputRank)};
- auto result = builder
- .create<linalg::GenericOp>(
- loc,
- init.getType(), // resultType
- ValueRange{input, scales, zeroPoints}, // inputs
- ValueRange{init}, // outputs
- indexingMaps, iteratorTypes,
- [&](OpBuilder &builder, Location loc, ValueRange args) {
- assert(args.size() == 4);
- auto input = args[0];
- auto scale = args[1];
- auto zeroPoint = args[2];
+ auto result = linalg::GenericOp::create(
+ builder, loc,
+ init.getType(), // resultType
+ ValueRange{input, scales, zeroPoints}, // inputs
+ ValueRange{init}, // outputs
+ indexingMaps, iteratorTypes,
+ [&](OpBuilder &builder, Location loc, ValueRange args) {
+ assert(args.size() == 4);
+ auto input = args[0];
+ auto scale = args[1];
+ auto zeroPoint = args[2];

-                          auto result =
-                              convertRanked(builder, loc, op, input, {}, scale,
-                                            zeroPoint, quantizedType);
+        auto result =
+            convertRanked(builder, loc, op, input, {}, scale,
+                          zeroPoint, quantizedType);

-                          linalg::YieldOp::create(builder, loc, result);
-                        })
+        linalg::YieldOp::create(builder, loc, result);
+      })
.getResult(0);
return result;
diff --git a/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp b/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
index 64c4d60..f8799c5 100644
--- a/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -497,10 +497,10 @@
size_t idx = it.index();
Value val = it.value();
if (tensorIndices.contains(idx)) {
- result.push_back(rewriter
- .create<bufferization::ToTensorOp>(
- val.getLoc(), oldBbArgs[idx].getType(), val)
- .getResult());
+ result.push_back(
+ bufferization::ToTensorOp::create(rewriter, val.getLoc(),
+ oldBbArgs[idx].getType(), val)
+ .getResult());
} else {
result.push_back(val);
}
diff --git a/lib/Dialect/SCF/Utils/Utils.cpp b/lib/Dialect/SCF/Utils/Utils.cpp
index 5b0c604..5731795 100644
--- a/lib/Dialect/SCF/Utils/Utils.cpp
+++ b/lib/Dialect/SCF/Utils/Utils.cpp
@@ -827,9 +827,8 @@
productOf = v;
}
if (!productOf) {
- productOf = rewriter
- .create<arith::ConstantOp>(
- loc, rewriter.getOneAttr(getType(values.front())))
+ productOf = arith::ConstantOp::create(
+ rewriter, loc, rewriter.getOneAttr(getType(values.front())))
.getResult();
}
return productOf.value();
diff --git a/lib/Dialect/Shape/IR/Shape.cpp b/lib/Dialect/Shape/IR/Shape.cpp
index e24f0f8..5ba8289 100644
--- a/lib/Dialect/Shape/IR/Shape.cpp
+++ b/lib/Dialect/Shape/IR/Shape.cpp
@@ -1702,9 +1702,8 @@
return failure();
Location loc = op.getLoc();
Value constShape =
- rewriter
- .create<ConstShapeOp>(loc,
- rewriter.getIndexTensorAttr(type.getShape()))
+ ConstShapeOp::create(rewriter, loc,
+ rewriter.getIndexTensorAttr(type.getShape()))
.getResult();
if (constShape.getType() != op.getResult().getType())
constShape = tensor::CastOp::create(rewriter, loc,
diff --git a/lib/Dialect/Shard/Transforms/Partition.cpp b/lib/Dialect/Shard/Transforms/Partition.cpp
index 5fe5566..3e3d476 100644
--- a/lib/Dialect/Shard/Transforms/Partition.cpp
+++ b/lib/Dialect/Shard/Transforms/Partition.cpp
@@ -70,10 +70,8 @@
TypedValue<ShapedType> sourceShard, GridOp grid,
int64_t splitTensorAxis, GridAxis splitGridAxis) {
TypedValue<ShapedType> targetShard = cast<TypedValue<ShapedType>>(
- builder
- .create<AllSliceOp>(sourceShard, grid,
- ArrayRef<GridAxis>(splitGridAxis),
- splitTensorAxis)
+ AllSliceOp::create(builder, sourceShard, grid,
+ ArrayRef<GridAxis>(splitGridAxis), splitTensorAxis)
.getResult());
Sharding targetSharding = targetShardingInSplitLastAxis(
builder.getContext(), sourceSharding, splitTensorAxis, splitGridAxis);
@@ -420,16 +418,15 @@
// Finally update the halo.
auto updateHaloResult =
- builder
- .create<UpdateHaloOp>(
- sourceShard.getLoc(),
- RankedTensorType::get(outShape,
- sourceShard.getType().getElementType()),
- initOprnd, grid.getSymName(),
- GridAxesArrayAttr::get(builder.getContext(),
- sourceSharding.getSplitAxes()),
- targetSharding.getDynamicHaloSizes(),
- targetSharding.getStaticHaloSizes())
+ UpdateHaloOp::create(
+ builder, sourceShard.getLoc(),
+ RankedTensorType::get(outShape,
+ sourceShard.getType().getElementType()),
+ initOprnd, grid.getSymName(),
+ GridAxesArrayAttr::get(builder.getContext(),
+ sourceSharding.getSplitAxes()),
+ targetSharding.getDynamicHaloSizes(),
+ targetSharding.getStaticHaloSizes())
.getResult();
return std::make_tuple(cast<TypedValue<ShapedType>>(updateHaloResult),
targetSharding);
diff --git a/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp b/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
index a52872d..3b4140e 100644
--- a/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
+++ b/lib/Dialect/SparseTensor/Transforms/SparseBufferRewriting.cpp
@@ -931,10 +931,9 @@
FlatSymbolRefAttr partitionFunc = getMangledSortHelperFunc(
builder, func, {IndexType::get(context)}, kPartitionFuncNamePrefix, xPerm,
ny, args.drop_back(nTrailingP), createPartitionFunc);
- Value p = builder
- .create<func::CallOp>(loc, partitionFunc,
- TypeRange{IndexType::get(context)},
- args.drop_back(nTrailingP))
+ Value p = func::CallOp::create(builder, loc, partitionFunc,
+ TypeRange{IndexType::get(context)},
+ args.drop_back(nTrailingP))
.getResult(0);
Value lenLow = arith::SubIOp::create(builder, loc, p, lo);
@@ -1028,9 +1027,8 @@
FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix,
xPerm, ny, operands, createBinarySearchFunc);
- Value p = builder
- .create<func::CallOp>(loc, searchFunc, TypeRange{c1.getType()},
- operands)
+ Value p = func::CallOp::create(builder, loc, searchFunc,
+ TypeRange{c1.getType()}, operands)
.getResult(0);
// Move the value at data[i] to a temporary location.
diff --git a/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
index a317abd..0bd1d34 100644
--- a/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -98,10 +98,10 @@
Value numT = constantIndex(builder, loc, numThreads);
gpu::KernelDim3 gridSize = {one, one, one};
gpu::KernelDim3 blckSize = {numT, one, one};
- return builder
- .create<gpu::LaunchFuncOp>(loc, gpuFunc, gridSize, blckSize,
- /*dynSharedMemSz*/ none, args,
- builder.getType<gpu::AsyncTokenType>(), tokens)
+ return gpu::LaunchFuncOp::create(builder, loc, gpuFunc, gridSize, blckSize,
+ /*dynSharedMemSz*/ none, args,
+ builder.getType<gpu::AsyncTokenType>(),
+ tokens)
.getAsyncToken();
}
@@ -1168,7 +1168,7 @@
using OpRewritePattern<scf::ParallelOp>::OpRewritePattern;
ForallRewriter(MLIRContext *context, unsigned nT)
- : OpRewritePattern(context), numThreads(nT){};
+ : OpRewritePattern(context), numThreads(nT) {};
LogicalResult matchAndRewrite(scf::ParallelOp forallOp,
PatternRewriter &rewriter) const override {
diff --git a/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp b/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
index dfb1274..9cd4896 100644
--- a/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
+++ b/lib/Dialect/SparseTensor/Transforms/SparseIterationToScf.cpp
@@ -443,8 +443,8 @@
addSourceMaterialization([](OpBuilder &builder, IterSpaceType spTp,
ValueRange inputs, Location loc) -> Value {
- return builder
- .create<UnrealizedConversionCastOp>(loc, TypeRange(spTp), inputs)
+ return UnrealizedConversionCastOp::create(builder, loc, TypeRange(spTp),
+ inputs)
.getResult(0);
});
}
diff --git a/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
index 70795e2..7a26cd3 100644
--- a/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
+++ b/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp
@@ -412,13 +412,13 @@
if (memTp.getRank() > 1)
return mem;
// Truncate linear memrefs to given size.
- return builder
- .create<memref::SubViewOp>(
- loc, MemRefType::get({ShapedType::kDynamic}, memTp.getElementType()),
- mem, ValueRange{}, ValueRange{sz}, ValueRange{},
- ArrayRef<int64_t>{0}, // static offset
- ArrayRef<int64_t>{ShapedType::kDynamic}, // dynamic size
- ArrayRef<int64_t>{1}) // static stride
+ return memref::SubViewOp::create(
+ builder, loc,
+ MemRefType::get({ShapedType::kDynamic}, memTp.getElementType()),
+ mem, ValueRange{}, ValueRange{sz}, ValueRange{},
+ ArrayRef<int64_t>{0}, // static offset
+ ArrayRef<int64_t>{ShapedType::kDynamic}, // dynamic size
+ ArrayRef<int64_t>{1}) // static stride
.getResult();
}
@@ -449,7 +449,7 @@
public:
SparseInsertGenerator(TensorType rtp, TypeRange retTypes, ValueRange params,
bool genCall)
- : FuncCallOrInlineGenerator(retTypes, params, genCall), rtp(rtp){};
+ : FuncCallOrInlineGenerator(retTypes, params, genCall), rtp(rtp) {};
/// Generates code along an insertion path without the need for a "cursor".
/// This current insertion strategy comes at the expense of some testing
diff --git a/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp b/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
index b444ac5..79f4e7f 100644
--- a/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
+++ b/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp
@@ -904,9 +904,8 @@
dstTp->withoutDimToLvl(),
!srcTp->isAllOrdered() || !srcTp->isIdentity() || !dstTp->isIdentity());
SmallVector<Value> dynSizes;
- Value buffer = rewriter
- .create<AllocTensorOp>(loc, bufferTp, dynSizes, Value(),
- nnz, Attribute())
+ Value buffer = AllocTensorOp::create(rewriter, loc, bufferTp, dynSizes,
+ Value(), nnz, Attribute())
.getResult();
// Convert src coordinates to dst coordinates by first collapsing it to 1D
@@ -1013,9 +1012,8 @@
!srcTp.isAllOrdered() || !srcTp.isIdentity() || !dstTp.isIdentity());
Value buffer =
- rewriter
- .create<AllocTensorOp>(loc, bufferTp, dstDynSizes, Value(),
- /*sizeHint=*/nnz, Attribute())
+ AllocTensorOp::create(rewriter, loc, bufferTp, dstDynSizes, Value(),
+ /*sizeHint=*/nnz, Attribute())
.getResult();
// Implement the sparse2sparse reshape as follows:
diff --git a/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index bc11e56..c3356c1 100644
--- a/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -784,8 +784,8 @@
auto toValue = [&](OpFoldResult ofr) {
if (auto value = dyn_cast<Value>(ofr))
return value;
- return rewriter
- .create<arith::ConstantIndexOp>(loc, *getConstantIntValue(ofr))
+ return arith::ConstantIndexOp::create(rewriter, loc,
+ *getConstantIntValue(ofr))
.getResult();
};
@@ -919,9 +919,8 @@
auto memrefType = MemRefType::get(
srcType.getShape(), srcType.getElementType(), AffineMap(),
cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
- srcBuffer = rewriter
- .create<bufferization::ToBufferOp>(
- op->getLoc(), memrefType, *tensorAlloc)
+ srcBuffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
+ memrefType, *tensorAlloc)
.getResult();
}
diff --git a/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp b/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
index 43d9d70..9fd27d3 100644
--- a/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
+++ b/lib/Dialect/Tensor/Transforms/IndependenceTransforms.cpp
@@ -130,8 +130,7 @@
// Create a tensor::ExtractSliceOp.
SmallVector<OpFoldResult> offsets(newSizes.size(), b.getIndexAttr(0));
SmallVector<OpFoldResult> strides(newSizes.size(), b.getIndexAttr(1));
- return b
- .create<ExtractSliceOp>(loc, newEmptyOp, offsets, emptyOp.getMixedSizes(),
- strides)
+ return ExtractSliceOp::create(b, loc, newEmptyOp, offsets,
+ emptyOp.getMixedSizes(), strides)
.getResult();
}
diff --git a/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index e0af2f7..2ec23e1 100644
--- a/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -385,10 +385,9 @@
return getValueOrCreateConstantIndexOp(rewriter, loc, ofr);
});
OpFoldResult collapsedOffset =
- rewriter
- .create<affine::AffineLinearizeIndexOp>(loc, offsetVals,
- reassocGroupSizes,
- /*disjoint=*/true)
+ affine::AffineLinearizeIndexOp::create(rewriter, loc, offsetVals,
+ reassocGroupSizes,
+ /*disjoint=*/true)
.getResult();
collapsedOffsets.push_back(collapsedOffset);
collapsedSizes.push_back(collapsedSize);
diff --git a/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 1ad2c80..6d2cbb5 100644
--- a/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -707,9 +707,8 @@
auto size_op =
getTosaConstShape(rewriter, sliceOp.getLoc(), sliceSizes);
replaceWithSlice =
- rewriter
- .create<tosa::SliceOp>(sliceOp.getLoc(), sliceOp.getType(),
- input, start_op, size_op)
+ tosa::SliceOp::create(rewriter, sliceOp.getLoc(), sliceOp.getType(),
+ input, start_op, size_op)
.getResult();
break;
}
diff --git a/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp b/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
index 9474299..0bec0da 100644
--- a/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
+++ b/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp
@@ -81,9 +81,8 @@
dyn_cast<RankedTensorType>(input.getType()).getElementType());
auto revisedInputShapeValue =
getTosaConstShape(rewriter, op.getLoc(), revisedInputShape);
- input = rewriter
- .create<tosa::ReshapeOp>(op.getLoc(), inputType, input,
- revisedInputShapeValue)
+ input = tosa::ReshapeOp::create(rewriter, op.getLoc(), inputType, input,
+ revisedInputShapeValue)
.getResult();
Type resultETy = resultType.getElementType();
@@ -162,9 +161,8 @@
shiftType, rewriter.getIntegerAttr(shiftElementType, 0));
Value constZero =
tosa::ConstOp::create(rewriter, op.getLoc(), shiftType, shiftZeroAttr);
- Value mulValue = rewriter
- .create<tosa::MulOp>(op.getLoc(), mulShapeType, input,
- weight, constZero)
+ Value mulValue = tosa::MulOp::create(rewriter, op.getLoc(), mulShapeType,
+ input, weight, constZero)
.getResult();
// Reshape output to [N, H, W, C * M].