//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

/// Return configuration options for One-Shot Bufferize.
static bufferization::OneShotBufferizationOptions
getBufferizationOptions(bool analysisOnly) {
  using namespace bufferization;
  OneShotBufferizationOptions options;
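  // Bufferize function boundaries too, so that tensors can be passed into and
  // out of functions as memrefs.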
  options.bufferizeFunctionBoundaries = true;
  // TODO(springerm): To spot memory leaks more easily, returning dense allocs
  // should be disallowed.
  options.allowReturnAllocs = true;
  options.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
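  // Tensor types for which no buffer layout can be inferred are bufferized to
  // memrefs with a static identity layout.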
  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                      const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(
        cast<TensorType>(value.getType()), memorySpace);
  };
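  // In analysis-only mode, the IR is annotated with the analysis results
  // instead of being rewritten, and detected RaW conflicts are printed.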
  if (analysisOnly) {
    options.testAnalysisOnly = true;
    options.printConflicts = true;
  }
  return options;
}

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
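  // Rewrite named linalg ops into linalg.generic form, the only form the
  // sparsifier handles.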
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
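  // Run the sparsification and bufferization mini-pipeline, which lowers
  // annotated linalg ops to imperative sparse code and bufferizes both the
  // sparse and the dense parts of the program.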
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptions(options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.sparseTensorConversionOptions(),
      options.createSparseDeallocs, options.enableRuntimeLibrary,
      options.enableBufferInitialization, options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices));
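  // In analysis-only mode, the pass above has already annotated the IR, so
  // skip the rest of the pipeline.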
  if (options.testBufferizationAnalysisOnly)
    return;
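  // Clean up after bufferization and fold away any remaining
  // bufferization.to_tensor / bufferization.to_memref casts.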
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
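    // Rewrite suitable sparse kernels into GPU kernels and lower the
    // resulting device code to NVVM.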
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createLowerGpuOpsToNVVMOpsPass());
  }

  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
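
  // Lower the remaining dense linalg ops and vector transfers to SCF loops,
  // then lower all structured control flow to the CF dialect.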
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
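  // Expand strided memref metadata (sizes, strides, offsets) into explicit
  // affine arithmetic before lowering the affine constructs themselves.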
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
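  // Lower complex, arith, and math ops, preferring LLVM intrinsics where
  // available and falling back to libm calls otherwise.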
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  // Repeat convert-vector-to-llvm to lower any vector ops introduced by the
  // conversions above.
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
#if MLIR_GPU_TO_CUBIN_PASS_ENABLE
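    // Serialize the GPU kernels to cubin; this pass is only available when
    // MLIR is built with CUDA support enabled.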
    pm.addNestedPass<gpu::GPUModuleOp>(createGpuSerializeToCubinPass(
        options.gpuTriple, options.gpuChip, options.gpuFeatures));
#endif
    pm.addPass(createGpuToLLVMConversionPass());
  }

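  // Finally, fold away the unrealized conversion casts left behind by the
  // partial conversions above.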
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}