// RUN: mlir-opt %s --sparse-assembler | FileCheck %s --check-prefix=CHECK-HI
// RUN: mlir-opt %s --sparse-assembler \
// RUN: --inline | FileCheck %s --check-prefix=CHECK-INL
// RUN: mlir-opt %s --sparse-assembler \
// RUN: --linalg-generalize-named-ops \
// RUN: --linalg-fuse-elementwise-ops \
// RUN: --sparsification-and-bufferization | FileCheck %s --check-prefix=CHECK-MID
// RUN: mlir-opt %s --sparse-assembler \
// RUN: --sparsifier | FileCheck %s --check-prefix=CHECK-LOW
//
// An example of a module generated by torch-mlir with a sparse tensor from
// torch.sparse. The MLIR sparsifier should be able to provide the external
// API through wrapper methods (spiface and ciface). Various passes should
// compose without trouble.
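//
// The four RUN pipelines verify successively lower stages of the flow:
// --sparse-assembler alone (CHECK-HI), plus inlining of the generated
// wrapper (CHECK-INL), plus sparsification and bufferization down to
// scf/memref loops (CHECK-MID), and the full --sparsifier pipeline down
// to the LLVM dialect (CHECK-LOW).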
//
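// For reference, a rough (unverified) sketch of the wrapper that the
// sparse-assembler pass is expected to synthesize for the #csc argument,
// matching the CHECK-HI patterns below; the exact assemble syntax and the
// overhead buffer types (positions/coordinates/values) are illustrative
// assumptions only:
//
//   func.func @main(%pos: tensor<?xindex>, %crd: tensor<?xindex>,
//                   %val: tensor<?xf32>, %dense: tensor<64x64xf32>)
//       -> tensor<64x64xf32> {
//     %sp = sparse_tensor.assemble (%pos, %crd), %val
//         : (tensor<?xindex>, tensor<?xindex>), tensor<?xf32>
//           to tensor<64x64xf32, #csc>
//     %r = call @_internal_main(%sp, %dense)
//         : (tensor<64x64xf32, #csc>, tensor<64x64xf32>) -> tensor<64x64xf32>
//     return %r : tensor<64x64xf32>
//   }
//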
// CHECK-HI-LABEL: func.func @main
// CHECK-HI: sparse_tensor.assemble
// CHECK-HI: call @_internal_main
// CHECK-HI: return
// CHECK-HI: func.func private @_internal_main
// CHECK-HI: linalg.matmul
// CHECK-HI: return
// CHECK-INL-LABEL: func.func @main
// CHECK-INL: sparse_tensor.assemble
// CHECK-INL: linalg.matmul
// CHECK-INL: return
// CHECK-INL-NOT: func.func private @_internal_main
// CHECK-MID-LABEL: func.func @main
// CHECK-MID: memref.load
// CHECK-MID: call @_internal_main
// CHECK-MID: return
// CHECK-MID: func.func private @_internal_main
// CHECK-MID: scf.for
// CHECK-MID: scf.for
// CHECK-MID: return
// CHECK-LOW-LABEL: llvm.func @main
// CHECK-LOW: llvm.call @_internal_main
// CHECK-LOW: llvm.return
// CHECK-LOW: llvm.func @_mlir_ciface_main
// CHECK-LOW: llvm.call @main
// CHECK-LOW: llvm.return
// CHECK-LOW: llvm.func @_internal_main
// CHECK-LOW-SAME: {sym_visibility = "private"}
// CHECK-LOW: llvm.return
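//
// The #csc encoding below annotates %arg0 with a two-level storage format
// that keeps the first dimension dense and compresses the nonzero entries
// along the second dimension; the dense operand %arg1 is passed through
// unchanged.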
#csc = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
module {
  func.func @main(%arg0: tensor<64x64xf32, #csc>,
                  %arg1: tensor<64x64xf32>) -> tensor<64x64xf32> attributes {llvm.emit_c_interface} {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<64x64xf32>
    %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<64x64xf32>) -> tensor<64x64xf32>
    %2 = linalg.matmul
      ins(%arg0, %arg1 : tensor<64x64xf32, #csc>, tensor<64x64xf32>)
      outs(%1 : tensor<64x64xf32>) -> tensor<64x64xf32>
    return %2 : tensor<64x64xf32>
  }
}