[mlir][ptr] Add load and store ops. (#156093)
This patch adds the `load` and `store` operations to the ptr dialect.
Implementing the SROA and Mem2Reg interfaces, the conversion to LLVM, and
alias information is left as future work.

This patch also fixes a bug in `OptionalProp` that caused the bytecode
writer to exit early and skip writing the remaining op properties whenever
an optional prop held its default value.
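To make the failure mode concrete, here is a self-contained sketch of the
old and new emitted write patterns. The `Writer`, `writeBuggy`, and
`writeFixed` names are illustrative stand-ins, not the actual
TableGen-generated code:
```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Stand-in for the bytecode writer: just records every primitive it emits.
struct Writer {
  std::vector<int64_t> out;
  void writeBool(bool b) { out.push_back(b); }
  void writeInt(int64_t v) { out.push_back(v); }
};

// Old emitted pattern: when the optional prop was absent, the generated
// `return` left the *entire* properties writer, dropping later props.
void writeBuggy(Writer &w, std::optional<int64_t> alignment, int64_t ordering) {
  w.writeBool(alignment.has_value());
  if (!alignment.has_value())
    return; // also skips `ordering` below
  w.writeInt(*alignment);
  w.writeInt(ordering);
}

// Fixed pattern: only the optional prop's payload is guarded.
void writeFixed(Writer &w, std::optional<int64_t> alignment, int64_t ordering) {
  w.writeBool(alignment.has_value());
  if (alignment.has_value())
    w.writeInt(*alignment);
  w.writeInt(ordering);
}

int main() {
  Writer buggy, fixed;
  writeBuggy(buggy, std::nullopt, /*ordering=*/2);
  writeFixed(fixed, std::nullopt, /*ordering=*/2);
  // buggy emits 1 value, fixed emits 2; a reader of the buggy stream
  // desynchronizes because `ordering` was never written.
  std::cout << buggy.out.size() << " vs " << fixed.out.size() << "\n";
}
```
The fix in `Properties.td` below guards only the optional prop's payload
with an `if` block instead of returning from the whole writer.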
Example of the new operations:
```mlir
func.func @load_ops(%arg0: !ptr.ptr<#ptr.generic_space>) -> (f32, f32, f32, f32, f32, i64, i32) {
%0 = ptr.load %arg0 : !ptr.ptr<#ptr.generic_space> -> f32
%1 = ptr.load volatile %arg0 : !ptr.ptr<#ptr.generic_space> -> f32
%2 = ptr.load %arg0 nontemporal : !ptr.ptr<#ptr.generic_space> -> f32
%3 = ptr.load %arg0 invariant : !ptr.ptr<#ptr.generic_space> -> f32
%4 = ptr.load %arg0 invariant_group : !ptr.ptr<#ptr.generic_space> -> f32
%5 = ptr.load %arg0 atomic monotonic alignment = 8 : !ptr.ptr<#ptr.generic_space> -> i64
%6 = ptr.load volatile %arg0 atomic syncscope("workgroup") acquire nontemporal alignment = 4 : !ptr.ptr<#ptr.generic_space> -> i32
return %0, %1, %2, %3, %4, %5, %6 : f32, f32, f32, f32, f32, i64, i32
}
func.func @store_ops(%arg0: !ptr.ptr<#ptr.generic_space>, %arg1: f32, %arg2: i64, %arg3: i32) {
ptr.store %arg1, %arg0 : f32, !ptr.ptr<#ptr.generic_space>
ptr.store volatile %arg1, %arg0 : f32, !ptr.ptr<#ptr.generic_space>
ptr.store %arg1, %arg0 nontemporal : f32, !ptr.ptr<#ptr.generic_space>
ptr.store %arg1, %arg0 invariant_group : f32, !ptr.ptr<#ptr.generic_space>
ptr.store %arg2, %arg0 atomic monotonic alignment = 8 : i64, !ptr.ptr<#ptr.generic_space>
ptr.store volatile %arg3, %arg0 atomic syncscope("workgroup") release nontemporal alignment = 4 : i32, !ptr.ptr<#ptr.generic_space>
return
}
```
Finally, this patch makes it possible to exercise more advanced features of
ptr memory spaces, for example a read-only memory space that rejects stores:
```mlir
// mlir-opt -verify-diagnostics
func.func @store_const(%arg0: !ptr.ptr<#test.const_memory_space>, %arg1: i64) {
// expected-error@+1 {{memory space is read-only}}
ptr.store %arg1, %arg0 atomic monotonic alignment = 8 : i64, !ptr.ptr<#test.const_memory_space>
return
}
```

diff --git a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
index a046755..3e6754c 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
+++ b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
@@ -20,8 +20,8 @@
namespace mlir {
class Operation;
namespace ptr {
-enum class AtomicBinOp : uint64_t;
-enum class AtomicOrdering : uint64_t;
+enum class AtomicBinOp : uint32_t;
+enum class AtomicOrdering : uint32_t;
} // namespace ptr
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
index 54efeb0..0171b9c 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
+++ b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
@@ -42,7 +42,7 @@
/*methodName=*/ "isValidLoad",
/*args=*/ (ins "::mlir::Type":$type,
"::mlir::ptr::AtomicOrdering":$ordering,
- "::mlir::IntegerAttr":$alignment,
+ "std::optional<int64_t>":$alignment,
"::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError)
>,
InterfaceMethod<
@@ -57,7 +57,7 @@
/*methodName=*/ "isValidStore",
/*args=*/ (ins "::mlir::Type":$type,
"::mlir::ptr::AtomicOrdering":$ordering,
- "::mlir::IntegerAttr":$alignment,
+ "std::optional<int64_t>":$alignment,
"::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError)
>,
InterfaceMethod<
@@ -73,7 +73,7 @@
/*args=*/ (ins "::mlir::ptr::AtomicBinOp":$op,
"::mlir::Type":$type,
"::mlir::ptr::AtomicOrdering":$ordering,
- "::mlir::IntegerAttr":$alignment,
+ "std::optional<int64_t>":$alignment,
"::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError)
>,
InterfaceMethod<
@@ -90,7 +90,7 @@
/*args=*/ (ins "::mlir::Type":$type,
"::mlir::ptr::AtomicOrdering":$successOrdering,
"::mlir::ptr::AtomicOrdering":$failureOrdering,
- "::mlir::IntegerAttr":$alignment,
+ "std::optional<int64_t>":$alignment,
"::llvm::function_ref<::mlir::InFlightDiagnostic()>":$emitError)
>,
InterfaceMethod<
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
index cc556c6..c169f48 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
@@ -15,25 +15,25 @@
// Atomic binary op enum attribute.
//===----------------------------------------------------------------------===//
-def AtomicBinOpXchg : I64EnumAttrCase<"xchg", 0, "xchg">;
-def AtomicBinOpAdd : I64EnumAttrCase<"add", 1, "add">;
-def AtomicBinOpSub : I64EnumAttrCase<"sub", 2, "sub">;
-def AtomicBinOpAnd : I64EnumAttrCase<"_and", 3, "_and">;
-def AtomicBinOpNand : I64EnumAttrCase<"nand", 4, "nand">;
-def AtomicBinOpOr : I64EnumAttrCase<"_or", 5, "_or">;
-def AtomicBinOpXor : I64EnumAttrCase<"_xor", 6, "_xor">;
-def AtomicBinOpMax : I64EnumAttrCase<"max", 7, "max">;
-def AtomicBinOpMin : I64EnumAttrCase<"min", 8, "min">;
-def AtomicBinOpUMax : I64EnumAttrCase<"umax", 9, "umax">;
-def AtomicBinOpUMin : I64EnumAttrCase<"umin", 10, "umin">;
-def AtomicBinOpFAdd : I64EnumAttrCase<"fadd", 11, "fadd">;
-def AtomicBinOpFSub : I64EnumAttrCase<"fsub", 12, "fsub">;
-def AtomicBinOpFMax : I64EnumAttrCase<"fmax", 13, "fmax">;
-def AtomicBinOpFMin : I64EnumAttrCase<"fmin", 14, "fmin">;
-def AtomicBinOpUIncWrap : I64EnumAttrCase<"uinc_wrap", 15, "uinc_wrap">;
-def AtomicBinOpUDecWrap : I64EnumAttrCase<"udec_wrap", 16, "udec_wrap">;
+def AtomicBinOpXchg : I32EnumCase<"xchg", 0, "xchg">;
+def AtomicBinOpAdd : I32EnumCase<"add", 1, "add">;
+def AtomicBinOpSub : I32EnumCase<"sub", 2, "sub">;
+def AtomicBinOpAnd : I32EnumCase<"_and", 3, "_and">;
+def AtomicBinOpNand : I32EnumCase<"nand", 4, "nand">;
+def AtomicBinOpOr : I32EnumCase<"_or", 5, "_or">;
+def AtomicBinOpXor : I32EnumCase<"_xor", 6, "_xor">;
+def AtomicBinOpMax : I32EnumCase<"max", 7, "max">;
+def AtomicBinOpMin : I32EnumCase<"min", 8, "min">;
+def AtomicBinOpUMax : I32EnumCase<"umax", 9, "umax">;
+def AtomicBinOpUMin : I32EnumCase<"umin", 10, "umin">;
+def AtomicBinOpFAdd : I32EnumCase<"fadd", 11, "fadd">;
+def AtomicBinOpFSub : I32EnumCase<"fsub", 12, "fsub">;
+def AtomicBinOpFMax : I32EnumCase<"fmax", 13, "fmax">;
+def AtomicBinOpFMin : I32EnumCase<"fmin", 14, "fmin">;
+def AtomicBinOpUIncWrap : I32EnumCase<"uinc_wrap", 15, "uinc_wrap">;
+def AtomicBinOpUDecWrap : I32EnumCase<"udec_wrap", 16, "udec_wrap">;
-def AtomicBinOp : I64EnumAttr<
+def AtomicBinOp : I32Enum<
"AtomicBinOp",
"ptr.atomicrmw binary operations",
[AtomicBinOpXchg, AtomicBinOpAdd, AtomicBinOpSub, AtomicBinOpAnd,
@@ -48,15 +48,15 @@
// Atomic ordering enum attribute.
//===----------------------------------------------------------------------===//
-def AtomicOrderingNotAtomic : I64EnumAttrCase<"not_atomic", 0, "not_atomic">;
-def AtomicOrderingUnordered : I64EnumAttrCase<"unordered", 1, "unordered">;
-def AtomicOrderingMonotonic : I64EnumAttrCase<"monotonic", 2, "monotonic">;
-def AtomicOrderingAcquire : I64EnumAttrCase<"acquire", 3, "acquire">;
-def AtomicOrderingRelease : I64EnumAttrCase<"release", 4, "release">;
-def AtomicOrderingAcqRel : I64EnumAttrCase<"acq_rel", 5, "acq_rel">;
-def AtomicOrderingSeqCst : I64EnumAttrCase<"seq_cst", 6, "seq_cst">;
+def AtomicOrderingNotAtomic : I32EnumCase<"not_atomic", 0, "not_atomic">;
+def AtomicOrderingUnordered : I32EnumCase<"unordered", 1, "unordered">;
+def AtomicOrderingMonotonic : I32EnumCase<"monotonic", 2, "monotonic">;
+def AtomicOrderingAcquire : I32EnumCase<"acquire", 3, "acquire">;
+def AtomicOrderingRelease : I32EnumCase<"release", 4, "release">;
+def AtomicOrderingAcqRel : I32EnumCase<"acq_rel", 5, "acq_rel">;
+def AtomicOrderingSeqCst : I32EnumCase<"seq_cst", 6, "seq_cst">;
-def AtomicOrdering : I64EnumAttr<
+def AtomicOrdering : I32Enum<
"AtomicOrdering",
"Atomic ordering for LLVM's memory model",
[AtomicOrderingNotAtomic, AtomicOrderingUnordered, AtomicOrderingMonotonic,
@@ -66,6 +66,8 @@
let cppNamespace = "::mlir::ptr";
}
+def AtomicOrderingProp : EnumProp<AtomicOrdering>;
+
//===----------------------------------------------------------------------===//
// Ptr add flags enum properties.
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
index 440f6e5..1c88efc 100644
--- a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
@@ -119,6 +119,133 @@
}
//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+
+def AlignmentProp : OptionalProp<I64Prop>;
+
+def Ptr_LoadOp : Pointer_Op<"load", [
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>
+ ]> {
+ let description = [{
+ The `load` operation is used to read from memory. A load may be marked as
+ atomic, volatile, and/or nontemporal.
+
+ An atomic load only supports a limited set of value types, and requires
+ an explicit alignment.
+
+ Examples:
+ ```mlir
+ // A volatile load of a float variable.
+ %0 = ptr.load volatile %ptr : !ptr.ptr -> f32
+
+ // A nontemporal load of a float variable.
+ %0 = ptr.load %ptr nontemporal : !ptr.ptr -> f32
+
+ // An atomic load of an integer variable.
+ %0 = ptr.load %ptr atomic monotonic alignment = 8 : !ptr.ptr -> i64
+ ```
+
+ See the following link for more details on the meaning of `alignment`,
+ `volatile_`, `nontemporal`, `invariant`, `invariant_group`, `ordering`,
+ and `syncscope`:
+ https://llvm.org/docs/LangRef.html#load-instruction
+ }];
+ let arguments = (ins Ptr_PtrType:$ptr,
+ AlignmentProp:$alignment,
+ UnitProp:$volatile_,
+ UnitProp:$nontemporal,
+ UnitProp:$invariant,
+ UnitProp:$invariantGroup,
+ DefaultValuedProp<
+ AtomicOrderingProp,
+ "AtomicOrdering::not_atomic">:$ordering,
+ OptionalAttr<StrAttr>:$syncscope);
+ let results = (outs AnyType:$value);
+ let assemblyFormat = [{
+ (`volatile` $volatile_^)? $ptr
+ (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+ oilist(
+ `nontemporal` $nontemporal |
+ `invariant` $invariant |
+ `invariant_group` $invariantGroup |
+ `alignment` `=` $alignment
+ )
+ attr-dict `:` qualified(type($ptr)) `->` type($value)
+ }];
+ let builders = [
+ OpBuilder<(ins "Type":$type, "Value":$ptr,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
+ CArg<"bool", "false">:$isNonTemporal, CArg<"bool", "false">:$isInvariant,
+ CArg<"bool", "false">:$isInvariantGroup,
+ CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+ CArg<"StringRef", "StringRef()">:$syncscope)>
+ ];
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+
+def Ptr_StoreOp : Pointer_Op<"store", [
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>
+ ]> {
+ let description = [{
+ The `store` operation is used to write to memory. A store may be marked as
+ atomic, volatile, and/or nontemporal.
+
+ An atomic store only supports a limited set of value types, and requires
+ an explicit alignment.
+
+ Examples:
+ ```mlir
+ // A volatile store of a float variable.
+ ptr.store volatile %val, %ptr : f32, !ptr.ptr
+
+ // A nontemporal store of a float variable.
+ ptr.store %val, %ptr nontemporal : f32, !ptr.ptr
+
+ // An atomic store of an integer variable.
+    ptr.store %val, %ptr atomic monotonic alignment = 8 : i64, !ptr.ptr
+ ```
+
+ See the following link for more details on the meaning of `alignment`,
+ `volatile_`, `nontemporal`, `invariant_group`, `ordering`, and `syncscope`:
+ https://llvm.org/docs/LangRef.html#store-instruction
+ }];
+ let arguments = (ins AnyType:$value,
+ Ptr_PtrType:$ptr,
+ AlignmentProp:$alignment,
+ UnitProp:$volatile_,
+ UnitProp:$nontemporal,
+ UnitProp:$invariantGroup,
+ DefaultValuedProp<
+ AtomicOrderingProp,
+ "AtomicOrdering::not_atomic">:$ordering,
+ OptionalAttr<StrAttr>:$syncscope);
+ let assemblyFormat = [{
+ (`volatile` $volatile_^)? $value `,` $ptr
+ (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+ oilist(
+ `nontemporal` $nontemporal |
+ `invariant_group` $invariantGroup |
+ `alignment` `=` $alignment
+ )
+ attr-dict `:` type($value) `,` qualified(type($ptr))
+ }];
+ let builders = [
+ OpBuilder<(ins "Value":$value, "Value":$ptr,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
+ CArg<"bool", "false">:$isNonTemporal,
+ CArg<"bool", "false">:$isInvariantGroup,
+ CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+ CArg<"StringRef", "StringRef()">:$syncscope)>
+ ];
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
// ToPtrOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/IR/Properties.td b/mlir/include/mlir/IR/Properties.td
index a6221f9..a7ade06 100644
--- a/mlir/include/mlir/IR/Properties.td
+++ b/mlir/include/mlir/IR/Properties.td
@@ -773,9 +773,10 @@
}];
let writeToMlirBytecode = [{
$_writer.writeOwnedBool($_storage.has_value());
- if (!$_storage.has_value())
- return;
- }] # !subst("$_storage", "(*($_storage))", p.writeToMlirBytecode);
+ if ($_storage.has_value()) {
+ }] # !subst("$_storage", "(*($_storage))", p.writeToMlirBytecode) # [{
+ }
+ }];
let hashProperty = !if(!empty(p.hashProperty), p.hashProperty,
[{ hash_value($_storage.has_value() ? std::optional<::llvm::hash_code>{}] #
diff --git a/mlir/lib/Dialect/Ptr/IR/PtrAttrs.cpp b/mlir/lib/Dialect/Ptr/IR/PtrAttrs.cpp
index 772d25d..dd4e906 100644
--- a/mlir/lib/Dialect/Ptr/IR/PtrAttrs.cpp
+++ b/mlir/lib/Dialect/Ptr/IR/PtrAttrs.cpp
@@ -22,26 +22,27 @@
//===----------------------------------------------------------------------===//
bool GenericSpaceAttr::isValidLoad(
- Type type, ptr::AtomicOrdering ordering, IntegerAttr alignment,
+ Type type, ptr::AtomicOrdering ordering, std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
return true;
}
bool GenericSpaceAttr::isValidStore(
- Type type, ptr::AtomicOrdering ordering, IntegerAttr alignment,
+ Type type, ptr::AtomicOrdering ordering, std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
return true;
}
bool GenericSpaceAttr::isValidAtomicOp(
ptr::AtomicBinOp op, Type type, ptr::AtomicOrdering ordering,
- IntegerAttr alignment, function_ref<InFlightDiagnostic()> emitError) const {
+ std::optional<int64_t> alignment,
+ function_ref<InFlightDiagnostic()> emitError) const {
return true;
}
bool GenericSpaceAttr::isValidAtomicXchg(
Type type, ptr::AtomicOrdering successOrdering,
- ptr::AtomicOrdering failureOrdering, IntegerAttr alignment,
+ ptr::AtomicOrdering failureOrdering, std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
return true;
}
diff --git a/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
index c5ec0ca..bf87f83 100644
--- a/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
+++ b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
@@ -85,6 +85,122 @@
}
//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+
+/// Verifies the attributes and the type of atomic memory access operations.
+template <typename OpTy>
+static LogicalResult
+verifyAtomicMemOp(OpTy memOp, ArrayRef<AtomicOrdering> unsupportedOrderings) {
+ if (memOp.getOrdering() != AtomicOrdering::not_atomic) {
+ if (llvm::is_contained(unsupportedOrderings, memOp.getOrdering()))
+ return memOp.emitOpError("unsupported ordering '")
+ << stringifyAtomicOrdering(memOp.getOrdering()) << "'";
+ if (!memOp.getAlignment())
+ return memOp.emitOpError("expected alignment for atomic access");
+ return success();
+ }
+ if (memOp.getSyncscope()) {
+ return memOp.emitOpError(
+ "expected syncscope to be null for non-atomic access");
+ }
+ return success();
+}
+
+/// Verifies that the alignment attribute is a power of 2 if present.
+static LogicalResult
+verifyAlignment(std::optional<int64_t> alignment,
+ function_ref<InFlightDiagnostic()> emitError) {
+ if (!alignment)
+ return success();
+ if (alignment.value() <= 0)
+ return emitError() << "alignment must be positive";
+ if (!llvm::isPowerOf2_64(alignment.value()))
+ return emitError() << "alignment must be a power of 2";
+ return success();
+}
+
+void LoadOp::getEffects(
+ SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+ &effects) {
+ effects.emplace_back(MemoryEffects::Read::get(), &getPtrMutable());
+ // Volatile operations can have target-specific read-write effects on
+ // memory besides the one referred to by the pointer operand.
+ // Similarly, atomic operations that are monotonic or stricter cause
+ // synchronization that from a language point-of-view, are arbitrary
+ // read-writes into memory.
+ if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
+ getOrdering() != AtomicOrdering::unordered)) {
+ effects.emplace_back(MemoryEffects::Write::get());
+ effects.emplace_back(MemoryEffects::Read::get());
+ }
+}
+
+LogicalResult LoadOp::verify() {
+ auto emitDiag = [&]() -> InFlightDiagnostic { return emitError(); };
+ MemorySpaceAttrInterface ms = getPtr().getType().getMemorySpace();
+ if (!ms.isValidLoad(getResult().getType(), getOrdering(), getAlignment(),
+ emitDiag))
+ return failure();
+ if (failed(verifyAlignment(getAlignment(), emitDiag)))
+ return failure();
+ return verifyAtomicMemOp(*this,
+ {AtomicOrdering::release, AtomicOrdering::acq_rel});
+}
+
+void LoadOp::build(OpBuilder &builder, OperationState &state, Type type,
+ Value addr, unsigned alignment, bool isVolatile,
+ bool isNonTemporal, bool isInvariant, bool isInvariantGroup,
+ AtomicOrdering ordering, StringRef syncscope) {
+ build(builder, state, type, addr,
+ alignment ? std::optional<int64_t>(alignment) : std::nullopt,
+ isVolatile, isNonTemporal, isInvariant, isInvariantGroup, ordering,
+ syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
+}
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+
+void StoreOp::getEffects(
+ SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+ &effects) {
+ effects.emplace_back(MemoryEffects::Write::get(), &getPtrMutable());
+ // Volatile operations can have target-specific read-write effects on
+ // memory besides the one referred to by the pointer operand.
+ // Similarly, atomic operations that are monotonic or stricter cause
+ // synchronization that from a language point-of-view, are arbitrary
+ // read-writes into memory.
+ if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
+ getOrdering() != AtomicOrdering::unordered)) {
+ effects.emplace_back(MemoryEffects::Write::get());
+ effects.emplace_back(MemoryEffects::Read::get());
+ }
+}
+
+LogicalResult StoreOp::verify() {
+ auto emitDiag = [&]() -> InFlightDiagnostic { return emitError(); };
+ MemorySpaceAttrInterface ms = getPtr().getType().getMemorySpace();
+ if (!ms.isValidStore(getValue().getType(), getOrdering(), getAlignment(),
+ emitDiag))
+ return failure();
+ if (failed(verifyAlignment(getAlignment(), emitDiag)))
+ return failure();
+ return verifyAtomicMemOp(*this,
+ {AtomicOrdering::acquire, AtomicOrdering::acq_rel});
+}
+
+void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
+ Value addr, unsigned alignment, bool isVolatile,
+ bool isNonTemporal, bool isInvariantGroup,
+ AtomicOrdering ordering, StringRef syncscope) {
+ build(builder, state, value, addr,
+ alignment ? std::optional<int64_t>(alignment) : std::nullopt,
+ isVolatile, isNonTemporal, isInvariantGroup, ordering,
+ syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
+}
+
+//===----------------------------------------------------------------------===//
// PtrAddOp
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/Ptr/invalid.mlir b/mlir/test/Dialect/Ptr/invalid.mlir
index 19fd715..5702097 100644
--- a/mlir/test/Dialect/Ptr/invalid.mlir
+++ b/mlir/test/Dialect/Ptr/invalid.mlir
@@ -14,3 +14,27 @@
%r = ptr.to_ptr %v : !ptr.ptr<#ptr.generic_space> -> !ptr.ptr<#ptr.generic_space>
return
}
+
+// -----
+
+func.func @invalid_load_alignment(%arg0: !ptr.ptr<#ptr.generic_space>) -> i64 {
+ // expected-error@+1 {{alignment must be a power of 2}}
+ %r = ptr.load %arg0 alignment = 3 : !ptr.ptr<#ptr.generic_space> -> i64
+ return %r : i64
+}
+
+// -----
+
+func.func @invalid_store_alignment(%arg0: !ptr.ptr<#ptr.generic_space>, %arg1: i64) {
+ // expected-error@+1 {{alignment must be a power of 2}}
+ ptr.store %arg1, %arg0 alignment = 3 : i64, !ptr.ptr<#ptr.generic_space>
+ return
+}
+
+// -----
+
+func.func @store_const(%arg0: !ptr.ptr<#test.const_memory_space>, %arg1: i64) {
+ // expected-error@+1 {{memory space is read-only}}
+ ptr.store %arg1, %arg0 atomic monotonic alignment = 8 : i64, !ptr.ptr<#test.const_memory_space>
+ return
+}
diff --git a/mlir/test/Dialect/Ptr/ops.mlir b/mlir/test/Dialect/Ptr/ops.mlir
index eed3272..dc89489 100644
--- a/mlir/test/Dialect/Ptr/ops.mlir
+++ b/mlir/test/Dialect/Ptr/ops.mlir
@@ -1,14 +1,7 @@
-// RUN: mlir-opt %s --verify-roundtrip | FileCheck %s
+// RUN: mlir-opt %s --verify-roundtrip
/// Check op assembly.
-// CHECK-LABEL: @ptr_add_type_offset
func.func @ptr_add_type_offset(%ptr: !ptr.ptr<#ptr.generic_space>) -> !ptr.ptr<#ptr.generic_space> {
- // CHECK: ptr.type_offset f32 : index
- // CHECK-NEXT: ptr.ptr_add %{{.*}}, %{{.*}} : <#ptr.generic_space>, index
- // CHECK-NEXT: ptr.ptr_add %{{.*}}, %{{.*}} : <#ptr.generic_space>, index
- // CHECK-NEXT: ptr.ptr_add nusw %{{.*}}, %{{.*}} : <#ptr.generic_space>, index
- // CHECK-NEXT: ptr.ptr_add nuw %{{.*}}, %{{.*}} : <#ptr.generic_space>, index
- // CHECK-NEXT: ptr.ptr_add inbounds %{{.*}}, %{{.*}} : <#ptr.generic_space>, index
%off = ptr.type_offset f32 : index
%res = ptr.ptr_add %ptr, %off : !ptr.ptr<#ptr.generic_space>, index
%res0 = ptr.ptr_add none %ptr, %off : !ptr.ptr<#ptr.generic_space>, index
@@ -19,7 +12,6 @@
}
/// Check cast ops assembly.
-// CHECK-LABEL: @cast_ops
func.func @cast_ops(%mr: memref<f32, #ptr.generic_space>) -> memref<f32, #ptr.generic_space> {
%ptr = ptr.to_ptr %mr : memref<f32, #ptr.generic_space> -> !ptr.ptr<#ptr.generic_space>
%mda = ptr.get_metadata %mr : memref<f32, #ptr.generic_space>
@@ -27,3 +19,26 @@
%mr0 = ptr.from_ptr %ptr : !ptr.ptr<#ptr.generic_space> -> memref<f32, #ptr.generic_space>
return %res : memref<f32, #ptr.generic_space>
}
+
+/// Check load ops assembly.
+func.func @load_ops(%arg0: !ptr.ptr<#ptr.generic_space>) -> (f32, f32, f32, f32, f32, i64, i32) {
+ %0 = ptr.load %arg0 : !ptr.ptr<#ptr.generic_space> -> f32
+ %1 = ptr.load volatile %arg0 : !ptr.ptr<#ptr.generic_space> -> f32
+ %2 = ptr.load %arg0 nontemporal : !ptr.ptr<#ptr.generic_space> -> f32
+ %3 = ptr.load %arg0 invariant : !ptr.ptr<#ptr.generic_space> -> f32
+ %4 = ptr.load %arg0 invariant_group : !ptr.ptr<#ptr.generic_space> -> f32
+ %5 = ptr.load %arg0 atomic monotonic alignment = 8 : !ptr.ptr<#ptr.generic_space> -> i64
+ %6 = ptr.load volatile %arg0 atomic syncscope("workgroup") acquire nontemporal alignment = 4 : !ptr.ptr<#ptr.generic_space> -> i32
+ return %0, %1, %2, %3, %4, %5, %6 : f32, f32, f32, f32, f32, i64, i32
+}
+
+/// Check store ops assembly.
+func.func @store_ops(%arg0: !ptr.ptr<#ptr.generic_space>, %arg1: f32, %arg2: i64, %arg3: i32) {
+ ptr.store %arg1, %arg0 : f32, !ptr.ptr<#ptr.generic_space>
+ ptr.store volatile %arg1, %arg0 : f32, !ptr.ptr<#ptr.generic_space>
+ ptr.store %arg1, %arg0 nontemporal : f32, !ptr.ptr<#ptr.generic_space>
+ ptr.store %arg1, %arg0 invariant_group : f32, !ptr.ptr<#ptr.generic_space>
+ ptr.store %arg2, %arg0 atomic monotonic alignment = 8 : i64, !ptr.ptr<#ptr.generic_space>
+ ptr.store volatile %arg3, %arg0 atomic syncscope("workgroup") release nontemporal alignment = 4 : i32, !ptr.ptr<#ptr.generic_space>
+ return
+}
diff --git a/mlir/test/lib/Dialect/Test/TestAttributes.cpp b/mlir/test/lib/Dialect/Test/TestAttributes.cpp
index 5890913..af5f1a3 100644
--- a/mlir/test/lib/Dialect/Test/TestAttributes.cpp
+++ b/mlir/test/lib/Dialect/Test/TestAttributes.cpp
@@ -385,13 +385,15 @@
//===----------------------------------------------------------------------===//
bool TestConstMemorySpaceAttr::isValidLoad(
- Type type, mlir::ptr::AtomicOrdering ordering, IntegerAttr alignment,
+ Type type, mlir::ptr::AtomicOrdering ordering,
+ std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
return true;
}
bool TestConstMemorySpaceAttr::isValidStore(
- Type type, mlir::ptr::AtomicOrdering ordering, IntegerAttr alignment,
+ Type type, mlir::ptr::AtomicOrdering ordering,
+ std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
if (emitError)
emitError() << "memory space is read-only";
@@ -400,7 +402,8 @@
bool TestConstMemorySpaceAttr::isValidAtomicOp(
mlir::ptr::AtomicBinOp binOp, Type type, mlir::ptr::AtomicOrdering ordering,
- IntegerAttr alignment, function_ref<InFlightDiagnostic()> emitError) const {
+ std::optional<int64_t> alignment,
+ function_ref<InFlightDiagnostic()> emitError) const {
if (emitError)
emitError() << "memory space is read-only";
return false;
@@ -408,7 +411,7 @@
bool TestConstMemorySpaceAttr::isValidAtomicXchg(
Type type, mlir::ptr::AtomicOrdering successOrdering,
- mlir::ptr::AtomicOrdering failureOrdering, IntegerAttr alignment,
+ mlir::ptr::AtomicOrdering failureOrdering, std::optional<int64_t> alignment,
function_ref<InFlightDiagnostic()> emitError) const {
if (emitError)
emitError() << "memory space is read-only";