| //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// |
| // |
| // The LLVM Compiler Infrastructure |
| // |
| // This file is distributed under the University of Illinois Open Source |
| // License. See LICENSE.TXT for details. |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file exposes the class definitions of all of the subclasses of the |
| // Instruction class. This is meant to be an easy way to get access to all |
| // instruction subclasses. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #ifndef LLVM_IR_INSTRUCTIONS_H |
| #define LLVM_IR_INSTRUCTIONS_H |
| |
| #include "llvm/ADT/ArrayRef.h" |
| #include "llvm/ADT/None.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SmallVector.h" |
| #include "llvm/ADT/StringRef.h" |
| #include "llvm/ADT/Twine.h" |
| #include "llvm/ADT/iterator.h" |
| #include "llvm/ADT/iterator_range.h" |
| #include "llvm/IR/Attributes.h" |
| #include "llvm/IR/BasicBlock.h" |
| #include "llvm/IR/CallingConv.h" |
| #include "llvm/IR/Constant.h" |
| #include "llvm/IR/DerivedTypes.h" |
| #include "llvm/IR/Function.h" |
| #include "llvm/IR/InstrTypes.h" |
| #include "llvm/IR/Instruction.h" |
| #include "llvm/IR/OperandTraits.h" |
| #include "llvm/IR/Type.h" |
| #include "llvm/IR/Use.h" |
| #include "llvm/IR/User.h" |
| #include "llvm/IR/Value.h" |
| #include "llvm/Support/AtomicOrdering.h" |
| #include "llvm/Support/Casting.h" |
| #include "llvm/Support/ErrorHandling.h" |
| #include <cassert> |
| #include <cstddef> |
| #include <cstdint> |
| #include <iterator> |
| |
| namespace llvm { |
| |
| class APInt; |
| class ConstantInt; |
| class DataLayout; |
| class LLVMContext; |
| |
| //===----------------------------------------------------------------------===// |
| // AllocaInst Class |
| //===----------------------------------------------------------------------===// |
| |
/// An instruction to allocate memory on the stack.
| class AllocaInst : public UnaryInstruction { |
| Type *AllocatedType; |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| AllocaInst *cloneImpl() const; |
| |
| public: |
| explicit AllocaInst(Type *Ty, unsigned AddrSpace, |
| Value *ArraySize = nullptr, |
| const Twine &Name = "", |
| Instruction *InsertBefore = nullptr); |
| AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
| const Twine &Name, BasicBlock *InsertAtEnd); |
| |
| AllocaInst(Type *Ty, unsigned AddrSpace, |
| const Twine &Name, Instruction *InsertBefore = nullptr); |
| AllocaInst(Type *Ty, unsigned AddrSpace, |
| const Twine &Name, BasicBlock *InsertAtEnd); |
| |
| AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align, |
| const Twine &Name = "", Instruction *InsertBefore = nullptr); |
| AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align, |
| const Twine &Name, BasicBlock *InsertAtEnd); |
| |
| /// Return true if there is an allocation size parameter to the allocation |
| /// instruction that is not 1. |
| bool isArrayAllocation() const; |
| |
| /// Get the number of elements allocated. For a simple allocation of a single |
| /// element, this will return a constant 1 value. |
| const Value *getArraySize() const { return getOperand(0); } |
| Value *getArraySize() { return getOperand(0); } |
| |
/// Overload to return the most specific pointer type.
| PointerType *getType() const { |
| return cast<PointerType>(Instruction::getType()); |
| } |
| |
| /// Get allocation size in bits. Returns None if size can't be determined, |
| /// e.g. in case of a VLA. |
| Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const; |
| |
| /// Return the type that is being allocated by the instruction. |
| Type *getAllocatedType() const { return AllocatedType; } |
/// For use only in special circumstances that need to generically
/// transform a whole instruction (e.g. IR linking and vectorization).
| void setAllocatedType(Type *Ty) { AllocatedType = Ty; } |
| |
| /// Return the alignment of the memory that is being allocated by the |
| /// instruction. |
| unsigned getAlignment() const { |
| return (1u << (getSubclassDataFromInstruction() & 31)) >> 1; |
| } |
| void setAlignment(unsigned Align); |
| |
| /// Return true if this alloca is in the entry block of the function and is a |
| /// constant size. If so, the code generator will fold it into the |
| /// prolog/epilog code, so it is basically free. |
| bool isStaticAlloca() const; |
| |
| /// Return true if this alloca is used as an inalloca argument to a call. Such |
| /// allocas are never considered static even if they are in the entry block. |
| bool isUsedWithInAlloca() const { |
| return getSubclassDataFromInstruction() & 32; |
| } |
| |
| /// Specify whether this alloca is used to represent the arguments to a call. |
| void setUsedWithInAlloca(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) | |
| (V ? 32 : 0)); |
| } |
| |
| /// Return true if this alloca is used as a swifterror argument to a call. |
| bool isSwiftError() const { |
| return getSubclassDataFromInstruction() & 64; |
| } |
| |
| /// Specify whether this alloca is used to represent a swifterror. |
| void setSwiftError(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~64) | |
| (V ? 64 : 0)); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return (I->getOpcode() == Instruction::Alloca); |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // LoadInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// An instruction for reading from memory. This uses the SubclassData field in |
| /// Value to store whether or not the load is volatile. |
| class LoadInst : public UnaryInstruction { |
| void AssertOK(); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| LoadInst *cloneImpl() const; |
| |
| public: |
| LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore); |
| LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); |
| LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false, |
| Instruction *InsertBefore = nullptr); |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false, |
| Instruction *InsertBefore = nullptr) |
| : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, |
| NameStr, isVolatile, InsertBefore) {} |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, |
| BasicBlock *InsertAtEnd); |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, |
| Instruction *InsertBefore = nullptr) |
| : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, |
| NameStr, isVolatile, Align, InsertBefore) {} |
| LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
| unsigned Align, Instruction *InsertBefore = nullptr); |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, |
| unsigned Align, BasicBlock *InsertAtEnd); |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, |
| AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, |
| Instruction *InsertBefore = nullptr) |
| : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, |
| NameStr, isVolatile, Align, Order, SSID, InsertBefore) {} |
| LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
| unsigned Align, AtomicOrdering Order, |
| SyncScope::ID SSID = SyncScope::System, |
| Instruction *InsertBefore = nullptr); |
| LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, |
| unsigned Align, AtomicOrdering Order, SyncScope::ID SSID, |
| BasicBlock *InsertAtEnd); |
| LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore); |
| LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd); |
| LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr, |
| bool isVolatile = false, Instruction *InsertBefore = nullptr); |
| explicit LoadInst(Value *Ptr, const char *NameStr = nullptr, |
| bool isVolatile = false, |
| Instruction *InsertBefore = nullptr) |
| : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, |
| NameStr, isVolatile, InsertBefore) {} |
| LoadInst(Value *Ptr, const char *NameStr, bool isVolatile, |
| BasicBlock *InsertAtEnd); |
| |
| /// Return true if this is a load from a volatile memory location. |
| bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } |
| |
| /// Specify whether this is a volatile load or not. |
| void setVolatile(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
| (V ? 1 : 0)); |
| } |
| |
| /// Return the alignment of the access that is being performed. |
| unsigned getAlignment() const { |
| return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; |
| } |
| |
| void setAlignment(unsigned Align); |
| |
| /// Returns the ordering constraint of this load instruction. |
| AtomicOrdering getOrdering() const { |
| return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); |
| } |
| |
| /// Sets the ordering constraint of this load instruction. May not be Release |
| /// or AcquireRelease. |
| void setOrdering(AtomicOrdering Ordering) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | |
| ((unsigned)Ordering << 7)); |
| } |
| |
| /// Returns the synchronization scope ID of this load instruction. |
| SyncScope::ID getSyncScopeID() const { |
| return SSID; |
| } |
| |
| /// Sets the synchronization scope ID of this load instruction. |
| void setSyncScopeID(SyncScope::ID SSID) { |
| this->SSID = SSID; |
| } |
| |
| /// Sets the ordering constraint and the synchronization scope ID of this load |
| /// instruction. |
| void setAtomic(AtomicOrdering Ordering, |
| SyncScope::ID SSID = SyncScope::System) { |
| setOrdering(Ordering); |
| setSyncScopeID(SSID); |
| } |
| |
| bool isSimple() const { return !isAtomic() && !isVolatile(); } |
| |
| bool isUnordered() const { |
| return (getOrdering() == AtomicOrdering::NotAtomic || |
| getOrdering() == AtomicOrdering::Unordered) && |
| !isVolatile(); |
| } |
| |
| Value *getPointerOperand() { return getOperand(0); } |
| const Value *getPointerOperand() const { return getOperand(0); } |
| static unsigned getPointerOperandIndex() { return 0U; } |
| Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
| |
| /// Returns the address space of the pointer operand. |
| unsigned getPointerAddressSpace() const { |
| return getPointerOperandType()->getPointerAddressSpace(); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::Load; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| |
| /// The synchronization scope ID of this load instruction. Not quite enough |
| /// room in SubClassData for everything, so synchronization scope ID gets its |
| /// own field. |
| SyncScope::ID SSID; |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // StoreInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// An instruction for storing to memory. |
| class StoreInst : public Instruction { |
| void AssertOK(); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| StoreInst *cloneImpl() const; |
| |
| public: |
| StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); |
| StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile = false, |
| Instruction *InsertBefore = nullptr); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile, |
| unsigned Align, Instruction *InsertBefore = nullptr); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile, |
| unsigned Align, BasicBlock *InsertAtEnd); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile, |
| unsigned Align, AtomicOrdering Order, |
| SyncScope::ID SSID = SyncScope::System, |
| Instruction *InsertBefore = nullptr); |
| StoreInst(Value *Val, Value *Ptr, bool isVolatile, |
| unsigned Align, AtomicOrdering Order, SyncScope::ID SSID, |
| BasicBlock *InsertAtEnd); |
| |
| // allocate space for exactly two operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 2); |
| } |
| |
| /// Return true if this is a store to a volatile memory location. |
| bool isVolatile() const { return getSubclassDataFromInstruction() & 1; } |
| |
| /// Specify whether this is a volatile store or not. |
| void setVolatile(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
| (V ? 1 : 0)); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
/// Return the alignment of the access that is being performed.
| unsigned getAlignment() const { |
| return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1; |
| } |
| |
| void setAlignment(unsigned Align); |
| |
| /// Returns the ordering constraint of this store instruction. |
| AtomicOrdering getOrdering() const { |
| return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); |
| } |
| |
| /// Sets the ordering constraint of this store instruction. May not be |
| /// Acquire or AcquireRelease. |
| void setOrdering(AtomicOrdering Ordering) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | |
| ((unsigned)Ordering << 7)); |
| } |
| |
| /// Returns the synchronization scope ID of this store instruction. |
| SyncScope::ID getSyncScopeID() const { |
| return SSID; |
| } |
| |
| /// Sets the synchronization scope ID of this store instruction. |
| void setSyncScopeID(SyncScope::ID SSID) { |
| this->SSID = SSID; |
| } |
| |
| /// Sets the ordering constraint and the synchronization scope ID of this |
| /// store instruction. |
| void setAtomic(AtomicOrdering Ordering, |
| SyncScope::ID SSID = SyncScope::System) { |
| setOrdering(Ordering); |
| setSyncScopeID(SSID); |
| } |
| |
| bool isSimple() const { return !isAtomic() && !isVolatile(); } |
| |
| bool isUnordered() const { |
| return (getOrdering() == AtomicOrdering::NotAtomic || |
| getOrdering() == AtomicOrdering::Unordered) && |
| !isVolatile(); |
| } |
| |
| Value *getValueOperand() { return getOperand(0); } |
| const Value *getValueOperand() const { return getOperand(0); } |
| |
| Value *getPointerOperand() { return getOperand(1); } |
| const Value *getPointerOperand() const { return getOperand(1); } |
| static unsigned getPointerOperandIndex() { return 1U; } |
| Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
| |
| /// Returns the address space of the pointer operand. |
| unsigned getPointerAddressSpace() const { |
| return getPointerOperandType()->getPointerAddressSpace(); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::Store; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| |
| /// The synchronization scope ID of this store instruction. Not quite enough |
| /// room in SubClassData for everything, so synchronization scope ID gets its |
| /// own field. |
| SyncScope::ID SSID; |
| }; |
| |
| template <> |
| struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) |
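
// Example: a minimal sketch of creating a store and then marking it volatile.
// `Val`, `Ptr`, and `InsertPt` are assumed, illustrative names.
//
//   StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/false, InsertPt);
//   SI->setAlignment(4);
//   SI->setVolatile(true);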
| |
| //===----------------------------------------------------------------------===// |
| // FenceInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// An instruction for ordering other memory operations. |
| class FenceInst : public Instruction { |
| void Init(AtomicOrdering Ordering, SyncScope::ID SSID); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| FenceInst *cloneImpl() const; |
| |
| public: |
| // Ordering may only be Acquire, Release, AcquireRelease, or |
| // SequentiallyConsistent. |
| FenceInst(LLVMContext &C, AtomicOrdering Ordering, |
| SyncScope::ID SSID = SyncScope::System, |
| Instruction *InsertBefore = nullptr); |
| FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, |
| BasicBlock *InsertAtEnd); |
| |
| // allocate space for exactly zero operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 0); |
| } |
| |
| /// Returns the ordering constraint of this fence instruction. |
| AtomicOrdering getOrdering() const { |
| return AtomicOrdering(getSubclassDataFromInstruction() >> 1); |
| } |
| |
| /// Sets the ordering constraint of this fence instruction. May only be |
| /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. |
| void setOrdering(AtomicOrdering Ordering) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | |
| ((unsigned)Ordering << 1)); |
| } |
| |
| /// Returns the synchronization scope ID of this fence instruction. |
| SyncScope::ID getSyncScopeID() const { |
| return SSID; |
| } |
| |
| /// Sets the synchronization scope ID of this fence instruction. |
| void setSyncScopeID(SyncScope::ID SSID) { |
| this->SSID = SSID; |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::Fence; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| |
| /// The synchronization scope ID of this fence instruction. Not quite enough |
| /// room in SubClassData for everything, so synchronization scope ID gets its |
| /// own field. |
| SyncScope::ID SSID; |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // AtomicCmpXchgInst Class |
| //===----------------------------------------------------------------------===// |
| |
/// An instruction that atomically checks whether a
/// specified value is in a memory location and, if it is, stores a new value
| /// there. Returns the value that was loaded. |
| /// |
| class AtomicCmpXchgInst : public Instruction { |
| void Init(Value *Ptr, Value *Cmp, Value *NewVal, |
| AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, |
| SyncScope::ID SSID); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| AtomicCmpXchgInst *cloneImpl() const; |
| |
| public: |
| AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, |
| AtomicOrdering SuccessOrdering, |
| AtomicOrdering FailureOrdering, |
| SyncScope::ID SSID, Instruction *InsertBefore = nullptr); |
| AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, |
| AtomicOrdering SuccessOrdering, |
| AtomicOrdering FailureOrdering, |
| SyncScope::ID SSID, BasicBlock *InsertAtEnd); |
| |
| // allocate space for exactly three operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 3); |
| } |
| |
| /// Return true if this is a cmpxchg from a volatile memory |
| /// location. |
| /// |
| bool isVolatile() const { |
| return getSubclassDataFromInstruction() & 1; |
| } |
| |
| /// Specify whether this is a volatile cmpxchg. |
| /// |
| void setVolatile(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
| (unsigned)V); |
| } |
| |
| /// Return true if this cmpxchg may spuriously fail. |
| bool isWeak() const { |
| return getSubclassDataFromInstruction() & 0x100; |
| } |
| |
| void setWeak(bool IsWeak) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) | |
| (IsWeak << 8)); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| /// Returns the success ordering constraint of this cmpxchg instruction. |
| AtomicOrdering getSuccessOrdering() const { |
| return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); |
| } |
| |
| /// Sets the success ordering constraint of this cmpxchg instruction. |
| void setSuccessOrdering(AtomicOrdering Ordering) { |
| assert(Ordering != AtomicOrdering::NotAtomic && |
| "CmpXchg instructions can only be atomic."); |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) | |
| ((unsigned)Ordering << 2)); |
| } |
| |
| /// Returns the failure ordering constraint of this cmpxchg instruction. |
| AtomicOrdering getFailureOrdering() const { |
| return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); |
| } |
| |
| /// Sets the failure ordering constraint of this cmpxchg instruction. |
| void setFailureOrdering(AtomicOrdering Ordering) { |
| assert(Ordering != AtomicOrdering::NotAtomic && |
| "CmpXchg instructions can only be atomic."); |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) | |
| ((unsigned)Ordering << 5)); |
| } |
| |
| /// Returns the synchronization scope ID of this cmpxchg instruction. |
| SyncScope::ID getSyncScopeID() const { |
| return SSID; |
| } |
| |
| /// Sets the synchronization scope ID of this cmpxchg instruction. |
| void setSyncScopeID(SyncScope::ID SSID) { |
| this->SSID = SSID; |
| } |
| |
| Value *getPointerOperand() { return getOperand(0); } |
| const Value *getPointerOperand() const { return getOperand(0); } |
| static unsigned getPointerOperandIndex() { return 0U; } |
| |
| Value *getCompareOperand() { return getOperand(1); } |
| const Value *getCompareOperand() const { return getOperand(1); } |
| |
| Value *getNewValOperand() { return getOperand(2); } |
| const Value *getNewValOperand() const { return getOperand(2); } |
| |
| /// Returns the address space of the pointer operand. |
| unsigned getPointerAddressSpace() const { |
| return getPointerOperand()->getType()->getPointerAddressSpace(); |
| } |
| |
| /// Returns the strongest permitted ordering on failure, given the |
| /// desired ordering on success. |
| /// |
| /// If the comparison in a cmpxchg operation fails, there is no atomic store |
| /// so release semantics cannot be provided. So this function drops explicit |
| /// Release requests from the AtomicOrdering. A SequentiallyConsistent |
| /// operation would remain SequentiallyConsistent. |
| static AtomicOrdering |
| getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { |
| switch (SuccessOrdering) { |
| default: |
| llvm_unreachable("invalid cmpxchg success ordering"); |
| case AtomicOrdering::Release: |
| case AtomicOrdering::Monotonic: |
| return AtomicOrdering::Monotonic; |
| case AtomicOrdering::AcquireRelease: |
| case AtomicOrdering::Acquire: |
| return AtomicOrdering::Acquire; |
| case AtomicOrdering::SequentiallyConsistent: |
| return AtomicOrdering::SequentiallyConsistent; |
| } |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::AtomicCmpXchg; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| |
| /// The synchronization scope ID of this cmpxchg instruction. Not quite |
| /// enough room in SubClassData for everything, so synchronization scope ID |
| /// gets its own field. |
| SyncScope::ID SSID; |
| }; |
| |
| template <> |
| struct OperandTraits<AtomicCmpXchgInst> : |
| public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) |
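
// Example: a minimal sketch of a strong, sequentially consistent cmpxchg.
// `Ptr`, `Expected`, `Desired`, and `InsertPt` are assumed placeholders; the
// failure ordering is derived from the success ordering as described above.
//
//   AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
//   auto *CAS = new AtomicCmpXchgInst(
//       Ptr, Expected, Desired, Success,
//       AtomicCmpXchgInst::getStrongestFailureOrdering(Success),
//       SyncScope::System, InsertPt);
//   CAS->setWeak(false);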
| |
| //===----------------------------------------------------------------------===// |
| // AtomicRMWInst Class |
| //===----------------------------------------------------------------------===// |
| |
/// An instruction that atomically reads a memory location,
| /// combines it with another value, and then stores the result back. Returns |
| /// the old value. |
| /// |
| class AtomicRMWInst : public Instruction { |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| AtomicRMWInst *cloneImpl() const; |
| |
| public: |
| /// This enumeration lists the possible modifications atomicrmw can make. In |
| /// the descriptions, 'p' is the pointer to the instruction's memory location, |
| /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
| /// instruction. These instructions always return 'old'. |
| enum BinOp { |
| /// *p = v |
| Xchg, |
| /// *p = old + v |
| Add, |
| /// *p = old - v |
| Sub, |
| /// *p = old & v |
| And, |
| /// *p = ~(old & v) |
| Nand, |
| /// *p = old | v |
| Or, |
| /// *p = old ^ v |
| Xor, |
| /// *p = old >signed v ? old : v |
| Max, |
| /// *p = old <signed v ? old : v |
| Min, |
| /// *p = old >unsigned v ? old : v |
| UMax, |
| /// *p = old <unsigned v ? old : v |
| UMin, |
| |
| FIRST_BINOP = Xchg, |
| LAST_BINOP = UMin, |
| BAD_BINOP |
| }; |
| |
| AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
| AtomicOrdering Ordering, SyncScope::ID SSID, |
| Instruction *InsertBefore = nullptr); |
| AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, |
| AtomicOrdering Ordering, SyncScope::ID SSID, |
| BasicBlock *InsertAtEnd); |
| |
| // allocate space for exactly two operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 2); |
| } |
| |
| BinOp getOperation() const { |
| return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5); |
| } |
| |
| static StringRef getOperationName(BinOp Op); |
| |
| void setOperation(BinOp Operation) { |
| unsigned short SubclassData = getSubclassDataFromInstruction(); |
| setInstructionSubclassData((SubclassData & 31) | |
| (Operation << 5)); |
| } |
| |
| /// Return true if this is a RMW on a volatile memory location. |
| /// |
| bool isVolatile() const { |
| return getSubclassDataFromInstruction() & 1; |
| } |
| |
| /// Specify whether this is a volatile RMW or not. |
| /// |
| void setVolatile(bool V) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | |
| (unsigned)V); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| /// Returns the ordering constraint of this rmw instruction. |
| AtomicOrdering getOrdering() const { |
| return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); |
| } |
| |
| /// Sets the ordering constraint of this rmw instruction. |
| void setOrdering(AtomicOrdering Ordering) { |
| assert(Ordering != AtomicOrdering::NotAtomic && |
| "atomicrmw instructions can only be atomic."); |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) | |
| ((unsigned)Ordering << 2)); |
| } |
| |
| /// Returns the synchronization scope ID of this rmw instruction. |
| SyncScope::ID getSyncScopeID() const { |
| return SSID; |
| } |
| |
| /// Sets the synchronization scope ID of this rmw instruction. |
| void setSyncScopeID(SyncScope::ID SSID) { |
| this->SSID = SSID; |
| } |
| |
| Value *getPointerOperand() { return getOperand(0); } |
| const Value *getPointerOperand() const { return getOperand(0); } |
| static unsigned getPointerOperandIndex() { return 0U; } |
| |
| Value *getValOperand() { return getOperand(1); } |
| const Value *getValOperand() const { return getOperand(1); } |
| |
| /// Returns the address space of the pointer operand. |
| unsigned getPointerAddressSpace() const { |
| return getPointerOperand()->getType()->getPointerAddressSpace(); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::AtomicRMW; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| void Init(BinOp Operation, Value *Ptr, Value *Val, |
| AtomicOrdering Ordering, SyncScope::ID SSID); |
| |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| |
| /// The synchronization scope ID of this rmw instruction. Not quite enough |
| /// room in SubClassData for everything, so synchronization scope ID gets its |
| /// own field. |
| SyncScope::ID SSID; |
| }; |
| |
| template <> |
| struct OperandTraits<AtomicRMWInst> |
| : public FixedNumOperandTraits<AtomicRMWInst,2> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) |
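
// Example: a minimal sketch of an atomic fetch-and-add. `Ptr`, `Ctx`, and
// `InsertPt` are assumed placeholders; the increment must match the pointee
// type.
//
//   Value *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One,
//                                 AtomicOrdering::Monotonic,
//                                 SyncScope::System, InsertPt);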
| |
| //===----------------------------------------------------------------------===// |
| // GetElementPtrInst Class |
| //===----------------------------------------------------------------------===// |
| |
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indices for a gep instruction.
| // |
| inline Type *checkGEPType(Type *Ty) { |
| assert(Ty && "Invalid GetElementPtrInst indices for type!"); |
| return Ty; |
| } |
| |
/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs.
| /// |
| class GetElementPtrInst : public Instruction { |
| Type *SourceElementType; |
| Type *ResultElementType; |
| |
| GetElementPtrInst(const GetElementPtrInst &GEPI); |
| |
/// Constructors - Create a getelementptr instruction with a base pointer and
/// a list of indices. The first ctor can optionally insert before an existing
| /// instruction, the second appends the new instruction to the specified |
| /// BasicBlock. |
| inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, unsigned Values, |
| const Twine &NameStr, Instruction *InsertBefore); |
| inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, unsigned Values, |
| const Twine &NameStr, BasicBlock *InsertAtEnd); |
| |
| void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| GetElementPtrInst *cloneImpl() const; |
| |
| public: |
| static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| unsigned Values = 1 + unsigned(IdxList.size()); |
| if (!PointeeType) |
| PointeeType = |
| cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
| else |
| assert( |
| PointeeType == |
| cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); |
| return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
| NameStr, InsertBefore); |
| } |
| |
| static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| unsigned Values = 1 + unsigned(IdxList.size()); |
| if (!PointeeType) |
| PointeeType = |
| cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
| else |
| assert( |
| PointeeType == |
| cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); |
| return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
| NameStr, InsertAtEnd); |
| } |
| |
| /// Create an "inbounds" getelementptr. See the documentation for the |
| /// "inbounds" flag in LangRef.html for details. |
| static GetElementPtrInst *CreateInBounds(Value *Ptr, |
| ArrayRef<Value *> IdxList, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr){ |
| return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); |
| } |
| |
| static GetElementPtrInst * |
| CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| GetElementPtrInst *GEP = |
| Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
| GEP->setIsInBounds(true); |
| return GEP; |
| } |
| |
| static GetElementPtrInst *CreateInBounds(Value *Ptr, |
| ArrayRef<Value *> IdxList, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); |
| } |
| |
| static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| GetElementPtrInst *GEP = |
| Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
| GEP->setIsInBounds(true); |
| return GEP; |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| Type *getSourceElementType() const { return SourceElementType; } |
| |
| void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
| void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
| |
| Type *getResultElementType() const { |
| assert(ResultElementType == |
| cast<PointerType>(getType()->getScalarType())->getElementType()); |
| return ResultElementType; |
| } |
| |
| /// Returns the address space of this instruction's pointer type. |
| unsigned getAddressSpace() const { |
| // Note that this is always the same as the pointer operand's address space |
| // and that is cheaper to compute, so cheat here. |
| return getPointerAddressSpace(); |
| } |
| |
| /// Returns the type of the element that would be loaded with |
| /// a load instruction with the specified parameters. |
| /// |
| /// Null is returned if the indices are invalid for the specified |
| /// pointer type. |
| /// |
| static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
| static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
| static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
| |
| inline op_iterator idx_begin() { return op_begin()+1; } |
| inline const_op_iterator idx_begin() const { return op_begin()+1; } |
| inline op_iterator idx_end() { return op_end(); } |
| inline const_op_iterator idx_end() const { return op_end(); } |
| |
| inline iterator_range<op_iterator> indices() { |
| return make_range(idx_begin(), idx_end()); |
| } |
| |
| inline iterator_range<const_op_iterator> indices() const { |
| return make_range(idx_begin(), idx_end()); |
| } |
| |
| Value *getPointerOperand() { |
| return getOperand(0); |
| } |
| const Value *getPointerOperand() const { |
| return getOperand(0); |
| } |
| static unsigned getPointerOperandIndex() { |
return 0U; // Get the index for modifying the correct operand.
| } |
| |
| /// Method to return the pointer operand as a |
| /// PointerType. |
| Type *getPointerOperandType() const { |
| return getPointerOperand()->getType(); |
| } |
| |
| /// Returns the address space of the pointer operand. |
| unsigned getPointerAddressSpace() const { |
| return getPointerOperandType()->getPointerAddressSpace(); |
| } |
| |
| /// Returns the pointer type returned by the GEP |
| /// instruction, which may be a vector of pointers. |
| static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) { |
| return getGEPReturnType( |
| cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(), |
| Ptr, IdxList); |
| } |
| static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
| ArrayRef<Value *> IdxList) { |
| Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), |
| Ptr->getType()->getPointerAddressSpace()); |
| // Vector GEP |
| if (Ptr->getType()->isVectorTy()) { |
| unsigned NumElem = Ptr->getType()->getVectorNumElements(); |
| return VectorType::get(PtrTy, NumElem); |
| } |
| for (Value *Index : IdxList) |
| if (Index->getType()->isVectorTy()) { |
| unsigned NumElem = Index->getType()->getVectorNumElements(); |
| return VectorType::get(PtrTy, NumElem); |
| } |
| // Scalar GEP |
| return PtrTy; |
| } |
| |
| unsigned getNumIndices() const { // Note: always non-negative |
| return getNumOperands() - 1; |
| } |
| |
| bool hasIndices() const { |
| return getNumOperands() > 1; |
| } |
| |
| /// Return true if all of the indices of this GEP are |
| /// zeros. If so, the result pointer and the first operand have the same |
| /// value, just potentially different types. |
| bool hasAllZeroIndices() const; |
| |
| /// Return true if all of the indices of this GEP are |
| /// constant integers. If so, the result pointer and the first operand have |
| /// a constant offset between them. |
| bool hasAllConstantIndices() const; |
| |
| /// Set or clear the inbounds flag on this GEP instruction. |
| /// See LangRef.html for the meaning of inbounds on a getelementptr. |
| void setIsInBounds(bool b = true); |
| |
| /// Determine whether the GEP has the inbounds flag. |
| bool isInBounds() const; |
| |
| /// Accumulate the constant address offset of this GEP if possible. |
| /// |
| /// This routine accepts an APInt into which it will accumulate the constant |
| /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
| /// all-constant, it returns false and the value of the offset APInt is |
| /// undefined (it is *not* preserved!). The APInt passed into this routine |
| /// must be at least as wide as the IntPtr type for the address space of |
| /// the base GEP pointer. |
| bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return (I->getOpcode() == Instruction::GetElementPtr); |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<GetElementPtrInst> : |
| public VariadicOperandTraits<GetElementPtrInst, 1> { |
| }; |
| |
| GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, unsigned Values, |
| const Twine &NameStr, |
| Instruction *InsertBefore) |
| : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
| OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
| Values, InsertBefore), |
| SourceElementType(PointeeType), |
| ResultElementType(getIndexedType(PointeeType, IdxList)) { |
| assert(ResultElementType == |
| cast<PointerType>(getType()->getScalarType())->getElementType()); |
| init(Ptr, IdxList, NameStr); |
| } |
| |
| GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
| ArrayRef<Value *> IdxList, unsigned Values, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) |
| : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
| OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
| Values, InsertAtEnd), |
| SourceElementType(PointeeType), |
| ResultElementType(getIndexedType(PointeeType, IdxList)) { |
| assert(ResultElementType == |
| cast<PointerType>(getType()->getScalarType())->getElementType()); |
| init(Ptr, IdxList, NameStr); |
| } |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) |
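
// Example: a minimal sketch of an inbounds GEP addressing the third field of
// a struct. `StructTy`, `BasePtr`, `Ctx`, and `InsertPt` are assumed,
// illustrative names.
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Value *Idx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, 2)};
//   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
//       StructTy, BasePtr, Idx, "field", InsertPt);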
| |
| //===----------------------------------------------------------------------===// |
| // UnaryOperator Class |
| //===----------------------------------------------------------------------===// |
| |
/// A unary instruction.
| class UnaryOperator : public UnaryInstruction { |
| void AssertOK(); |
| |
| protected: |
| UnaryOperator(UnaryOps iType, Value *S, Type *Ty, |
| const Twine &Name, Instruction *InsertBefore); |
| UnaryOperator(UnaryOps iType, Value *S, Type *Ty, |
| const Twine &Name, BasicBlock *InsertAtEnd); |
| |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| UnaryOperator *cloneImpl() const; |
| |
| public: |
| |
| /// Construct a unary instruction, given the opcode and an operand. |
/// Optionally (if InsertBefore is specified) insert the instruction
| /// into a BasicBlock right before the specified instruction. The specified |
| /// Instruction is allowed to be a dereferenced end iterator. |
| /// |
| static UnaryOperator *Create(UnaryOps Op, Value *S, |
| const Twine &Name = Twine(), |
| Instruction *InsertBefore = nullptr); |
| |
| /// Construct a unary instruction, given the opcode and an operand. |
| /// Also automatically insert this instruction to the end of the |
| /// BasicBlock specified. |
| /// |
| static UnaryOperator *Create(UnaryOps Op, Value *S, |
| const Twine &Name, |
| BasicBlock *InsertAtEnd); |
| |
| /// These methods just forward to Create, and are useful when you |
| /// statically know what type of instruction you're going to create. These |
| /// helpers just save some typing. |
| #define HANDLE_UNARY_INST(N, OPC, CLASS) \ |
| static UnaryInstruction *Create##OPC(Value *V, \ |
| const Twine &Name = "") {\ |
| return Create(Instruction::OPC, V, Name);\ |
| } |
| #include "llvm/IR/Instruction.def" |
| #define HANDLE_UNARY_INST(N, OPC, CLASS) \ |
| static UnaryInstruction *Create##OPC(Value *V, \ |
| const Twine &Name, BasicBlock *BB) {\ |
| return Create(Instruction::OPC, V, Name, BB);\ |
| } |
| #include "llvm/IR/Instruction.def" |
| #define HANDLE_UNARY_INST(N, OPC, CLASS) \ |
| static UnaryInstruction *Create##OPC(Value *V, \ |
| const Twine &Name, Instruction *I) {\ |
| return Create(Instruction::OPC, V, Name, I);\ |
| } |
| #include "llvm/IR/Instruction.def" |
| |
| UnaryOps getOpcode() const { |
| return static_cast<UnaryOps>(Instruction::getOpcode()); |
| } |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // ICmpInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction compares its operands according to the predicate given |
| /// to the constructor. It only operates on integers or pointers. The operands |
/// must be of identical types.
| /// Represent an integer comparison operator. |
| class ICmpInst: public CmpInst { |
| void AssertOK() { |
| assert(isIntPredicate() && |
| "Invalid ICmp predicate value"); |
| assert(getOperand(0)->getType() == getOperand(1)->getType() && |
| "Both operands to ICmp instruction are not of the same type!"); |
| // Check that the operands are the right type |
| assert((getOperand(0)->getType()->isIntOrIntVectorTy() || |
| getOperand(0)->getType()->isPtrOrPtrVectorTy()) && |
| "Invalid operand types for ICmp instruction"); |
| } |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| /// Clone an identical ICmpInst |
| ICmpInst *cloneImpl() const; |
| |
| public: |
| /// Constructor with insert-before-instruction semantics. |
| ICmpInst( |
| Instruction *InsertBefore, ///< Where to insert |
| Predicate pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "" ///< Name of the instruction |
| ) : CmpInst(makeCmpResultType(LHS->getType()), |
| Instruction::ICmp, pred, LHS, RHS, NameStr, |
| InsertBefore) { |
| #ifndef NDEBUG |
| AssertOK(); |
| #endif |
| } |
| |
| /// Constructor with insert-at-end semantics. |
| ICmpInst( |
| BasicBlock &InsertAtEnd, ///< Block to insert into. |
| Predicate pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "" ///< Name of the instruction |
| ) : CmpInst(makeCmpResultType(LHS->getType()), |
| Instruction::ICmp, pred, LHS, RHS, NameStr, |
| &InsertAtEnd) { |
| #ifndef NDEBUG |
| AssertOK(); |
| #endif |
| } |
| |
| /// Constructor with no-insertion semantics |
| ICmpInst( |
| Predicate pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "" ///< Name of the instruction |
| ) : CmpInst(makeCmpResultType(LHS->getType()), |
| Instruction::ICmp, pred, LHS, RHS, NameStr) { |
| #ifndef NDEBUG |
| AssertOK(); |
| #endif |
| } |
| |
| /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
/// @returns the predicate that would be the result if the operands were
| /// regarded as signed. |
| /// Return the signed version of the predicate |
| Predicate getSignedPredicate() const { |
| return getSignedPredicate(getPredicate()); |
| } |
| |
| /// This is a static version that you can use without an instruction. |
| /// Return the signed version of the predicate. |
| static Predicate getSignedPredicate(Predicate pred); |
| |
| /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
/// @returns the predicate that would be the result if the operands were
| /// regarded as unsigned. |
| /// Return the unsigned version of the predicate |
| Predicate getUnsignedPredicate() const { |
| return getUnsignedPredicate(getPredicate()); |
| } |
| |
| /// This is a static version that you can use without an instruction. |
| /// Return the unsigned version of the predicate. |
| static Predicate getUnsignedPredicate(Predicate pred); |
| |
| /// Return true if this predicate is either EQ or NE. This also |
| /// tests for commutativity. |
| static bool isEquality(Predicate P) { |
| return P == ICMP_EQ || P == ICMP_NE; |
| } |
| |
| /// Return true if this predicate is either EQ or NE. This also |
| /// tests for commutativity. |
| bool isEquality() const { |
| return isEquality(getPredicate()); |
| } |
| |
| /// @returns true if the predicate of this ICmpInst is commutative |
| /// Determine if this relation is commutative. |
| bool isCommutative() const { return isEquality(); } |
| |
| /// Return true if the predicate is relational (not EQ or NE). |
| /// |
| bool isRelational() const { |
| return !isEquality(); |
| } |
| |
| /// Return true if the predicate is relational (not EQ or NE). |
| /// |
| static bool isRelational(Predicate P) { |
| return !isEquality(P); |
| } |
| |
| /// Exchange the two operands to this instruction in such a way that it does |
| /// not modify the semantics of the instruction. The predicate value may be |
| /// changed to retain the same result if the predicate is order dependent |
| /// (e.g. ult). |
| /// Swap operands and adjust predicate. |
| void swapOperands() { |
| setPredicate(getSwappedPredicate()); |
| Op<0>().swap(Op<1>()); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::ICmp; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // FCmpInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction compares its operands according to the predicate given |
| /// to the constructor. It only operates on floating point values or packed |
/// vectors of floating point values. The operands must be of identical types.
| /// Represents a floating point comparison operator. |
| class FCmpInst: public CmpInst { |
| void AssertOK() { |
| assert(isFPPredicate() && "Invalid FCmp predicate value"); |
| assert(getOperand(0)->getType() == getOperand(1)->getType() && |
| "Both operands to FCmp instruction are not of the same type!"); |
| // Check that the operands are the right type |
| assert(getOperand(0)->getType()->isFPOrFPVectorTy() && |
| "Invalid operand types for FCmp instruction"); |
| } |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| /// Clone an identical FCmpInst |
| FCmpInst *cloneImpl() const; |
| |
| public: |
| /// Constructor with insert-before-instruction semantics. |
| FCmpInst( |
| Instruction *InsertBefore, ///< Where to insert |
| Predicate pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "" ///< Name of the instruction |
| ) : CmpInst(makeCmpResultType(LHS->getType()), |
| Instruction::FCmp, pred, LHS, RHS, NameStr, |
| InsertBefore) { |
| AssertOK(); |
| } |
| |
| /// Constructor with insert-at-end semantics. |
| FCmpInst( |
| BasicBlock &InsertAtEnd, ///< Block to insert into. |
| Predicate pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "" ///< Name of the instruction |
| ) : CmpInst(makeCmpResultType(LHS->getType()), |
| Instruction::FCmp, pred, LHS, RHS, NameStr, |
| &InsertAtEnd) { |
| AssertOK(); |
| } |
| |
| /// Constructor with no-insertion semantics |
| FCmpInst( |
| Predicate Pred, ///< The predicate to use for the comparison |
| Value *LHS, ///< The left-hand-side of the expression |
| Value *RHS, ///< The right-hand-side of the expression |
| const Twine &NameStr = "", ///< Name of the instruction |
| Instruction *FlagsSource = nullptr |
| ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, |
| RHS, NameStr, nullptr, FlagsSource) { |
| AssertOK(); |
| } |
| |
| /// @returns true if the predicate of this instruction is EQ or NE. |
| /// Determine if this is an equality predicate. |
| static bool isEquality(Predicate Pred) { |
| return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || |
| Pred == FCMP_UNE; |
| } |
| |
| /// @returns true if the predicate of this instruction is EQ or NE. |
| /// Determine if this is an equality predicate. |
| bool isEquality() const { return isEquality(getPredicate()); } |
| |
| /// @returns true if the predicate of this instruction is commutative. |
| /// Determine if this is a commutative predicate. |
| bool isCommutative() const { |
| return isEquality() || |
| getPredicate() == FCMP_FALSE || |
| getPredicate() == FCMP_TRUE || |
| getPredicate() == FCMP_ORD || |
| getPredicate() == FCMP_UNO; |
| } |
| |
| /// @returns true if the predicate is relational (not EQ or NE). |
| /// Determine if this a relational predicate. |
| bool isRelational() const { return !isEquality(); } |
| |
| /// Exchange the two operands to this instruction in such a way that it does |
| /// not modify the semantics of the instruction. The predicate value may be |
| /// changed to retain the same result if the predicate is order dependent |
| /// (e.g. ult). |
| /// Swap operands and adjust predicate. |
| void swapOperands() { |
| setPredicate(getSwappedPredicate()); |
| Op<0>().swap(Op<1>()); |
| } |
| |
| /// Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::FCmp; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| /// This class represents a function call, abstracting a target |
/// machine's calling convention. This class uses the low bit of the SubClassData
| /// field to indicate whether or not this is a tail call. The rest of the bits |
| /// hold the calling convention of the call. |
| /// |
| class CallInst : public CallBase { |
| CallInst(const CallInst &CI); |
| |
/// Construct a CallInst given a range of arguments.
| inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
| Instruction *InsertBefore); |
| |
| inline CallInst(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
| Instruction *InsertBefore) |
| : CallInst(cast<FunctionType>( |
| cast<PointerType>(Func->getType())->getElementType()), |
| Func, Args, Bundles, NameStr, InsertBefore) {} |
| |
| inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr, |
| Instruction *InsertBefore) |
| : CallInst(Func, Args, None, NameStr, InsertBefore) {} |
| |
/// Construct a CallInst given a range of arguments.
| inline CallInst(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
| BasicBlock *InsertAtEnd); |
| |
| explicit CallInst(Value *F, const Twine &NameStr, Instruction *InsertBefore); |
| |
| CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd); |
| |
| void init(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) { |
| init(cast<FunctionType>( |
| cast<PointerType>(Func->getType())->getElementType()), |
| Func, Args, Bundles, NameStr); |
| } |
| void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
| void init(Value *Func, const Twine &NameStr); |
| |
| /// Compute the number of operands to allocate. |
| static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
| // We need one operand for the called function, plus the input operand |
| // counts provided. |
| return 1 + NumArgs + NumBundleInputs; |
| } |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| CallInst *cloneImpl() const; |
| |
| public: |
| static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles = None, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return Create(cast<FunctionType>( |
| cast<PointerType>(Func->getType())->getElementType()), |
| Func, Args, Bundles, NameStr, InsertBefore); |
| } |
| |
| static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
| const Twine &NameStr, |
| Instruction *InsertBefore = nullptr) { |
| return Create(cast<FunctionType>( |
| cast<PointerType>(Func->getType())->getElementType()), |
| Func, Args, None, NameStr, InsertBefore); |
| } |
| |
| static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
| const Twine &NameStr, |
| Instruction *InsertBefore = nullptr) { |
| return new (ComputeNumOperands(Args.size())) |
| CallInst(Ty, Func, Args, None, NameStr, InsertBefore); |
| } |
| |
| static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles = None, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| const int NumOperands = |
| ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
| const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
| |
| return new (NumOperands, DescriptorBytes) |
| CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); |
| } |
| |
| static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, |
| const Twine &NameStr, BasicBlock *InsertAtEnd) { |
| const int NumOperands = |
| ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
| const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
| |
| return new (NumOperands, DescriptorBytes) |
| CallInst(Func, Args, Bundles, NameStr, InsertAtEnd); |
| } |
| |
| static CallInst *Create(Value *Func, ArrayRef<Value *> Args, |
| const Twine &NameStr, BasicBlock *InsertAtEnd) { |
| return new (ComputeNumOperands(Args.size())) |
| CallInst(Func, Args, None, NameStr, InsertAtEnd); |
| } |
| |
| static CallInst *Create(Value *F, const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return new (ComputeNumOperands(0)) CallInst(F, NameStr, InsertBefore); |
| } |
| |
| static CallInst *Create(Value *F, const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new (ComputeNumOperands(0)) CallInst(F, NameStr, InsertAtEnd); |
| } |
| |
| /// Create a clone of \p CI with a different set of operand bundles and |
| /// insert it before \p InsertPt. |
| /// |
| /// The returned call instruction is identical to \p CI in every way except that |
| /// the operand bundles for the new instruction are set to the operand bundles |
| /// in \p Bundles. |
| static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, |
| Instruction *InsertPt = nullptr); |
| |
| /// Generate the IR for a call to malloc: |
| /// 1. Compute the malloc call's argument as the specified type's size, |
| /// possibly multiplied by the array size if the array size is not |
| /// constant 1. |
| /// 2. Call malloc with that argument. |
| /// 3. Bitcast the result of the malloc call to the specified type. |
| static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
| Type *AllocTy, Value *AllocSize, |
| Value *ArraySize = nullptr, |
| Function *MallocF = nullptr, |
| const Twine &Name = ""); |
| static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
| Type *AllocTy, Value *AllocSize, |
| Value *ArraySize = nullptr, |
| Function *MallocF = nullptr, |
| const Twine &Name = ""); |
| static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
| Type *AllocTy, Value *AllocSize, |
| Value *ArraySize = nullptr, |
| ArrayRef<OperandBundleDef> Bundles = None, |
| Function *MallocF = nullptr, |
| const Twine &Name = ""); |
| static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
| Type *AllocTy, Value *AllocSize, |
| Value *ArraySize = nullptr, |
| ArrayRef<OperandBundleDef> Bundles = None, |
| Function *MallocF = nullptr, |
| const Twine &Name = ""); |
| /// Generate the IR for a call to the builtin free function. |
| static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); |
| static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); |
| static Instruction *CreateFree(Value *Source, |
| ArrayRef<OperandBundleDef> Bundles, |
| Instruction *InsertBefore); |
| static Instruction *CreateFree(Value *Source, |
| ArrayRef<OperandBundleDef> Bundles, |
| BasicBlock *InsertAtEnd); |
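| |
| /// A usage sketch for the malloc/free helpers above (illustrative only); it |
| /// assumes a DataLayout DL, a type Type *Ty to allocate, and an insertion |
| /// point Instruction *InsertPt: |
| /// |
| ///   Type *IntPtrTy = DL.getIntPtrType(Ty->getContext()); |
| ///   Value *AllocSize = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(Ty)); |
| ///   Instruction *Malloc = |
| ///       CallInst::CreateMalloc(InsertPt, IntPtrTy, Ty, AllocSize); |
| ///   // ... use the allocation, then release it: |
| ///   CallInst::CreateFree(Malloc, InsertPt); |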
| |
| // Note that 'musttail' implies 'tail'. |
| enum TailCallKind { |
| TCK_None = 0, |
| TCK_Tail = 1, |
| TCK_MustTail = 2, |
| TCK_NoTail = 3 |
| }; |
| TailCallKind getTailCallKind() const { |
| return TailCallKind(getSubclassDataFromInstruction() & 3); |
| } |
| |
| bool isTailCall() const { |
| unsigned Kind = getSubclassDataFromInstruction() & 3; |
| return Kind == TCK_Tail || Kind == TCK_MustTail; |
| } |
| |
| bool isMustTailCall() const { |
| return (getSubclassDataFromInstruction() & 3) == TCK_MustTail; |
| } |
| |
| bool isNoTailCall() const { |
| return (getSubclassDataFromInstruction() & 3) == TCK_NoTail; |
| } |
| |
| void setTailCall(bool isTC = true) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) | |
| unsigned(isTC ? TCK_Tail : TCK_None)); |
| } |
| |
| void setTailCallKind(TailCallKind TCK) { |
| setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) | |
| unsigned(TCK)); |
| } |
| |
| /// Return true if the call can return twice. |
| bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } |
| void setCanReturnTwice() { |
| addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); |
| } |
| |
| /// Check if this call is an inline asm statement. |
| bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::Call; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| |
| private: |
| // Shadow Instruction::setInstructionSubclassData with a private forwarding |
| // method so that subclasses cannot accidentally use it. |
| void setInstructionSubclassData(unsigned short D) { |
| Instruction::setInstructionSubclassData(D); |
| } |
| }; |
| |
| CallInst::CallInst(Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
| BasicBlock *InsertAtEnd) |
| : CallBase(cast<FunctionType>( |
| cast<PointerType>(Func->getType())->getElementType()) |
| ->getReturnType(), |
| Instruction::Call, |
| OperandTraits<CallBase>::op_end(this) - |
| (Args.size() + CountBundleInputs(Bundles) + 1), |
| unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
| InsertAtEnd) { |
| init(Func, Args, Bundles, NameStr); |
| } |
| |
| CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
| ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
| Instruction *InsertBefore) |
| : CallBase(Ty->getReturnType(), Instruction::Call, |
| OperandTraits<CallBase>::op_end(this) - |
| (Args.size() + CountBundleInputs(Bundles) + 1), |
| unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
| InsertBefore) { |
| init(Ty, Func, Args, Bundles, NameStr); |
| } |
| |
| //===----------------------------------------------------------------------===// |
| // SelectInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This class represents the LLVM 'select' instruction. |
| /// |
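| /// A minimal usage sketch (illustrative only); it assumes an i1 condition |
| /// Value *Cond, two values A and B of the same type, and an insertion point |
| /// Instruction *InsertPt: |
| /// |
| ///   Value *Sel = SelectInst::Create(Cond, A, B, "sel", InsertPt); |
| /// |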
| class SelectInst : public Instruction { |
| SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
| Instruction *InsertBefore) |
| : Instruction(S1->getType(), Instruction::Select, |
| &Op<0>(), 3, InsertBefore) { |
| init(C, S1, S2); |
| setName(NameStr); |
| } |
| |
| SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
| BasicBlock *InsertAtEnd) |
| : Instruction(S1->getType(), Instruction::Select, |
| &Op<0>(), 3, InsertAtEnd) { |
| init(C, S1, S2); |
| setName(NameStr); |
| } |
| |
| void init(Value *C, Value *S1, Value *S2) { |
| assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); |
| Op<0>() = C; |
| Op<1>() = S1; |
| Op<2>() = S2; |
| } |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| SelectInst *cloneImpl() const; |
| |
| public: |
| static SelectInst *Create(Value *C, Value *S1, Value *S2, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr, |
| Instruction *MDFrom = nullptr) { |
| SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); |
| if (MDFrom) |
| Sel->copyMetadata(*MDFrom); |
| return Sel; |
| } |
| |
| static SelectInst *Create(Value *C, Value *S1, Value *S2, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); |
| } |
| |
| const Value *getCondition() const { return Op<0>(); } |
| const Value *getTrueValue() const { return Op<1>(); } |
| const Value *getFalseValue() const { return Op<2>(); } |
| Value *getCondition() { return Op<0>(); } |
| Value *getTrueValue() { return Op<1>(); } |
| Value *getFalseValue() { return Op<2>(); } |
| |
| void setCondition(Value *V) { Op<0>() = V; } |
| void setTrueValue(Value *V) { Op<1>() = V; } |
| void setFalseValue(Value *V) { Op<2>() = V; } |
| |
| /// Return a string if the specified operands are invalid |
| /// for a select operation, otherwise return null. |
| static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| OtherOps getOpcode() const { |
| return static_cast<OtherOps>(Instruction::getOpcode()); |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::Select; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) |
| |
| //===----------------------------------------------------------------------===// |
| // VAArgInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This class represents the va_arg llvm instruction, which returns |
| /// an argument of the specified type given a va_list and increments that list. |
| /// |
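| /// A construction sketch (illustrative only); it assumes a va_list pointer |
| /// Value *VAListPtr and an insertion point Instruction *InsertPt: |
| /// |
| ///   Type *I32Ty = Type::getInt32Ty(VAListPtr->getContext()); |
| ///   Value *NextArg = new VAArgInst(VAListPtr, I32Ty, "va.arg", InsertPt); |
| /// |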
| class VAArgInst : public UnaryInstruction { |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| VAArgInst *cloneImpl() const; |
| |
| public: |
| VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) |
| : UnaryInstruction(Ty, VAArg, List, InsertBefore) { |
| setName(NameStr); |
| } |
| |
| VAArgInst(Value *List, Type *Ty, const Twine &NameStr, |
| BasicBlock *InsertAtEnd) |
| : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { |
| setName(NameStr); |
| } |
| |
| Value *getPointerOperand() { return getOperand(0); } |
| const Value *getPointerOperand() const { return getOperand(0); } |
| static unsigned getPointerOperandIndex() { return 0U; } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == VAArg; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| //===----------------------------------------------------------------------===// |
| // ExtractElementInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction extracts a single (scalar) |
| /// element from a VectorType value. |
| /// |
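| /// A usage sketch (illustrative only); it assumes a vector value Value *Vec |
| /// and an insertion point Instruction *InsertPt: |
| /// |
| ///   Value *Idx = ConstantInt::get(Type::getInt32Ty(Vec->getContext()), 0); |
| ///   Value *Elt0 = ExtractElementInst::Create(Vec, Idx, "elt0", InsertPt); |
| /// |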
| class ExtractElementInst : public Instruction { |
| ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr); |
| ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, |
| BasicBlock *InsertAtEnd); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| ExtractElementInst *cloneImpl() const; |
| |
| public: |
| static ExtractElementInst *Create(Value *Vec, Value *Idx, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); |
| } |
| |
| static ExtractElementInst *Create(Value *Vec, Value *Idx, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); |
| } |
| |
| /// Return true if an extractelement instruction can be |
| /// formed with the specified operands. |
| static bool isValidOperands(const Value *Vec, const Value *Idx); |
| |
| Value *getVectorOperand() { return Op<0>(); } |
| Value *getIndexOperand() { return Op<1>(); } |
| const Value *getVectorOperand() const { return Op<0>(); } |
| const Value *getIndexOperand() const { return Op<1>(); } |
| |
| VectorType *getVectorOperandType() const { |
| return cast<VectorType>(getVectorOperand()->getType()); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::ExtractElement; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<ExtractElementInst> : |
| public FixedNumOperandTraits<ExtractElementInst, 2> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) |
| |
| //===----------------------------------------------------------------------===// |
| // InsertElementInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction inserts a single (scalar) |
| /// element into a VectorType value. |
| /// |
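| /// A usage sketch (illustrative only); it assumes a vector value Value *Vec, |
| /// a scalar Value *Elt of the vector's element type, an integer index |
| /// Value *Idx, and an insertion point Instruction *InsertPt: |
| /// |
| ///   Value *NewVec = InsertElementInst::Create(Vec, Elt, Idx, "ins", InsertPt); |
| /// |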
| class InsertElementInst : public Instruction { |
| InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr); |
| InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
| BasicBlock *InsertAtEnd); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| InsertElementInst *cloneImpl() const; |
| |
| public: |
| static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
| } |
| |
| static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
| } |
| |
| /// Return true if an insertelement instruction can be |
| /// formed with the specified operands. |
| static bool isValidOperands(const Value *Vec, const Value *NewElt, |
| const Value *Idx); |
| |
| /// Overload to return most specific vector type. |
| /// |
| VectorType *getType() const { |
| return cast<VectorType>(Instruction::getType()); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::InsertElement; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<InsertElementInst> : |
| public FixedNumOperandTraits<InsertElementInst, 3> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) |
| |
| //===----------------------------------------------------------------------===// |
| // ShuffleVectorInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction constructs a fixed permutation of two |
| /// input vectors. |
| /// |
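| /// A construction sketch that reverses a 4-element vector (illustrative only); |
| /// it assumes a value Value *V1 of a 4-element vector type and an insertion |
| /// point Instruction *InsertPt: |
| /// |
| ///   Type *I32Ty = Type::getInt32Ty(V1->getContext()); |
| ///   Constant *Mask = ConstantVector::get( |
| ///       {ConstantInt::get(I32Ty, 3), ConstantInt::get(I32Ty, 2), |
| ///        ConstantInt::get(I32Ty, 1), ConstantInt::get(I32Ty, 0)}); |
| ///   Value *Rev = new ShuffleVectorInst(V1, UndefValue::get(V1->getType()), |
| ///                                      Mask, "rev", InsertPt); |
| ///   // For this shuffle, isReverse() returns true. |
| /// |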
| class ShuffleVectorInst : public Instruction { |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| ShuffleVectorInst *cloneImpl() const; |
| |
| public: |
| ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr); |
| ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
| const Twine &NameStr, BasicBlock *InsertAtEnd); |
| |
| // allocate space for exactly three operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 3); |
| } |
| |
| /// Return true if a shufflevector instruction can be |
| /// formed with the specified operands. |
| static bool isValidOperands(const Value *V1, const Value *V2, |
| const Value *Mask); |
| |
| /// Overload to return most specific vector type. |
| /// |
| VectorType *getType() const { |
| return cast<VectorType>(Instruction::getType()); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| Constant *getMask() const { |
| return cast<Constant>(getOperand(2)); |
| } |
| |
| /// Return the shuffle mask value for the specified element of the mask. |
| /// Return -1 if the element is undef. |
| static int getMaskValue(const Constant *Mask, unsigned Elt); |
| |
| /// Return the shuffle mask value of this instruction for the given element |
| /// index. Return -1 if the element is undef. |
| int getMaskValue(unsigned Elt) const { |
| return getMaskValue(getMask(), Elt); |
| } |
| |
| /// Convert the input shuffle mask operand to a vector of integers. Undefined |
| /// elements of the mask are returned as -1. |
| static void getShuffleMask(const Constant *Mask, |
| SmallVectorImpl<int> &Result); |
| |
| /// Return the mask for this instruction as a vector of integers. Undefined |
| /// elements of the mask are returned as -1. |
| void getShuffleMask(SmallVectorImpl<int> &Result) const { |
| return getShuffleMask(getMask(), Result); |
| } |
| |
| SmallVector<int, 16> getShuffleMask() const { |
| SmallVector<int, 16> Mask; |
| getShuffleMask(Mask); |
| return Mask; |
| } |
| |
| /// Return true if this shuffle returns a vector with a different number of |
| /// elements than its source vectors. |
| /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
| /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
| bool changesLength() const { |
| unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
| unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
| return NumSourceElts != NumMaskElts; |
| } |
| |
| /// Return true if this shuffle returns a vector with a greater number of |
| /// elements than its source vectors. |
| /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
| bool increasesLength() const { |
| unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements(); |
| unsigned NumMaskElts = getMask()->getType()->getVectorNumElements(); |
| return NumSourceElts < NumMaskElts; |
| } |
| |
| /// Return true if this shuffle mask chooses elements from exactly one source |
| /// vector. |
| /// Example: <7,5,undef,7> |
| /// This assumes that vector operands are the same length as the mask. |
| static bool isSingleSourceMask(ArrayRef<int> Mask); |
| static bool isSingleSourceMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isSingleSourceMask(MaskAsInts); |
| } |
| |
| /// Return true if this shuffle chooses elements from exactly one source |
| /// vector without changing the length of that vector. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
| /// TODO: Optionally allow length-changing shuffles. |
| bool isSingleSource() const { |
| return !changesLength() && isSingleSourceMask(getMask()); |
| } |
| |
| /// Return true if this shuffle mask chooses elements from exactly one source |
| /// vector without lane crossings. A shuffle using this mask is not |
| /// necessarily a no-op because it may change the number of elements from its |
| /// input vectors or it may provide demanded bits knowledge via undef lanes. |
| /// Example: <undef,undef,2,3> |
| static bool isIdentityMask(ArrayRef<int> Mask); |
| static bool isIdentityMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isIdentityMask(MaskAsInts); |
| } |
| |
| /// Return true if this shuffle chooses elements from exactly one source |
| /// vector without lane crossings and does not change the number of elements |
| /// from its input vectors. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
| bool isIdentity() const { |
| return !changesLength() && isIdentityMask(getShuffleMask()); |
| } |
| |
| /// Return true if this shuffle lengthens exactly one source vector with |
| /// undefs in the high elements. |
| bool isIdentityWithPadding() const; |
| |
| /// Return true if this shuffle extracts the first N elements of exactly one |
| /// source vector. |
| bool isIdentityWithExtract() const; |
| |
| /// Return true if this shuffle concatenates its 2 source vectors. This |
| /// returns false if either input is undefined. In that case, the shuffle is |
| /// better classified as an identity with padding operation. |
| bool isConcat() const; |
| |
| /// Return true if this shuffle mask chooses elements from its source vectors |
| /// without lane crossings. A shuffle using this mask would be |
| /// equivalent to a vector select with a constant condition operand. |
| /// Example: <4,1,6,undef> |
| /// This returns false if the mask does not choose from both input vectors. |
| /// In that case, the shuffle is better classified as an identity shuffle. |
| /// This assumes that vector operands are the same length as the mask |
| /// (a length-changing shuffle can never be equivalent to a vector select). |
| static bool isSelectMask(ArrayRef<int> Mask); |
| static bool isSelectMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isSelectMask(MaskAsInts); |
| } |
| |
| /// Return true if this shuffle chooses elements from its source vectors |
| /// without lane crossings and all operands have the same number of elements. |
| /// In other words, this shuffle is equivalent to a vector select with a |
| /// constant condition operand. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
| /// This returns false if the mask does not choose from both input vectors. |
| /// In that case, the shuffle is better classified as an identity shuffle. |
| /// TODO: Optionally allow length-changing shuffles. |
| bool isSelect() const { |
| return !changesLength() && isSelectMask(getMask()); |
| } |
| |
| /// Return true if this shuffle mask swaps the order of elements from exactly |
| /// one source vector. |
| /// Example: <7,6,undef,4> |
| /// This assumes that vector operands are the same length as the mask. |
| static bool isReverseMask(ArrayRef<int> Mask); |
| static bool isReverseMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isReverseMask(MaskAsInts); |
| } |
| |
| /// Return true if this shuffle swaps the order of elements from exactly |
| /// one source vector. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
| /// TODO: Optionally allow length-changing shuffles. |
| bool isReverse() const { |
| return !changesLength() && isReverseMask(getMask()); |
| } |
| |
| /// Return true if this shuffle mask chooses all elements with the same value |
| /// as the first element of exactly one source vector. |
| /// Example: <4,undef,undef,4> |
| /// This assumes that vector operands are the same length as the mask. |
| static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
| static bool isZeroEltSplatMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isZeroEltSplatMask(MaskAsInts); |
| } |
| |
| /// Return true if all elements of this shuffle are the same value as the |
| /// first element of exactly one source vector without changing the length |
| /// of that vector. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
| /// TODO: Optionally allow length-changing shuffles. |
| /// TODO: Optionally allow splats from other elements. |
| bool isZeroEltSplat() const { |
| return !changesLength() && isZeroEltSplatMask(getMask()); |
| } |
| |
| /// Return true if this shuffle mask is a transpose mask. |
| /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
| /// even- or odd-numbered vector elements from two n-dimensional source |
| /// vectors and write each result into consecutive elements of an |
| /// n-dimensional destination vector. Two shuffles are necessary to complete |
| /// the transpose, one for the even elements and another for the odd elements. |
| /// This description closely follows how the TRN1 and TRN2 AArch64 |
| /// instructions operate. |
| /// |
| /// For example, a simple 2x2 matrix can be transposed with: |
| /// |
| /// ; Original matrix |
| /// m0 = < a, b > |
| /// m1 = < c, d > |
| /// |
| /// ; Transposed matrix |
| /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
| /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
| /// |
| /// For matrices having greater than n columns, the resulting nx2 transposed |
| /// matrix is stored in two result vectors such that one vector contains |
| /// interleaved elements from all the even-numbered rows and the other vector |
| /// contains interleaved elements from all the odd-numbered rows. For example, |
| /// a 2x4 matrix can be transposed with: |
| /// |
| /// ; Original matrix |
| /// m0 = < a, b, c, d > |
| /// m1 = < e, f, g, h > |
| /// |
| /// ; Transposed matrix |
| /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
| /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
| static bool isTransposeMask(ArrayRef<int> Mask); |
| static bool isTransposeMask(const Constant *Mask) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isTransposeMask(MaskAsInts); |
| } |
| |
| /// Return true if this shuffle transposes the elements of its inputs without |
| /// changing the length of the vectors. This operation may also be known as a |
| /// merge or interleave. See the description for isTransposeMask() for the |
| /// exact specification. |
| /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
| bool isTranspose() const { |
| return !changesLength() && isTransposeMask(getMask()); |
| } |
| |
| /// Return true if this shuffle mask is an extract subvector mask. |
| /// A valid extract subvector mask returns a smaller vector from a single |
| /// source operand. The base extraction index is returned as well. |
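| /// For example (illustrative), the mask <2,3> with NumSrcElts == 4 extracts a |
| /// 2-element subvector starting at element 2, so Index is set to 2. |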
| static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
| int &Index); |
| static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
| int &Index) { |
| assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
| SmallVector<int, 16> MaskAsInts; |
| getShuffleMask(Mask, MaskAsInts); |
| return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
| } |
| |
| /// Return true if this shuffle mask is an extract subvector mask. |
| bool isExtractSubvectorMask(int &Index) const { |
| int NumSrcElts = Op<0>()->getType()->getVectorNumElements(); |
| return isExtractSubvectorMask(getMask(), NumSrcElts, Index); |
| } |
| |
| /// Change values in a shuffle permute mask assuming the two vector operands |
| /// of length InVecNumElts have swapped position. |
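| /// For example, with InVecNumElts == 4, the mask <0,5,2,7> becomes <4,1,6,3>; |
| /// undef entries (-1) are left unchanged. |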
| static void commuteShuffleMask(MutableArrayRef<int> Mask, |
| unsigned InVecNumElts) { |
| for (int &Idx : Mask) { |
| if (Idx == -1) |
| continue; |
| Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
| assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && |
| "shufflevector mask index out of range"); |
| } |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::ShuffleVector; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<ShuffleVectorInst> : |
| public FixedNumOperandTraits<ShuffleVectorInst, 3> { |
| }; |
| |
| DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) |
| |
| //===----------------------------------------------------------------------===// |
| // ExtractValueInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction extracts a struct member or array |
| /// element value from an aggregate value. |
| /// |
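| /// A usage sketch (illustrative only); it assumes an aggregate value |
| /// Value *Agg whose member 0 is a struct with at least two fields, and an |
| /// insertion point Instruction *InsertPt: |
| /// |
| ///   // Extract field 1 of member 0 of the aggregate. |
| ///   Value *Field = ExtractValueInst::Create(Agg, {0, 1}, "field", InsertPt); |
| /// |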
| class ExtractValueInst : public UnaryInstruction { |
| SmallVector<unsigned, 4> Indices; |
| |
| ExtractValueInst(const ExtractValueInst &EVI); |
| |
| /// Constructors - Create an extractvalue instruction with a base aggregate |
| /// value and a list of indices. The first ctor can optionally insert before |
| /// an existing instruction, the second appends the new instruction to the |
| /// specified BasicBlock. |
| inline ExtractValueInst(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| Instruction *InsertBefore); |
| inline ExtractValueInst(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, BasicBlock *InsertAtEnd); |
| |
| void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| ExtractValueInst *cloneImpl() const; |
| |
| public: |
| static ExtractValueInst *Create(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return new |
| ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
| } |
| |
| static ExtractValueInst *Create(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
| } |
| |
| /// Returns the type of the element that would be extracted |
| /// with an extractvalue instruction with the specified parameters. |
| /// |
| /// Null is returned if the indices are invalid for the specified type. |
| static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
| |
| using idx_iterator = const unsigned*; |
| |
| inline idx_iterator idx_begin() const { return Indices.begin(); } |
| inline idx_iterator idx_end() const { return Indices.end(); } |
| inline iterator_range<idx_iterator> indices() const { |
| return make_range(idx_begin(), idx_end()); |
| } |
| |
| Value *getAggregateOperand() { |
| return getOperand(0); |
| } |
| const Value *getAggregateOperand() const { |
| return getOperand(0); |
| } |
| static unsigned getAggregateOperandIndex() { |
| return 0U; // get index for modifying correct operand |
| } |
| |
| ArrayRef<unsigned> getIndices() const { |
| return Indices; |
| } |
| |
| unsigned getNumIndices() const { |
| return (unsigned)Indices.size(); |
| } |
| |
| bool hasIndices() const { |
| return true; |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::ExtractValue; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| ExtractValueInst::ExtractValueInst(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| Instruction *InsertBefore) |
| : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
| ExtractValue, Agg, InsertBefore) { |
| init(Idxs, NameStr); |
| } |
| |
| ExtractValueInst::ExtractValueInst(Value *Agg, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) |
| : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
| ExtractValue, Agg, InsertAtEnd) { |
| init(Idxs, NameStr); |
| } |
| |
| //===----------------------------------------------------------------------===// |
| // InsertValueInst Class |
| //===----------------------------------------------------------------------===// |
| |
| /// This instruction inserts a struct field or array element |
| /// value into an aggregate value. |
| /// |
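| /// A usage sketch (illustrative only); it assumes an aggregate value |
| /// Value *Agg, a value Value *Val of the type of field 1 of member 0, and an |
| /// insertion point Instruction *InsertPt: |
| /// |
| ///   Value *NewAgg = |
| ///       InsertValueInst::Create(Agg, Val, {0, 1}, "agg.upd", InsertPt); |
| /// |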
| class InsertValueInst : public Instruction { |
| SmallVector<unsigned, 4> Indices; |
| |
| InsertValueInst(const InsertValueInst &IVI); |
| |
| /// Constructors - Create an insertvalue instruction with a base aggregate |
| /// value, a value to insert, and a list of indices. The first ctor can |
| /// optionally insert before an existing instruction, the second appends |
| /// the new instruction to the specified BasicBlock. |
| inline InsertValueInst(Value *Agg, Value *Val, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| Instruction *InsertBefore); |
| inline InsertValueInst(Value *Agg, Value *Val, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, BasicBlock *InsertAtEnd); |
| |
| /// Constructors - These two constructors are convenience methods because one |
| /// and two index insertvalue instructions are so common. |
| InsertValueInst(Value *Agg, Value *Val, unsigned Idx, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr); |
| InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, |
| BasicBlock *InsertAtEnd); |
| |
| void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
| const Twine &NameStr); |
| |
| protected: |
| // Note: Instruction needs to be a friend here to call cloneImpl. |
| friend class Instruction; |
| |
| InsertValueInst *cloneImpl() const; |
| |
| public: |
| // allocate space for exactly two operands |
| void *operator new(size_t s) { |
| return User::operator new(s, 2); |
| } |
| |
| static InsertValueInst *Create(Value *Agg, Value *Val, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr = "", |
| Instruction *InsertBefore = nullptr) { |
| return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); |
| } |
| |
| static InsertValueInst *Create(Value *Agg, Value *Val, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| BasicBlock *InsertAtEnd) { |
| return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); |
| } |
| |
| /// Transparently provide more efficient getOperand methods. |
| DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
| |
| using idx_iterator = const unsigned*; |
| |
| inline idx_iterator idx_begin() const { return Indices.begin(); } |
| inline idx_iterator idx_end() const { return Indices.end(); } |
| inline iterator_range<idx_iterator> indices() const { |
| return make_range(idx_begin(), idx_end()); |
| } |
| |
| Value *getAggregateOperand() { |
| return getOperand(0); |
| } |
| const Value *getAggregateOperand() const { |
| return getOperand(0); |
| } |
| static unsigned getAggregateOperandIndex() { |
| return 0U; // get index for modifying correct operand |
| } |
| |
| Value *getInsertedValueOperand() { |
| return getOperand(1); |
| } |
| const Value *getInsertedValueOperand() const { |
| return getOperand(1); |
| } |
| static unsigned getInsertedValueOperandIndex() { |
| return 1U; // get index for modifying correct operand |
| } |
| |
| ArrayRef<unsigned> getIndices() const { |
| return Indices; |
| } |
| |
| unsigned getNumIndices() const { |
| return (unsigned)Indices.size(); |
| } |
| |
| bool hasIndices() const { |
| return true; |
| } |
| |
| // Methods for support type inquiry through isa, cast, and dyn_cast: |
| static bool classof(const Instruction *I) { |
| return I->getOpcode() == Instruction::InsertValue; |
| } |
| static bool classof(const Value *V) { |
| return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
| } |
| }; |
| |
| template <> |
| struct OperandTraits<InsertValueInst> : |
| public FixedNumOperandTraits<InsertValueInst, 2> { |
| }; |
| |
| InsertValueInst::InsertValueInst(Value *Agg, |
| Value *Val, |
| ArrayRef<unsigned> Idxs, |
| const Twine &NameStr, |
| Instruction *InsertBefore) |
| : Instruction(Agg->getType(), InsertValue, |
| OperandTraits<InsertValueInst>::op_begin(this), |
|