//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class. This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
namespace llvm {
class APFloat;
class APInt;
class BasicBlock;
class ConstantInt;
class DataLayout;
class StringRef;
class Type;
class Value;
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//
/// An instruction to allocate memory on the stack.
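///
/// A minimal usage sketch (illustrative only, not part of the API); `Ctx` and
/// `InsertPt` are assumed to be provided by the surrounding code:
/// \code
///   AllocaInst *Slot =
///       new AllocaInst(Type::getInt32Ty(Ctx), /*AddrSpace=*/0,
///                      /*ArraySize=*/nullptr, Align(4), "slot", InsertPt);
///   bool Free = Slot->isStaticAlloca(); // true if placed in the entry block
/// \endcode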
class AllocaInst : public UnaryInstruction {
Type *AllocatedType;
using AlignmentField = AlignmentBitfieldElementT<0>;
using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
SwiftErrorField>(),
"Bitfields must be contiguous");
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
AllocaInst *cloneImpl() const;
public:
explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
const Twine &Name, Instruction *InsertBefore);
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
const Twine &Name, BasicBlock *InsertAtEnd);
AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
Instruction *InsertBefore);
AllocaInst(Type *Ty, unsigned AddrSpace,
const Twine &Name, BasicBlock *InsertAtEnd);
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
const Twine &Name = "", Instruction *InsertBefore = nullptr);
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
const Twine &Name, BasicBlock *InsertAtEnd);
/// Return true if there is an allocation size parameter to the allocation
/// instruction that is not 1.
bool isArrayAllocation() const;
/// Get the number of elements allocated. For a simple allocation of a single
/// element, this will return a constant 1 value.
const Value *getArraySize() const { return getOperand(0); }
Value *getArraySize() { return getOperand(0); }
/// Overload to return the most specific pointer type.
PointerType *getType() const {
return cast<PointerType>(Instruction::getType());
}
/// Return the address space for the allocation.
unsigned getAddressSpace() const {
return getType()->getAddressSpace();
}
/// Get allocation size in bytes. Returns std::nullopt if size can't be
/// determined, e.g. in case of a VLA.
std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
/// Get allocation size in bits. Returns std::nullopt if size can't be
/// determined, e.g. in case of a VLA.
std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
/// Return the type that is being allocated by the instruction.
Type *getAllocatedType() const { return AllocatedType; }
/// For use only in special circumstances that need to generically
/// transform a whole instruction (e.g. IR linking and vectorization).
void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
/// Return the alignment of the memory that is being allocated by the
/// instruction.
Align getAlign() const {
return Align(1ULL << getSubclassData<AlignmentField>());
}
void setAlignment(Align Align) {
setSubclassData<AlignmentField>(Log2(Align));
}
/// Return true if this alloca is in the entry block of the function and is a
/// constant size. If so, the code generator will fold it into the
/// prolog/epilog code, so it is basically free.
bool isStaticAlloca() const;
/// Return true if this alloca is used as an inalloca argument to a call. Such
/// allocas are never considered static even if they are in the entry block.
bool isUsedWithInAlloca() const {
return getSubclassData<UsedWithInAllocaField>();
}
/// Specify whether this alloca is used to represent the arguments to a call.
void setUsedWithInAlloca(bool V) {
setSubclassData<UsedWithInAllocaField>(V);
}
/// Return true if this alloca is used as a swifterror argument to a call.
bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
/// Specify whether this alloca is used to represent a swifterror.
void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Alloca);
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
};
//===----------------------------------------------------------------------===//
// LoadInst Class
//===----------------------------------------------------------------------===//
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
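///
/// A minimal usage sketch (illustrative only); `Ctx`, `Ptr`, and `InsertPt`
/// are assumed to exist in the calling code:
/// \code
///   LoadInst *LI = new LoadInst(Type::getInt64Ty(Ctx), Ptr, "val",
///                               /*isVolatile=*/false, Align(8),
///                               AtomicOrdering::Acquire, SyncScope::System,
///                               InsertPt);
///   bool Reorderable = LI->isUnordered(); // false: this is an acquire load
/// \endcode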
class LoadInst : public UnaryInstruction {
using VolatileField = BoolBitfieldElementT<0>;
using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
static_assert(
Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
"Bitfields must be contiguous");
void AssertOK();
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
LoadInst *cloneImpl() const;
public:
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
Instruction *InsertBefore);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
Instruction *InsertBefore);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
Align Align, Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
Align Align, BasicBlock *InsertAtEnd);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
Align Align, AtomicOrdering Order,
SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
Align Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
/// Return true if this is a load from a volatile memory location.
bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile load or not.
void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Return the alignment of the access that is being performed.
Align getAlign() const {
return Align(1ULL << (getSubclassData<AlignmentField>()));
}
void setAlignment(Align Align) {
setSubclassData<AlignmentField>(Log2(Align));
}
/// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
return getSubclassData<OrderingField>();
}
/// Sets the ordering constraint of this load instruction. May not be Release
/// or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this load instruction.
SyncScope::ID getSyncScopeID() const {
return SSID;
}
/// Sets the synchronization scope ID of this load instruction.
void setSyncScopeID(SyncScope::ID SSID) {
this->SSID = SSID;
}
/// Sets the ordering constraint and the synchronization scope ID of this load
/// instruction.
void setAtomic(AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return (getOrdering() == AtomicOrdering::NotAtomic ||
getOrdering() == AtomicOrdering::Unordered) &&
!isVolatile();
}
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperandType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Load;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this load instruction. Not quite enough
/// room in SubClassData for everything, so synchronization scope ID gets its
/// own field.
SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
// StoreInst Class
//===----------------------------------------------------------------------===//
/// An instruction for storing to memory.
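///
/// A minimal usage sketch (illustrative only); `Val`, `Ptr`, and `InsertPt`
/// are assumed to be supplied by the caller:
/// \code
///   StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(4),
///                                 AtomicOrdering::Release, SyncScope::System,
///                                 InsertPt);
///   SI->setAtomic(AtomicOrdering::Monotonic); // later relax the ordering
/// \endcode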
class StoreInst : public Instruction {
using VolatileField = BoolBitfieldElementT<0>;
using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
static_assert(
Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
"Bitfields must be contiguous");
void AssertOK();
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
StoreInst *cloneImpl() const;
public:
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
BasicBlock *InsertAtEnd);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
void *operator new(size_t S) { return User::operator new(S, 2); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
/// Return true if this is a store to a volatile memory location.
bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile store or not.
void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
Align getAlign() const {
return Align(1ULL << (getSubclassData<AlignmentField>()));
}
void setAlignment(Align Align) {
setSubclassData<AlignmentField>(Log2(Align));
}
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
return getSubclassData<OrderingField>();
}
/// Sets the ordering constraint of this store instruction. May not be
/// Acquire or AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this store instruction.
SyncScope::ID getSyncScopeID() const {
return SSID;
}
/// Sets the synchronization scope ID of this store instruction.
void setSyncScopeID(SyncScope::ID SSID) {
this->SSID = SSID;
}
/// Sets the ordering constraint and the synchronization scope ID of this
/// store instruction.
void setAtomic(AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System) {
setOrdering(Ordering);
setSyncScopeID(SSID);
}
bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return (getOrdering() == AtomicOrdering::NotAtomic ||
getOrdering() == AtomicOrdering::Unordered) &&
!isVolatile();
}
Value *getValueOperand() { return getOperand(0); }
const Value *getValueOperand() const { return getOperand(0); }
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperandType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Store;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this store instruction. Not quite enough
/// room in SubClassData for everything, so synchronization scope ID gets its
/// own field.
SyncScope::ID SSID;
};
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
//===----------------------------------------------------------------------===//
// FenceInst Class
//===----------------------------------------------------------------------===//
/// An instruction for ordering other memory operations.
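///
/// A minimal usage sketch (illustrative only); `Ctx` and `InsertPt` are
/// assumed to be supplied by the caller:
/// \code
///   FenceInst *FI = new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
///                                 SyncScope::System, InsertPt);
/// \endcode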
class FenceInst : public Instruction {
using OrderingField = AtomicOrderingBitfieldElementT<0>;
void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
FenceInst *cloneImpl() const;
public:
// Ordering may only be Acquire, Release, AcquireRelease, or
// SequentiallyConsistent.
FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System,
Instruction *InsertBefore = nullptr);
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly zero operands
void *operator new(size_t S) { return User::operator new(S, 0); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
/// Returns the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const {
return getSubclassData<OrderingField>();
}
/// Sets the ordering constraint of this fence instruction. May only be
/// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
setSubclassData<OrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this fence instruction.
SyncScope::ID getSyncScopeID() const {
return SSID;
}
/// Sets the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID) {
this->SSID = SSID;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Fence;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this fence instruction. Not quite enough
/// room in SubClassData for everything, so synchronization scope ID gets its
/// own field.
SyncScope::ID SSID;
};
//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//
/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
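/// A minimal usage sketch (illustrative only); `Ptr`, `Expected`, `Desired`,
/// and `InsertPt` are assumed to be supplied by the caller:
/// \code
///   AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(
///       AtomicOrdering::AcquireRelease); // yields Acquire
///   auto *CAS = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(8),
///                                     AtomicOrdering::AcquireRelease, Fail,
///                                     SyncScope::System, InsertPt);
///   CAS->setWeak(true); // the compare may fail spuriously
/// \endcode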
class AtomicCmpXchgInst : public Instruction {
void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SyncScope::ID SSID);
template <unsigned Offset>
using AtomicOrderingBitfieldElement =
typename Bitfield::Element<AtomicOrdering, Offset, 3,
AtomicOrdering::LAST>;
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
AtomicCmpXchgInst *cloneImpl() const;
public:
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly three operands
void *operator new(size_t S) { return User::operator new(S, 3); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
using VolatileField = BoolBitfieldElementT<0>;
using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
using SuccessOrderingField =
AtomicOrderingBitfieldElementT<WeakField::NextBit>;
using FailureOrderingField =
AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
using AlignmentField =
AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
static_assert(
Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
FailureOrderingField, AlignmentField>(),
"Bitfields must be contiguous");
/// Return the alignment of the memory location that this cmpxchg instruction
/// operates on.
Align getAlign() const {
return Align(1ULL << getSubclassData<AlignmentField>());
}
void setAlignment(Align Align) {
setSubclassData<AlignmentField>(Log2(Align));
}
/// Return true if this is a cmpxchg from a volatile memory
/// location.
///
bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile cmpxchg.
///
void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Return true if this cmpxchg may spuriously fail.
bool isWeak() const { return getSubclassData<WeakField>(); }
void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
return Ordering != AtomicOrdering::NotAtomic &&
Ordering != AtomicOrdering::Unordered;
}
static bool isValidFailureOrdering(AtomicOrdering Ordering) {
return Ordering != AtomicOrdering::NotAtomic &&
Ordering != AtomicOrdering::Unordered &&
Ordering != AtomicOrdering::AcquireRelease &&
Ordering != AtomicOrdering::Release;
}
/// Returns the success ordering constraint of this cmpxchg instruction.
AtomicOrdering getSuccessOrdering() const {
return getSubclassData<SuccessOrderingField>();
}
/// Sets the success ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering) {
assert(isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering");
setSubclassData<SuccessOrderingField>(Ordering);
}
/// Returns the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const {
return getSubclassData<FailureOrderingField>();
}
/// Sets the failure ordering constraint of this cmpxchg instruction.
void setFailureOrdering(AtomicOrdering Ordering) {
assert(isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering");
setSubclassData<FailureOrderingField>(Ordering);
}
/// Returns a single ordering which is at least as strong as both the
/// success and failure orderings for this cmpxchg.
AtomicOrdering getMergedOrdering() const {
if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
return AtomicOrdering::SequentiallyConsistent;
if (getFailureOrdering() == AtomicOrdering::Acquire) {
if (getSuccessOrdering() == AtomicOrdering::Monotonic)
return AtomicOrdering::Acquire;
if (getSuccessOrdering() == AtomicOrdering::Release)
return AtomicOrdering::AcquireRelease;
}
return getSuccessOrdering();
}
/// Returns the synchronization scope ID of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const {
return SSID;
}
/// Sets the synchronization scope ID of this cmpxchg instruction.
void setSyncScopeID(SyncScope::ID SSID) {
this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
Value *getCompareOperand() { return getOperand(1); }
const Value *getCompareOperand() const { return getOperand(1); }
Value *getNewValOperand() { return getOperand(2); }
const Value *getNewValOperand() const { return getOperand(2); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
/// Returns the strongest permitted ordering on failure, given the
/// desired ordering on success.
///
/// If the comparison in a cmpxchg operation fails, there is no atomic store
/// so release semantics cannot be provided. So this function drops explicit
/// Release requests from the AtomicOrdering. A SequentiallyConsistent
/// operation would remain SequentiallyConsistent.
static AtomicOrdering
getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
switch (SuccessOrdering) {
default:
llvm_unreachable("invalid cmpxchg success ordering");
case AtomicOrdering::Release:
case AtomicOrdering::Monotonic:
return AtomicOrdering::Monotonic;
case AtomicOrdering::AcquireRelease:
case AtomicOrdering::Acquire:
return AtomicOrdering::Acquire;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicCmpXchg;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this cmpxchg instruction. Not quite
/// enough room in SubClassData for everything, so synchronization scope ID
/// gets its own field.
SyncScope::ID SSID;
};
template <>
struct OperandTraits<AtomicCmpXchgInst> :
public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
//===----------------------------------------------------------------------===//
// AtomicRMWInst Class
//===----------------------------------------------------------------------===//
/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
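/// A minimal usage sketch (illustrative only); `Ptr`, `Delta`, and `InsertPt`
/// are assumed to be supplied by the caller:
/// \code
///   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Delta, Align(4),
///                                 AtomicOrdering::SequentiallyConsistent,
///                                 SyncScope::System, InsertPt);
///   // The instruction's result is the value that was in memory before the
///   // addition, i.e. 'old'.
/// \endcode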
class AtomicRMWInst : public Instruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
AtomicRMWInst *cloneImpl() const;
public:
/// This enumeration lists the possible modifications atomicrmw can make. In
/// the descriptions, 'p' is the pointer to the instruction's memory location,
/// 'old' is the initial value of *p, and 'v' is the other value passed to the
/// instruction. These instructions always return 'old'.
enum BinOp : unsigned {
/// *p = v
Xchg,
/// *p = old + v
Add,
/// *p = old - v
Sub,
/// *p = old & v
And,
/// *p = ~(old & v)
Nand,
/// *p = old | v
Or,
/// *p = old ^ v
Xor,
/// *p = old >signed v ? old : v
Max,
/// *p = old <signed v ? old : v
Min,
/// *p = old >unsigned v ? old : v
UMax,
/// *p = old <unsigned v ? old : v
UMin,
/// *p = old + v
FAdd,
/// *p = old - v
FSub,
/// *p = maxnum(old, v)
/// \p maxnum matches the behavior of \p llvm.maxnum.*.
FMax,
/// *p = minnum(old, v)
/// \p minnum matches the behavior of \p llvm.minnum.*.
FMin,
/// Increment one up to a maximum value.
/// *p = (old u>= v) ? 0 : (old + 1)
UIncWrap,
/// Decrement one until a minimum value or zero.
/// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
UDecWrap,
FIRST_BINOP = Xchg,
LAST_BINOP = UDecWrap,
BAD_BINOP
};
private:
template <unsigned Offset>
using AtomicOrderingBitfieldElement =
typename Bitfield::Element<AtomicOrdering, Offset, 3,
AtomicOrdering::LAST>;
template <unsigned Offset>
using BinOpBitfieldElement =
typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
public:
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
// allocate space for exactly two operands
void *operator new(size_t S) { return User::operator new(S, 2); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
using VolatileField = BoolBitfieldElementT<0>;
using AtomicOrderingField =
AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
OperationField, AlignmentField>(),
"Bitfields must be contiguous");
BinOp getOperation() const { return getSubclassData<OperationField>(); }
static StringRef getOperationName(BinOp Op);
static bool isFPOperation(BinOp Op) {
switch (Op) {
case AtomicRMWInst::FAdd:
case AtomicRMWInst::FSub:
case AtomicRMWInst::FMax:
case AtomicRMWInst::FMin:
return true;
default:
return false;
}
}
void setOperation(BinOp Operation) {
setSubclassData<OperationField>(Operation);
}
/// Return the alignment of the memory location that this rmw instruction
/// operates on.
Align getAlign() const {
return Align(1ULL << getSubclassData<AlignmentField>());
}
void setAlignment(Align Align) {
setSubclassData<AlignmentField>(Log2(Align));
}
/// Return true if this is a RMW on a volatile memory location.
///
bool isVolatile() const { return getSubclassData<VolatileField>(); }
/// Specify whether this is a volatile RMW or not.
///
void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Returns the ordering constraint of this rmw instruction.
AtomicOrdering getOrdering() const {
return getSubclassData<AtomicOrderingField>();
}
/// Sets the ordering constraint of this rmw instruction.
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
assert(Ordering != AtomicOrdering::Unordered &&
"atomicrmw instructions cannot be unordered.");
setSubclassData<AtomicOrderingField>(Ordering);
}
/// Returns the synchronization scope ID of this rmw instruction.
SyncScope::ID getSyncScopeID() const {
return SSID;
}
/// Sets the synchronization scope ID of this rmw instruction.
void setSyncScopeID(SyncScope::ID SSID) {
this->SSID = SSID;
}
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
Value *getValOperand() { return getOperand(1); }
const Value *getValOperand() const { return getOperand(1); }
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperand()->getType()->getPointerAddressSpace();
}
bool isFloatingPointOperation() const {
return isFPOperation(getOperation());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicRMW;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
private:
void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
AtomicOrdering Ordering, SyncScope::ID SSID);
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
/// The synchronization scope ID of this rmw instruction. Not quite enough
/// room in SubClassData for everything, so synchronization scope ID gets its
/// own field.
SyncScope::ID SSID;
};
template <>
struct OperandTraits<AtomicRMWInst>
: public FixedNumOperandTraits<AtomicRMWInst,2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
//===----------------------------------------------------------------------===//
// GetElementPtrInst Class
//===----------------------------------------------------------------------===//
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
assert(Ty && "Invalid GetElementPtrInst indices for type!");
return Ty;
}
/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs.
///
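/// A minimal usage sketch (illustrative only); `StructTy`, `BasePtr`, `Ctx`,
/// and `InsertPt` are assumed to be supplied by the caller:
/// \code
///   Value *Idx[] = {ConstantInt::get(Type::getInt64Ty(Ctx), 0),
///                   ConstantInt::get(Type::getInt32Ty(Ctx), 2)};
///   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
///       StructTy, BasePtr, Idx, "field.addr", InsertPt);
/// \endcode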
class GetElementPtrInst : public Instruction {
Type *SourceElementType;
Type *ResultElementType;
GetElementPtrInst(const GetElementPtrInst &GEPI);
/// Constructors - Create a getelementptr instruction with a base pointer and
/// a list of indices. The first ctor can optionally insert before an existing
/// instruction, the second appends the new instruction to the specified
/// BasicBlock.
inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr, Instruction *InsertBefore);
inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
GetElementPtrInst *cloneImpl() const;
public:
static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
unsigned Values = 1 + unsigned(IdxList.size());
assert(PointeeType && "Must specify element type");
return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
NameStr, InsertBefore);
}
static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
unsigned Values = 1 + unsigned(IdxList.size());
assert(PointeeType && "Must specify element type");
return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
NameStr, InsertAtEnd);
}
/// Create an "inbounds" getelementptr. See the documentation for the
/// "inbounds" flag in LangRef.html for details.
static GetElementPtrInst *
CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
GetElementPtrInst *GEP =
Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
GEP->setIsInBounds(true);
return GEP;
}
static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
GetElementPtrInst *GEP =
Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
GEP->setIsInBounds(true);
return GEP;
}
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
Type *getSourceElementType() const { return SourceElementType; }
void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
void setResultElementType(Type *Ty) { ResultElementType = Ty; }
Type *getResultElementType() const {
return ResultElementType;
}
/// Returns the address space of this instruction's pointer type.
unsigned getAddressSpace() const {
// Note that this is always the same as the pointer operand's address space
// and that is cheaper to compute, so cheat here.
return getPointerAddressSpace();
}
/// Returns the result type of a getelementptr with the given source
/// element type and indexes.
///
/// Null is returned if the indices are invalid for the specified
/// source element type.
static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
/// Return the type of the element at the given index of an indexable
/// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
///
/// Returns null if the type can't be indexed, or the given index is not
/// legal for the given type.
static Type *getTypeAtIndex(Type *Ty, Value *Idx);
static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
inline const_op_iterator idx_end() const { return op_end(); }
inline iterator_range<op_iterator> indices() {
return make_range(idx_begin(), idx_end());
}
inline iterator_range<const_op_iterator> indices() const {
return make_range(idx_begin(), idx_end());
}
Value *getPointerOperand() {
return getOperand(0);
}
const Value *getPointerOperand() const {
return getOperand(0);
}
static unsigned getPointerOperandIndex() {
return 0U; // get index for modifying correct operand.
}
/// Method to return the pointer operand as a
/// PointerType.
Type *getPointerOperandType() const {
return getPointerOperand()->getType();
}
/// Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
return getPointerOperandType()->getPointerAddressSpace();
}
/// Returns the pointer type returned by the GEP
/// instruction, which may be a vector of pointers.
static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
// Vector GEP
Type *Ty = Ptr->getType();
if (Ty->isVectorTy())
return Ty;
for (Value *Index : IdxList)
if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
ElementCount EltCount = IndexVTy->getElementCount();
return VectorType::get(Ty, EltCount);
}
// Scalar GEP
return Ty;
}
unsigned getNumIndices() const { // Note: always non-negative
return getNumOperands() - 1;
}
bool hasIndices() const {
return getNumOperands() > 1;
}
/// Return true if all of the indices of this GEP are
/// zeros. If so, the result pointer and the first operand have the same
/// value, just potentially different types.
bool hasAllZeroIndices() const;
/// Return true if all of the indices of this GEP are
/// constant integers. If so, the result pointer and the first operand have
/// a constant offset between them.
bool hasAllConstantIndices() const;
/// Set or clear the inbounds flag on this GEP instruction.
/// See LangRef.html for the meaning of inbounds on a getelementptr.
void setIsInBounds(bool b = true);
/// Determine whether the GEP has the inbounds flag.
bool isInBounds() const;
/// Accumulate the constant address offset of this GEP if possible.
///
/// This routine accepts an APInt into which it will accumulate the constant
/// offset of this GEP if the GEP is in fact constant. If the GEP is not
/// all-constant, it returns false and the value of the offset APInt is
/// undefined (it is *not* preserved!). The APInt passed into this routine
/// must be at least as wide as the IntPtr type for the address space of
/// the base GEP pointer.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
bool collectOffset(const DataLayout &DL, unsigned BitWidth,
MapVector<Value *, APInt> &VariableOffsets,
APInt &ConstantOffset) const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::GetElementPtr);
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<GetElementPtrInst> :
public VariadicOperandTraits<GetElementPtrInst, 1> {
};
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertBefore),
SourceElementType(PointeeType),
ResultElementType(getIndexedType(PointeeType, IdxList)) {
init(Ptr, IdxList, NameStr);
}
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
OperandTraits<GetElementPtrInst>::op_end(this) - Values,
Values, InsertAtEnd),
SourceElementType(PointeeType),
ResultElementType(getIndexedType(PointeeType, IdxList)) {
init(Ptr, IdxList, NameStr);
}
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
//===----------------------------------------------------------------------===//
// ICmpInst Class
//===----------------------------------------------------------------------===//
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be of identical types.
/// Represent an integer comparison operator.
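///
/// A minimal usage sketch (illustrative only); `A`, `B`, and `InsertPt` are
/// assumed to be supplied by the caller:
/// \code
///   ICmpInst *Cmp = new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, A, B, "cmp");
///   Cmp->swapOperands(); // operands exchanged, predicate becomes ICMP_UGT
/// \endcode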
class ICmpInst: public CmpInst {
void AssertOK() {
assert(isIntPredicate() &&
"Invalid ICmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to ICmp instruction are not of the same type!");
// Check that the operands are the right type
assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction");
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
/// Clone an identical ICmpInst
ICmpInst *cloneImpl() const;
public:
/// Constructor with insert-before-instruction semantics.
ICmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "" ///< Name of the instruction
) : CmpInst(makeCmpResultType(LHS->getType()),
Instruction::ICmp, pred, LHS, RHS, NameStr,
InsertBefore) {
#ifndef NDEBUG
AssertOK();
#endif
}
/// Constructor with insert-at-end semantics.
ICmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "" ///< Name of the instruction
) : CmpInst(makeCmpResultType(LHS->getType()),
Instruction::ICmp, pred, LHS, RHS, NameStr,
&InsertAtEnd) {
#ifndef NDEBUG
AssertOK();
#endif
}
/// Constructor with no-insertion semantics
ICmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "" ///< Name of the instruction
) : CmpInst(makeCmpResultType(LHS->getType()),
Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
AssertOK();
#endif
}
/// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as signed.
/// Return the signed version of the predicate
Predicate getSignedPredicate() const {
return getSignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
/// Return the signed version of the predicate.
static Predicate getSignedPredicate(Predicate pred);
/// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as unsigned.
/// Return the unsigned version of the predicate
Predicate getUnsignedPredicate() const {
return getUnsignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
/// Return the unsigned version of the predicate.
static Predicate getUnsignedPredicate(Predicate pred);
/// Return true if this predicate is either EQ or NE. This also
/// tests for commutativity.
static bool isEquality(Predicate P) {
return P == ICMP_EQ || P == ICMP_NE;
}
/// Return true if this predicate is either EQ or NE. This also
/// tests for commutativity.
bool isEquality() const {
return isEquality(getPredicate());
}
/// @returns true if the predicate of this ICmpInst is commutative
/// Determine if this relation is commutative.
bool isCommutative() const { return isEquality(); }
/// Return true if the predicate is relational (not EQ or NE).
///
bool isRelational() const {
return !isEquality();
}
/// Return true if the predicate is relational (not EQ or NE).
///
static bool isRelational(Predicate P) {
return !isEquality(P);
}
/// Return true if the predicate is SGT or UGT.
///
static bool isGT(Predicate P) {
return P == ICMP_SGT || P == ICMP_UGT;
}
/// Return true if the predicate is SLT or ULT.
///
static bool isLT(Predicate P) {
return P == ICMP_SLT || P == ICMP_ULT;
}
/// Return true if the predicate is SGE or UGE.
///
static bool isGE(Predicate P) {
return P == ICMP_SGE || P == ICMP_UGE;
}
/// Return true if the predicate is SLE or ULE.
///
static bool isLE(Predicate P) {
return P == ICMP_SLE || P == ICMP_ULE;
}
/// Returns the sequence of all ICmp predicates.
///
static auto predicates() { return ICmpPredicates(); }
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
/// Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
/// Return result of `LHS Pred RHS` comparison.
static bool compare(const APInt &LHS, const APInt &RHS,
ICmpInst::Predicate Pred);
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ICmp;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
//===----------------------------------------------------------------------===//
// FCmpInst Class
//===----------------------------------------------------------------------===//
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be of identical types.
/// Represents a floating point comparison operator.
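///
/// A minimal usage sketch (illustrative only); `X`, `Y`, and `InsertPt` are
/// assumed to be supplied by the caller:
/// \code
///   FCmpInst *Cmp = new FCmpInst(InsertPt, FCmpInst::FCMP_OLT, X, Y, "cmp");
///   bool Commutes = Cmp->isCommutative(); // false for ordered less-than
/// \endcode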
class FCmpInst: public CmpInst {
void AssertOK() {
assert(isFPPredicate() && "Invalid FCmp predicate value");
assert(getOperand(0)->getType() == getOperand(1)->getType() &&
"Both operands to FCmp instruction are not of the same type!");
// Check that the operands are the right type
assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
"Invalid operand types for FCmp instruction");
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
/// Clone an identical FCmpInst
FCmpInst *cloneImpl() const;
public:
/// Constructor with insert-before-instruction semantics.
FCmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "" ///< Name of the instruction
) : CmpInst(makeCmpResultType(LHS->getType()),
Instruction::FCmp, pred, LHS, RHS, NameStr,
InsertBefore) {
AssertOK();
}
/// Constructor with insert-at-end semantics.
FCmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "" ///< Name of the instruction
) : CmpInst(makeCmpResultType(LHS->getType()),
Instruction::FCmp, pred, LHS, RHS, NameStr,
&InsertAtEnd) {
AssertOK();
}
/// Constructor with no-insertion semantics
FCmpInst(
Predicate Pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
Value *RHS, ///< The right-hand-side of the expression
const Twine &NameStr = "", ///< Name of the instruction
Instruction *FlagsSource = nullptr
) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
RHS, NameStr, nullptr, FlagsSource) {
AssertOK();
}
/// @returns true if the predicate of this instruction is EQ or NE.
/// Determine if this is an equality predicate.
static bool isEquality(Predicate Pred) {
return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
Pred == FCMP_UNE;
}
/// @returns true if the predicate of this instruction is EQ or NE.
/// Determine if this is an equality predicate.
bool isEquality() const { return isEquality(getPredicate()); }
/// @returns true if the predicate of this instruction is commutative.
/// Determine if this is a commutative predicate.
bool isCommutative() const {
return isEquality() ||
getPredicate() == FCMP_FALSE ||
getPredicate() == FCMP_TRUE ||
getPredicate() == FCMP_ORD ||
getPredicate() == FCMP_UNO;
}
/// @returns true if the predicate is relational (not EQ or NE).
/// Determine if this a relational predicate.
bool isRelational() const { return !isEquality(); }
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
/// Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
/// Returns the sequence of all FCmp predicates.
///
static auto predicates() { return FCmpPredicates(); }
/// Return result of `LHS Pred RHS` comparison.
static bool compare(const APFloat &LHS, const APFloat &RHS,
FCmpInst::Predicate Pred);
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::FCmp;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses the low bits of the
/// SubclassData field to encode the tail call kind; the remaining bits hold
/// the calling convention of the call.
///
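/// A minimal usage sketch (illustrative only); `Callee` (a FunctionCallee for
/// a non-void function), `Arg0`, `Arg1`, and `InsertPt` are assumed to be
/// supplied by the caller:
/// \code
///   CallInst *Call = CallInst::Create(Callee, {Arg0, Arg1}, "res", InsertPt);
///   Call->setTailCallKind(CallInst::TCK_Tail);
/// \endcode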
class CallInst : public CallBase {
CallInst(const CallInst &CI);
/// Construct a CallInst given a range of arguments.
inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore);
inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, Instruction *InsertBefore)
: CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
/// Construct a CallInst given a range of arguments.
inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
BasicBlock *InsertAtEnd);
explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
Instruction *InsertBefore);
CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
BasicBlock *InsertAtEnd);
void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
/// Compute the number of operands to allocate.
static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
// We need one operand for the called function, plus the input operand
// counts provided.
return 1 + NumArgs + NumBundleInputs;
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
CallInst *cloneImpl() const;
public:
static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
}
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
return new (ComputeNumOperands(Args.size()))
CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
}
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = std::nullopt,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
const int NumOperands =
ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
return new (NumOperands, DescriptorBytes)
CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
}
static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
}
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return new (ComputeNumOperands(Args.size()))
CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
}
static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
const int NumOperands =
ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
return new (NumOperands, DescriptorBytes)
CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
}
static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
InsertBefore);
}
static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = std::nullopt,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
NameStr, InsertBefore);
}
static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
const Twine &NameStr,
Instruction *InsertBefore = nullptr) {
return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
InsertBefore);
}
static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
InsertAtEnd);
}
static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
InsertAtEnd);
}
static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
NameStr, InsertAtEnd);
}
/// Create a clone of \p CI with a different set of operand bundles and
/// insert it before \p InsertPt.
///
/// The returned call instruction is identical to \p CI in every way except that
/// the operand bundles for the new instruction are set to the operand bundles
/// in \p Bundles.
static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
Instruction *InsertPt = nullptr);
// Note that 'musttail' implies 'tail'.
enum TailCallKind : unsigned {
TCK_None = 0,
TCK_Tail = 1,
TCK_MustTail = 2,
TCK_NoTail = 3,
TCK_LAST = TCK_NoTail
};
using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
static_assert(
Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
"Bitfields must be contiguous");
TailCallKind getTailCallKind() const {
return getSubclassData<TailCallKindField>();
}
bool isTailCall() const {
TailCallKind Kind = getTailCallKind();
return Kind == TCK_Tail || Kind == TCK_MustTail;
}
bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
void setTailCallKind(TailCallKind TCK) {
setSubclassData<TailCallKindField>(TCK);
}
void setTailCall(bool IsTc = true) {
setTailCallKind(IsTc ? TCK_Tail : TCK_None);
}
/// Return true if the call can return twice
bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
/// Updates profile metadata by scaling it by \p S / \p T.
void updateProfWeight(uint64_t S, uint64_t T);
private:
// Shadow Instruction::setInstructionSubclassData with a private forwarding
// method so that subclasses cannot accidentally use it.
template <typename Bitfield>
void setSubclassData(typename Bitfield::Type Value) {
Instruction::setSubclassData<Bitfield>(Value);
}
};
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: CallBase(Ty->getReturnType(), Instruction::Call,
OperandTraits<CallBase>::op_end(this) -
(Args.size() + CountBundleInputs(Bundles) + 1),
unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
InsertAtEnd) {
init(Ty, Func, Args, Bundles, NameStr);
}
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
Instruction *InsertBefore)
: CallBase(Ty->getReturnType(), Instruction::Call,
OperandTraits<CallBase>::op_end(this) -
(Args.size() + CountBundleInputs(Bundles) + 1),
unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
InsertBefore) {
init(Ty, Func, Args, Bundles, NameStr);
}
//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//
/// This class represents the LLVM 'select' instruction.
///
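/// A minimal usage sketch (illustrative only); `Cond`, `TVal`, `FVal`, and
/// `InsertPt` are assumed to be supplied by the caller:
/// \code
///   assert(!SelectInst::areInvalidOperands(Cond, TVal, FVal));
///   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);
///   Value *IfTrue = Sel->getTrueValue(); // == TVal
/// \endcode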
class SelectInst : public Instruction {
SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(S1->getType(), Instruction::Select,
&Op<0>(), 3, InsertBefore) {
init(C, S1, S2);
setName(NameStr);
}
SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(S1->getType(), Instruction::Select,
&Op<0>(), 3, InsertAtEnd) {
init(C, S1, S2);
setName(NameStr);
}
void init(Value *C, Value *S1, Value *S2) {
assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
Op<0>() = C;
Op<1>() = S1;
Op<2>() = S2;
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
SelectInst *cloneImpl() const;
public:
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr,
Instruction *MDFrom = nullptr) {
SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
if (MDFrom)
Sel->copyMetadata(*MDFrom);
return Sel;
}
static SelectInst *Create(Value *C, Value *S1, Value *S2,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
}
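// Illustrative sketch only: building a select from existing values. `Cond`,
// `A`, `B`, and `InsertPt` are placeholders assumed to exist in the caller.
//   SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertPt);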
const Value *getCondition() const { return Op<0>(); }
const Value *getTrueValue() const { return Op<1>(); }
const Value *getFalseValue() const { return Op<2>(); }
Value *getCondition() { return Op<0>(); }
Value *getTrueValue() { return Op<1>(); }
Value *getFalseValue() { return Op<2>(); }
void setCondition(Value *V) { Op<0>() = V; }
void setTrueValue(Value *V) { Op<1>() = V; }
void setFalseValue(Value *V) { Op<2>() = V; }
/// Swap the true and false values of the select instruction.
/// This doesn't swap prof metadata.
void swapValues() { Op<1>().swap(Op<2>()); }
/// Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
OtherOps getOpcode() const {
return static_cast<OtherOps>(Instruction::getOpcode());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Select;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
//===----------------------------------------------------------------------===//
// VAArgInst Class
//===----------------------------------------------------------------------===//
/// This class represents the va_arg LLVM instruction, which returns an
/// argument of the specified type from a given va_list and advances that list.
///
class VAArgInst : public UnaryInstruction {
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
VAArgInst *cloneImpl() const;
public:
VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr)
: UnaryInstruction(Ty, VAArg, List, InsertBefore) {
setName(NameStr);
}
VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
setName(NameStr);
}
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == VAArg;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
//===----------------------------------------------------------------------===//
// ExtractElementInst Class
//===----------------------------------------------------------------------===//
/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
ExtractElementInst *cloneImpl() const;
public:
static ExtractElementInst *Create(Value *Vec, Value *Idx,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
}
static ExtractElementInst *Create(Value *Vec, Value *Idx,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
}
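// Illustrative sketch only: extracting lane 0 of a vector value. `Vec`,
// `Ctx`, and `InsertPt` are placeholders assumed to exist in the caller.
//   Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
//   ExtractElementInst *EE =
//       ExtractElementInst::Create(Vec, Zero, "lane0", InsertPt);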
/// Return true if an extractelement instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *Vec, const Value *Idx);
Value *getVectorOperand() { return Op<0>(); }
Value *getIndexOperand() { return Op<1>(); }
const Value *getVectorOperand() const { return Op<0>(); }
const Value *getIndexOperand() const { return Op<1>(); }
VectorType *getVectorOperandType() const {
return cast<VectorType>(getVectorOperand()->getType());
}
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractElement;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<ExtractElementInst> :
public FixedNumOperandTraits<ExtractElementInst, 2> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
//===----------------------------------------------------------------------===//
// InsertElementInst Class
//===----------------------------------------------------------------------===//
/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
InsertElementInst *cloneImpl() const;
public:
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
}
static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
}
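// Illustrative sketch only: writing a scalar into lane 1 of a vector value.
// `Vec`, `Elt`, `Ctx`, and `InsertPt` are placeholders assumed to exist.
//   Value *One = ConstantInt::get(Type::getInt64Ty(Ctx), 1);
//   InsertElementInst *IE =
//       InsertElementInst::Create(Vec, Elt, One, "ins", InsertPt);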
/// Return true if an insertelement instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *Vec, const Value *NewElt,
const Value *Idx);
/// Overload to return most specific vector type.
///
VectorType *getType() const {
return cast<VectorType>(Instruction::getType());
}
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertElement;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<InsertElementInst> :
public FixedNumOperandTraits<InsertElementInst, 3> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
//===----------------------------------------------------------------------===//
// ShuffleVectorInst Class
//===----------------------------------------------------------------------===//
constexpr int PoisonMaskElem = -1;
/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// PoisonMaskElem (-1) specifies that the result element is poison.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
SmallVector<int, 4> ShuffleMask;
Constant *ShuffleMaskForBitcode;
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
ShuffleVectorInst *cloneImpl() const;
public:
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
BasicBlock *InsertAtEnd);
ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
BasicBlock *InsertAtEnd);
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &NameStr, BasicBlock *InsertAtEnd);
ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
const Twine &NameStr, BasicBlock *InsertAtEnd);
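// Illustrative sketch only: interleaving the low halves of two <4 x n>
// vectors with a constant mask. `A`, `B`, and `InsertPt` are placeholders
// assumed to exist in the caller's context.
//   ShuffleVectorInst *Shuf =
//       new ShuffleVectorInst(A, B, ArrayRef<int>{0, 4, 1, 5}, "il", InsertPt);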
void *operator new(size_t S) { return User::operator new(S, 2); }
void operator delete(void *Ptr) { return User::operator delete(Ptr); }
/// Swap the operands and adjust the mask to preserve the semantics
/// of the instruction.
void commute();
/// Return true if a shufflevector instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *V1, const Value *V2,
const Value *Mask);
static bool isValidOperands(const Value *V1, const Value *V2,
ArrayRef<int> Mask);
/// Overload to return most specific vector type.
///
VectorType *getType() const {
return cast<VectorType>(Instruction::getType());
}
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Return the shuffle mask value of this instruction for the given element
/// index. Return PoisonMaskElem if the element is undef.
int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
/// Convert the input shuffle mask operand to a vector of integers. Undefined
/// elements of the mask are returned as PoisonMaskElem.
static void getShuffleMask(const Constant *Mask,
SmallVectorImpl<int> &Result);
/// Return the mask for this instruction as a vector of integers. Undefined
/// elements of the mask are returned as PoisonMaskElem.
void getShuffleMask(SmallVectorImpl<int> &Result) const {
Result.assign(ShuffleMask.begin(), ShuffleMask.end());
}
/// Return the mask for this instruction, for use in bitcode.
///
/// TODO: This is temporary until we decide a new bitcode encoding for
/// shufflevector.
Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
Type *ResultTy);
void setShuffleMask(ArrayRef<int> Mask);
ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
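// Illustrative sketch only: reading the mask back as integers. `SVI` is a
// placeholder ShuffleVectorInst* assumed to come from the surrounding code.
//   SmallVector<int, 8> Elts;
//   SVI->getShuffleMask(Elts);
//   bool LaneZeroIsPoison = !Elts.empty() && Elts[0] == PoisonMaskElem;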
/// Return true if this shuffle returns a vector with a different number of
/// elements than its source vectors.
/// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
/// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
bool changesLength() const {
unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
->getElementCount()
.getKnownMinValue();
unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts != NumMaskElts;
}
/// Return true if this shuffle returns a vector with a greater number of
/// elements than its source vectors.
/// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
bool increasesLength() const {
unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
->getElementCount()
.getKnownMinValue();
unsigned NumMaskElts = ShuffleMask.size();
return NumSourceElts < NumMaskElts;
}
/// Return true if this shuffle mask chooses elements from exactly one source
/// vector.
/// Example: <7,5,undef,7>
/// This assumes that vector operands (of length \p NumSrcElts) are the same
/// length as the mask.
static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isSingleSourceMask(MaskAsInts, NumSrcElts);
}
/// Return true if this shuffle chooses elements from exactly one source
/// vector without changing the length of that vector.
/// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
/// TODO: Optionally allow length-changing shuffles.
bool isSingleSource() const {
return !changesLength() &&
isSingleSourceMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle mask chooses elements from exactly one source
/// vector without lane crossings. A shuffle using this mask is not
/// necessarily a no-op because it may change the number of elements from its
/// input vectors or it may provide demanded bits knowledge via undef lanes.
/// Example: <undef,undef,2,3>
static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(Mask->getType()))
return false;
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isIdentityMask(MaskAsInts, NumSrcElts);
}
/// Return true if this shuffle chooses elements from exactly one source
/// vector without lane crossings and does not change the number of elements
/// from its input vectors.
/// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
bool isIdentity() const {
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(getType()))
return false;
return !changesLength() && isIdentityMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle lengthens exactly one source vector with
/// undefs in the high elements.
bool isIdentityWithPadding() const;
/// Return true if this shuffle extracts the first N elements of exactly one
/// source vector.
bool isIdentityWithExtract() const;
/// Return true if this shuffle concatenates its two source vectors. This
/// returns false if either input is undefined. In that case, the shuffle is
/// better classified as an identity-with-padding operation.
bool isConcat() const;
/// Return true if this shuffle mask chooses elements from its source vectors
/// without lane crossings. A shuffle using this mask would be
/// equivalent to a vector select with a constant condition operand.
/// Example: <4,1,6,undef>
/// This returns false if the mask does not choose from both input vectors.
/// In that case, the shuffle is better classified as an identity shuffle.
/// This assumes that vector operands are the same length as the mask
/// (a length-changing shuffle can never be equivalent to a vector select).
static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isSelectMask(MaskAsInts, NumSrcElts);
}
/// Return true if this shuffle chooses elements from its source vectors
/// without lane crossings and all operands have the same number of elements.
/// In other words, this shuffle is equivalent to a vector select with a
/// constant condition operand.
/// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
/// This returns false if the mask does not choose from both input vectors.
/// In that case, the shuffle is better classified as an identity shuffle.
/// TODO: Optionally allow length-changing shuffles.
bool isSelect() const {
return !changesLength() && isSelectMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle mask swaps the order of elements from exactly
/// one source vector.
/// Example: <7,6,undef,4>
/// This assumes that vector operands (of length \p NumSrcElts) are the same
/// length as the mask.
static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isReverseMask(MaskAsInts, NumSrcElts);
}
/// Return true if this shuffle swaps the order of elements from exactly
/// one source vector.
/// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
/// TODO: Optionally allow length-changing shuffles.
bool isReverse() const {
return !changesLength() && isReverseMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle mask chooses all elements with the same value
/// as the first element of exactly one source vector.
/// Example: <4,undef,undef,4>
/// This assumes that vector operands (of length \p NumSrcElts) are the same
/// length as the mask.
static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isZeroEltSplatMask(MaskAsInts, NumSrcElts);
}
/// Return true if all elements of this shuffle are the same value as the
/// first element of exactly one source vector without changing the length
/// of that vector.
/// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
/// TODO: Optionally allow length-changing shuffles.
/// TODO: Optionally allow splats from other elements.
bool isZeroEltSplat() const {
return !changesLength() &&
isZeroEltSplatMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle mask is a transpose mask.
/// Transpose vector masks transpose a 2xn matrix. They read corresponding
/// even- or odd-numbered vector elements from two n-dimensional source
/// vectors and write each result into consecutive elements of an
/// n-dimensional destination vector. Two shuffles are necessary to complete
/// the transpose, one for the even elements and another for the odd elements.
/// This description closely follows how the TRN1 and TRN2 AArch64
/// instructions operate.
///
/// For example, a simple 2x2 matrix can be transposed with:
///
/// ; Original matrix
/// m0 = < a, b >
/// m1 = < c, d >
///
/// ; Transposed matrix
/// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
/// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
///
/// For matrices having greater than n columns, the resulting nx2 transposed
/// matrix is stored in two result vectors such that one vector contains
/// interleaved elements from all the even-numbered rows and the other vector
/// contains interleaved elements from all the odd-numbered rows. For example,
/// a 2x4 matrix can be transposed with:
///
/// ; Original matrix
/// m0 = < a, b, c, d >
/// m1 = < e, f, g, h >
///
/// ; Transposed matrix
/// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
/// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isTransposeMask(MaskAsInts, NumSrcElts);
}
/// Return true if this shuffle transposes the elements of its inputs without
/// changing the length of the vectors. This operation may also be known as a
/// merge or interleave. See the description for isTransposeMask() for the
/// exact specification.
/// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
bool isTranspose() const {
return !changesLength() && isTransposeMask(ShuffleMask, ShuffleMask.size());
}
/// Return true if this shuffle mask is a splice mask, which concatenates the
/// two inputs together and then extracts an original-width vector starting
/// from the splice index.
/// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
/// This assumes that vector operands (of length \p NumSrcElts) are the same
/// length as the mask.
static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isSpliceMask(MaskAsInts, NumSrcElts, Index);
}
/// Return true if this shuffle splices two inputs without changing the length
/// of the vectors. This operation concatenates the two inputs together and
/// then extracts an original width vector starting from the splice index.
/// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
bool isSplice(int &Index) const {
return !changesLength() &&
isSpliceMask(ShuffleMask, ShuffleMask.size(), Index);
}
/// Return true if this shuffle mask is an extract subvector mask.
/// A valid extract subvector mask returns a smaller vector from a single
/// source operand. The base extraction index is returned as well.
static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
int &Index);
static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
int &Index) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(Mask->getType()))
return false;
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
}
/// Return true if this shuffle mask is an extract subvector mask.
bool isExtractSubvectorMask(int &Index) const {
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(getType()))
return false;
int NumSrcElts =
cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
}
/// Return true if this shuffle mask is an insert subvector mask.
/// A valid insert subvector mask inserts the lowest elements of a second
/// source operand into an in-place first source operand.
/// Both the subvector width and the insertion index are returned.
static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
int &NumSubElts, int &Index);
static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
int &NumSubElts, int &Index) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(Mask->getType()))
return false;
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
}
/// Return true if this shuffle mask is an insert subvector mask.
bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(getType()))
return false;
int NumSrcElts =
cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
}
/// Return true if this shuffle mask replicates each of the \p VF elements
/// in a vector \p ReplicationFactor times.
/// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
/// <0,0,0,1,1,1,2,2,2,3,3,3>
static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
int &VF);
static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
int &VF) {
assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
// Not possible to express a shuffle mask for a scalable vector for this
// case.
if (isa<ScalableVectorType>(Mask->getType()))
return false;
SmallVector<int, 16> MaskAsInts;
getShuffleMask(Mask, MaskAsInts);
return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
}
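// Illustrative sketch only: <0,0,0,1,1,1> replicates each of VF=2 elements
// ReplicationFactor=3 times, so this query is expected to succeed.
//   int RF = 0, VF = 0;
//   bool IsRep =
//       ShuffleVectorInst::isReplicationMask({0, 0, 0, 1, 1, 1}, RF, VF);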
/// Return true if this shuffle mask is a replication mask.
bool isReplicationMask(int &ReplicationFactor, int &VF) const;
/// Return true if this shuffle mask represents a "clustered" mask of size VF,
/// i.e. each index between [0..VF) is used exactly once in each submask of
/// size VF.
/// For example, the mask for \p VF=4 is:
/// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
/// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
/// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
/// element 3 is used twice in the second submask
/// (3,3,1,0) and index 2 is not used at all.
static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
/// Return true if this shuffle mask is a one-use-single-source("clustered")
/// mask.
bool isOneUseSingleSourceMask(int VF) const;
/// Change values in a shuffle permute mask assuming the two vector operands
/// of length InVecNumElts have swapped position.
static void commuteShuffleMask(MutableArrayRef<int> Mask,
unsigned InVecNumElts) {
for (int &Idx : Mask) {
if (Idx == -1)
continue;
Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range");
}
}
/// Return true if this shuffle interleaves its two input vectors together.
bool isInterleave(unsigned Factor);
/// Return true if the mask interleaves one or more input vectors together.
///
/// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
/// E.g. For a Factor of 2 (LaneLen=4):
/// <0, 4, 1, 5, 2, 6, 3, 7>
/// E.g. For a Factor of 3 (LaneLen=4):
/// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
/// E.g. For a Factor of 4 (LaneLen=2):
/// <0, 2, 6, 4, 1, 3, 7, 5>
///
/// NumInputElts is the total number of elements in the input vectors.
///
/// StartIndexes are the first indexes of each vector being interleaved,
/// substituting any indexes that were undef.
/// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
///
/// Note that this does not check if the input vectors are consecutive:
/// It will return true for masks such as
/// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
unsigned NumInputElts,
SmallVectorImpl<unsigned> &StartIndexes);
static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
unsigned NumInputElts) {
SmallVector<unsigned, 8> StartIndexes;
return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
}
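// Illustrative sketch only: the Factor=2 mask from the comment above,
// checked against two 4-element inputs (8 input elements in total).
//   bool IsIL = ShuffleVectorInst::isInterleaveMask(
//       {0, 4, 1, 5, 2, 6, 3, 7}, /*Factor=*/2, /*NumInputElts=*/8);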
/// Checks if the shuffle is a bit rotation of the first operand across
/// multiple subelements, e.g.:
///
/// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
///
/// could be expressed as
///
/// rotl <4 x i16> %a, 8
///
/// If it can be expressed as a rotation, returns the number of subelements to
/// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
unsigned MinSubElts, unsigned MaxSubElts,
unsigned &NumSubElts, unsigned &RotateAmt);
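// Illustrative sketch only: the <8 x i8> byte-swap mask from the comment
// above, which is expected to be recognized as a rotate of 2-byte groups by
// 8 bits (NumSubElts=2, RotateAmt=8).
//   unsigned NumSubElts = 0, RotateAmt = 0;
//   bool IsRot = ShuffleVectorInst::isBitRotateMask(
//       {1, 0, 3, 2, 5, 4, 7, 6}, /*EltSizeInBits=*/8, /*MinSubElts=*/2,
//       /*MaxSubElts=*/4, NumSubElts, RotateAmt);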
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ShuffleVector;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<ShuffleVectorInst>
: public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
//===----------------------------------------------------------------------===//
// ExtractValueInst Class
//===----------------------------------------------------------------------===//
/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
SmallVector<unsigned, 4> Indices;
ExtractValueInst(const ExtractValueInst &EVI);
/// Constructors - Create an extractvalue instruction with a base aggregate
/// value and a list of indices. The first ctor can optionally insert before
/// an existing instruction, the second appends the new instruction to the
/// specified BasicBlock.
inline ExtractValueInst(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
Instruction *InsertBefore);
inline ExtractValueInst(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr, BasicBlock *InsertAtEnd);
void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
ExtractValueInst *cloneImpl() const;
public:
static ExtractValueInst *Create(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new
ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
}
static ExtractValueInst *Create(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
}
/// Returns the type of the element that would be extracted
/// with an extractvalue instruction with the specified parameters.
///
/// Null is returned if the indices are invalid for the specified type.
static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
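// Illustrative sketch only: extracting the float stored at index path {1, 0}
// of an aggregate of type {i32, [2 x float]}. `Agg` and `InsertPt` are
// placeholders assumed to exist in the caller's context.
//   assert(ExtractValueInst::getIndexedType(Agg->getType(), {1, 0}));
//   ExtractValueInst *EV =
//       ExtractValueInst::Create(Agg, {1, 0}, "elt", InsertPt);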
using idx_iterator = const unsigned*;
inline idx_iterator idx_begin() const { return Indices.begin(); }
inline idx_iterator idx_end() const { return Indices.end(); }
inline iterator_range<idx_iterator> indices() const {
return make_range(idx_begin(), idx_end());
}
Value *getAggregateOperand() {
return getOperand(0);
}
const Value *getAggregateOperand() const {
return getOperand(0);
}
static unsigned getAggregateOperandIndex() {
return 0U; // get index for modifying correct operand
}
ArrayRef<unsigned> getIndices() const {
return Indices;
}
unsigned getNumIndices() const {
return (unsigned)Indices.size();
}
bool hasIndices() const {
return true;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractValue;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
ExtractValueInst::ExtractValueInst(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
Instruction *InsertBefore)
: UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
ExtractValue, Agg, InsertBefore) {
init(Idxs, NameStr);
}
ExtractValueInst::ExtractValueInst(Value *Agg,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
: UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
ExtractValue, Agg, InsertAtEnd) {
init(Idxs, NameStr);
}
//===----------------------------------------------------------------------===//
// InsertValueInst Class
//===----------------------------------------------------------------------===//
/// This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
SmallVector<unsigned, 4> Indices;
InsertValueInst(const InsertValueInst &IVI);
/// Constructors - Create an insertvalue instruction with a base aggregate
/// value, a value to insert, and a list of indices. The first ctor can
/// optionally insert before an existing instruction, the second appends
/// the new instruction to the specified BasicBlock.
inline InsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
Instruction *InsertBefore);
inline InsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr, BasicBlock *InsertAtEnd);
/// Constructors - These two constructors are convenience methods because one
/// and two index insertvalue instructions are so common.
InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr);
InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
BasicBlock *InsertAtEnd);
void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &NameStr);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
InsertValueInst *cloneImpl() const;
public:
// allocate space for exactly two operands
void *operator new(size_t S) { return User::operator new(S, 2); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr) {
return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
}
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
}
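// Illustrative sketch only: writing `Val` into the first member of an
// aggregate. `Agg`, `Val`, and `InsertPt` are placeholders assumed to exist
// in the caller's context.
//   InsertValueInst *IV =
//       InsertValueInst::Create(Agg, Val, {0}, "ins", InsertPt);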
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
using idx_iterator = const unsigned*;
inline idx_iterator idx_begin() const { return Indices.begin(); }
inline idx_iterator idx_end() const { return Indices.end(); }
inline iterator_range<idx_iterator> indices() const {
return make_range(idx_begin(), idx_end());
}
Value *getAggregateOperand() {
return getOperand(0);
}
const Value *getAggregateOperand() const {
return getOperand(0);
}
static unsigned getAggregateOperandIndex() {
return 0U; // get index for modifying correct operand
}
Value *getInsertedValueOperand() {
return getOperand(1);
}
const Value *getInsertedValueOperand() const {
return getOperand(1);
}
static unsigned getInsertedValueOperandIndex() {
return 1U; // get index for modifying correct operand
}
ArrayRef<unsigned> getIndices() const {
return Indices;
}
unsigned getNumIndices() const {
return (unsigned)Indices.size();
}
bool hasIndices() const {
return true;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertValue;
}
static bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
template <>
struct OperandTraits<InsertValueInst> :
public FixedNumOperandTraits<InsertValueInst, 2> {
};
InsertValueInst::InsertValueInst(Value *Agg,
Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
Instruction *InsertBefore)
: Instruction(Agg->getType(), InsertValue,
OperandTraits<InsertValueInst>::op_begin(this),
2, InsertBefore) {
init(Agg, Val, Idxs, NameStr);
}
InsertValueInst::InsertValueInst(Value *Agg,
Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(Agg->getType(), InsertValue,
OperandTraits<InsertValueInst>::op_begin(this),
2, InsertAtEnd) {
init(Agg, Val, Idxs, NameStr);
}
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
//===----------------------------------------------------------------------===//
// PHINode Class
//===----------------------------------------------------------------------===//
// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, which cannot exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
/// The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
PHINode(const PHINode &PN);
explicit PHINode(Type *Ty, unsigned NumReservedValues,
const Twine &NameStr = "",
Instruction *InsertBefore = nullptr)
: Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
ReservedSpace(NumReservedValues) {
assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
setName(NameStr);
allocHungoffUses(ReservedSpace);
}
PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
BasicBlock *InsertAtEnd)
: Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
ReservedSpace(NumReservedValues) {
assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
setName(NameStr);
allocHungoffUses(ReservedSpace);
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
PHINode *cloneImpl() const;
// allocHungoffUses - this is more complicated than the generic
// User::allocHungoffUses, because we have to allocate Uses for the incoming
// values and pointers to the incoming blocks, all in one allocation.
void allocHungoffUses(unsigned N) {
User::allocHungoffUses(N, /* IsPhi */ true);
}