summaryrefslogtreecommitdiffstats
path: root/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp')
-rw-r--r-- contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp | 229
1 files changed, 159 insertions, 70 deletions
diff --git a/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 9588dfb..e308f49 100644
--- a/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/contrib/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -22,6 +22,7 @@
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
+// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//
@@ -29,21 +30,22 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
@@ -151,25 +153,44 @@ class ImplicitNullChecks : public MachineFunctionPass {
const TargetRegisterInfo *TRI = nullptr;
AliasAnalysis *AA = nullptr;
MachineModuleInfo *MMI = nullptr;
+ MachineFrameInfo *MFI = nullptr;
bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
SmallVectorImpl<NullCheck> &NullCheckList);
- MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
- MachineBasicBlock *HandlerMBB);
+ MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+ MachineBasicBlock *HandlerMBB);
void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
- /// Is \p MI a memory operation that can be used to implicitly null check the
- /// value in \p PointerReg? \p PrevInsts is the set of instruction seen since
+ enum AliasResult {
+ AR_NoAlias,
+ AR_MayAlias,
+ AR_WillAliasEverything
+ };
+ /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
+ /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
+ /// they may alias and any further memory operation may also alias with \p PrevMI.
+ AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);
+
+ enum SuitabilityResult {
+ SR_Suitable,
+ SR_Unsuitable,
+ SR_Impossible
+ };
+ /// Return SR_Suitable if \p MI is a memory operation that can be used to
+ /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
+ /// \p MI cannot be used to null check, and SR_Impossible if there is
+ /// no point continuing the search because no later instruction will be
+ /// usable either. \p PrevInsts is the set of instructions seen since
/// the explicit null check on \p PointerReg.
- bool isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
- ArrayRef<MachineInstr *> PrevInsts);
+ SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+ ArrayRef<MachineInstr *> PrevInsts);
/// Return true if \p FaultingMI can be hoisted from after the the
/// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
/// non-null value if we also need to (and legally can) hoist a depedency.
- bool canHoistLoadInst(MachineInstr *FaultingMI, unsigned PointerReg,
- ArrayRef<MachineInstr *> InstsSeenSoFar,
- MachineBasicBlock *NullSucc, MachineInstr *&Dependence);
+ bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
+ ArrayRef<MachineInstr *> InstsSeenSoFar,
+ MachineBasicBlock *NullSucc, MachineInstr *&Dependence);
public:
static char ID;
@@ -193,7 +214,7 @@ public:
}
bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
- if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
+ if (MI->isCall() || MI->hasUnmodeledSideEffects())
return false;
auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
(void)IsRegMask;
@@ -248,7 +269,7 @@ bool ImplicitNullChecks::canReorder(const MachineInstr *A,
unsigned RegB = MOB.getReg();
- if (TRI->regsOverlap(RegA, RegB))
+ if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
return false;
}
}
@@ -260,6 +281,7 @@ bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getSubtarget().getInstrInfo();
TRI = MF.getRegInfo().getTargetRegisterInfo();
MMI = &MF.getMMI();
+ MFI = &MF.getFrameInfo();
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
SmallVector<NullCheck, 16> NullCheckList;
@@ -283,36 +305,76 @@ static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
return false;
}
-bool ImplicitNullChecks::isSuitableMemoryOp(
- MachineInstr &MI, unsigned PointerReg, ArrayRef<MachineInstr *> PrevInsts) {
+ImplicitNullChecks::AliasResult
+ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
+ MachineInstr *PrevMI) {
+ // If it is not memory access, skip the check.
+ if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
+ return AR_NoAlias;
+ // Load-Load may alias
+ if (!(MI.mayStore() || PrevMI->mayStore()))
+ return AR_NoAlias;
+ // We lost the info, so conservatively assume they alias. If it was a store
+ // then there is no point continuing, because we won't be able to check
+ // against it further.
+ if (MI.memoperands_empty())
+ return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
+ if (PrevMI->memoperands_empty())
+ return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;
+
+ for (MachineMemOperand *MMO1 : MI.memoperands()) {
+ // MMO1 should have a value because it comes from the operation we'd like
+ // to use as an implicit null check.
+ assert(MMO1->getValue() && "MMO1 should have a Value!");
+ for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
+ if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
+ if (PSV->mayAlias(MFI))
+ return AR_MayAlias;
+ continue;
+ }
+ llvm::AliasResult AAResult = AA->alias(
+ MemoryLocation(MMO1->getValue(), MemoryLocation::UnknownSize,
+ MMO1->getAAInfo()),
+ MemoryLocation(MMO2->getValue(), MemoryLocation::UnknownSize,
+ MMO2->getAAInfo()));
+ if (AAResult != NoAlias)
+ return AR_MayAlias;
+ }
+ }
+ return AR_NoAlias;
+}
+
+ImplicitNullChecks::SuitabilityResult
+ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
+ ArrayRef<MachineInstr *> PrevInsts) {
int64_t Offset;
unsigned BaseReg;
if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
BaseReg != PointerReg)
- return false;
-
- // We want the load to be issued at a sane offset from PointerReg, so that
- // if PointerReg is null then the load reliably page faults.
- if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
- return false;
-
- // Finally, we need to make sure that the load instruction actually is
- // loading from PointerReg, and there isn't some re-definition of PointerReg
- // between the compare and the load.
- for (auto *PrevMI : PrevInsts)
- for (auto &PrevMO : PrevMI->operands())
- if (PrevMO.isReg() && PrevMO.getReg() &&
- TRI->regsOverlap(PrevMO.getReg(), PointerReg))
- return false;
-
- return true;
+ return SR_Unsuitable;
+
+ // We want the mem access to be issued at a sane offset from PointerReg,
+ // so that if PointerReg is null then the access reliably page faults.
+ if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
+ Offset < PageSize))
+ return SR_Unsuitable;
+
+ // Finally, check whether the current memory access aliases with previous one.
+ for (auto *PrevMI : PrevInsts) {
+ AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
+ if (AR == AR_WillAliasEverything)
+ return SR_Impossible;
+ if (AR == AR_MayAlias)
+ return SR_Unsuitable;
+ }
+ return SR_Suitable;
}
-bool ImplicitNullChecks::canHoistLoadInst(
- MachineInstr *FaultingMI, unsigned PointerReg,
- ArrayRef<MachineInstr *> InstsSeenSoFar, MachineBasicBlock *NullSucc,
- MachineInstr *&Dependence) {
+bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
+ unsigned PointerReg,
+ ArrayRef<MachineInstr *> InstsSeenSoFar,
+ MachineBasicBlock *NullSucc,
+ MachineInstr *&Dependence) {
auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
if (!DepResult.CanReorder)
return false;
@@ -359,7 +421,8 @@ bool ImplicitNullChecks::canHoistLoadInst(
// The Dependency can't be re-defining the base register -- then we won't
// get the memory operation on the address we want. This is already
// checked in \c IsSuitableMemoryOp.
- assert(!TRI->regsOverlap(DependenceMO.getReg(), PointerReg) &&
+ assert(!(DependenceMO.isDef() &&
+ TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
"Should have been checked before!");
}
@@ -481,50 +544,76 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
return false;
MachineInstr *Dependence;
- if (isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar) &&
- canHoistLoadInst(&MI, PointerReg, InstsSeenSoFar, NullSucc,
- Dependence)) {
+ SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
+ if (SR == SR_Impossible)
+ return false;
+ if (SR == SR_Suitable &&
+ canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
NullSucc, Dependence);
return true;
}
+ // If MI re-defines the PointerReg then we cannot move further.
+ if (any_of(MI.operands(), [&](MachineOperand &MO) {
+ return MO.isReg() && MO.getReg() && MO.isDef() &&
+ TRI->regsOverlap(MO.getReg(), PointerReg);
+ }))
+ return false;
InstsSeenSoFar.push_back(&MI);
}
return false;
}
-/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
-/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
-/// (defining the same register), and branches to HandlerMBB if the load
-/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
-MachineInstr *
-ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
- MachineBasicBlock *MBB,
- MachineBasicBlock *HandlerMBB) {
+/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
+/// The FAULTING instruction does the same load/store as MI
+/// (defining the same register), and branches to HandlerMBB if the mem access
+/// faults. The FAULTING instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingInstr(
+ MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
// all targets.
DebugLoc DL;
- unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+ unsigned NumDefs = MI->getDesc().getNumDefs();
assert(NumDefs <= 1 && "other cases unhandled!");
unsigned DefReg = NoRegister;
if (NumDefs != 0) {
- DefReg = LoadMI->defs().begin()->getReg();
- assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+ DefReg = MI->defs().begin()->getReg();
+ assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
"expected exactly one def!");
}
- auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
- .addMBB(HandlerMBB)
- .addImm(LoadMI->getOpcode());
+ FaultMaps::FaultKind FK;
+ if (MI->mayLoad())
+ FK =
+ MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
+ else
+ FK = FaultMaps::FaultingStore;
- for (auto &MO : LoadMI->uses())
- MIB.addOperand(MO);
+ auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
+ .addImm(FK)
+ .addMBB(HandlerMBB)
+ .addImm(MI->getOpcode());
+
+ for (auto &MO : MI->uses()) {
+ if (MO.isReg()) {
+ MachineOperand NewMO = MO;
+ if (MO.isUse()) {
+ NewMO.setIsKill(false);
+ } else {
+ assert(MO.isDef() && "Expected def or use");
+ NewMO.setIsDead(false);
+ }
+ MIB.add(NewMO);
+ } else {
+ MIB.add(MO);
+ }
+ }
- MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+ MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
return MIB;
}
@@ -545,18 +634,18 @@ void ImplicitNullChecks::rewriteNullChecks(
NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
}
- // Insert a faulting load where the conditional branch was originally. We
- // check earlier ensures that this bit of code motion is legal. We do not
- // touch the successors list for any basic block since we haven't changed
- // control flow, we've just made it implicit.
- MachineInstr *FaultingLoad = insertFaultingLoad(
+ // Insert a faulting instruction where the conditional branch was
+ // originally. The earlier check ensures that this bit of code motion
+ // is legal. We do not touch the successors list for any basic block
+ // since we haven't changed control flow, we've just made it implicit.
+ MachineInstr *FaultingInstr = insertFaultingInstr(
NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
// Now the values defined by MemOperation, if any, are live-in of
// the block of MemOperation.
- // The original load operation may define implicit-defs alongside
- // the loaded value.
+ // The original operation may define implicit-defs alongside
+ // the value.
MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
- for (const MachineOperand &MO : FaultingLoad->operands()) {
+ for (const MachineOperand &MO : FaultingInstr->operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
@@ -588,8 +677,8 @@ void ImplicitNullChecks::rewriteNullChecks(
char ImplicitNullChecks::ID = 0;
char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
-INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
+INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
"Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
+INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
"Implicit null checks", false, false)
OpenPOWER on IntegriCloud