Commit fd1f2f85 authored by George Burgess IV

[MemorySSA] Move code around a bit. NFC.

This patch moves MSSA's caching walker into MemorySSA as the nested class
MemorySSA::CachingWalker, and moves the walker's definition out of MemorySSA.h
and into MemorySSA.cpp. This is done in preparation for the new walker, which
should be out for review soonish.

Also, this patch removes the DataLayout field from UpwardsMemoryQuery and
includes a few lines of diff from clang-format'ing MemorySSA.cpp.

llvm-svn: 273723
parent 5f1c0178
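For readers unfamiliar with the layout the patch adopts: MemorySSA now only forward-declares the walker as a nested class in MemorySSA.h and keeps the full definition next to its other member functions in MemorySSA.cpp, as the hunks below show. A minimal, self-contained sketch of that declare-in-header, define-in-source pattern (hypothetical names, not the actual LLVM code):

#include <memory>

// "Header" part: the owner only forward-declares its nested walker.
class Analysis {                      // stand-in for MemorySSA
public:
  Analysis();
  ~Analysis();                        // out-of-line: CachingWalker is incomplete here
  int query(int X);
private:
  class CachingWalker;                // declared here, defined below in the ".cpp" part
  std::unique_ptr<CachingWalker> Walker;
};

// "Source file" part: CachingWalker is a complete type from here on.
class Analysis::CachingWalker {
public:
  int walk(int X) { return X + 1; }   // placeholder for the real upward walk
};

Analysis::Analysis() : Walker(new CachingWalker()) {}
Analysis::~Analysis() = default;      // defined where CachingWalker is complete
int Analysis::query(int X) { return Walker->walk(X); }

int main() { return Analysis().query(41) == 42 ? 0 : 1; }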
@@ -593,6 +593,8 @@ protected:
void verifyOrdering(Function &F) const;
private:
class CachingWalker;
void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
@@ -620,7 +622,7 @@ private:
std::unique_ptr<MemoryAccess> LiveOnEntryDef;
// Memory SSA building info
std::unique_ptr<MemorySSAWalker> Walker;
std::unique_ptr<CachingWalker> Walker;
unsigned NextID;
};
@@ -751,74 +753,6 @@ public:
using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
///
/// FIXME: The current implementation of this can take quadratic space in rare
/// cases. This can be fixed, but it is something to note until it is fixed.
///
/// In order to trigger this behavior, you need to store to N distinct locations
/// (that AA can prove don't alias), perform M stores to other memory
/// locations that AA can prove don't alias any of the initial N locations, and
/// then load from all of the N locations. In this case, we insert M cache
/// entries for each of the N loads.
///
/// For example:
/// define i32 @foo() {
/// %a = alloca i32, align 4
/// %b = alloca i32, align 4
/// store i32 0, i32* %a, align 4
/// store i32 0, i32* %b, align 4
///
/// ; Insert M stores to other memory that doesn't alias %a or %b here
///
/// %c = load i32, i32* %a, align 4 ; Caches M entries in
/// ; CachedUpwardsClobberingAccess for the
/// ; MemoryLocation %a
/// %d = load i32, i32* %b, align 4 ; Caches M entries in
/// ; CachedUpwardsClobberingAccess for the
/// ; MemoryLocation %b
///
/// ; For completeness' sake, loading %a or %b again would not cache *another*
/// ; M entries.
/// %r = add i32 %c, %d
/// ret i32 %r
/// }
class CachingMemorySSAWalker final : public MemorySSAWalker {
public:
CachingMemorySSAWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
~CachingMemorySSAWalker() override;
MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
MemoryLocation &) override;
void invalidateInfo(MemoryAccess *) override;
protected:
struct UpwardsMemoryQuery;
MemoryAccess *doCacheLookup(const MemoryAccess *, const UpwardsMemoryQuery &,
const MemoryLocation &);
void doCacheInsert(const MemoryAccess *, MemoryAccess *,
const UpwardsMemoryQuery &, const MemoryLocation &);
void doCacheRemove(const MemoryAccess *, const UpwardsMemoryQuery &,
const MemoryLocation &);
private:
MemoryAccessPair UpwardsDFSWalk(MemoryAccess *, const MemoryLocation &,
UpwardsMemoryQuery &, bool);
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
bool instructionClobbersQuery(const MemoryDef *, UpwardsMemoryQuery &,
const MemoryLocation &Loc) const;
void verifyRemoved(MemoryAccess *);
SmallDenseMap<ConstMemoryAccessPair, MemoryAccess *>
CachedUpwardsClobberingAccess;
DenseMap<const MemoryAccess *, MemoryAccess *> CachedUpwardsClobberingCall;
AliasAnalysis *AA;
DominatorTree *DT;
};
/// \brief Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
......
@@ -10,7 +10,6 @@
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------===//
#include "llvm/Transforms/Utils/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
@@ -39,6 +38,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
#include <algorithm>
#define DEBUG_TYPE "memoryssa"
@@ -55,7 +55,6 @@ INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
true)
namespace llvm {
/// \brief An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
@@ -77,6 +76,74 @@ public:
OS << "; " << *MA << "\n";
}
};
/// \brief A MemorySSAWalker that does AA walks and caching of lookups to
/// disambiguate accesses.
///
/// FIXME: The current implementation of this can take quadratic space in rare
/// cases. This can be fixed, but it is something to note until it is fixed.
///
/// In order to trigger this behavior, you need to store to N distinct locations
/// (that AA can prove don't alias), perform M stores to other memory
/// locations that AA can prove don't alias any of the initial N locations, and
/// then load from all of the N locations. In this case, we insert M cache
/// entries for each of the N loads.
///
/// For example:
/// define i32 @foo() {
/// %a = alloca i32, align 4
/// %b = alloca i32, align 4
/// store i32 0, i32* %a, align 4
/// store i32 0, i32* %b, align 4
///
/// ; Insert M stores to other memory that doesn't alias %a or %b here
///
/// %c = load i32, i32* %a, align 4 ; Caches M entries in
/// ; CachedUpwardsClobberingAccess for the
/// ; MemoryLocation %a
/// %d = load i32, i32* %b, align 4 ; Caches M entries in
/// ; CachedUpwardsClobberingAccess for the
/// ; MemoryLocation %b
///
/// ; For completeness' sake, loading %a or %b again would not cache *another*
/// ; M entries.
/// %r = add i32 %c, %d
/// ret i32 %r
/// }
class MemorySSA::CachingWalker final : public MemorySSAWalker {
public:
CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
~CachingWalker() override;
MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
MemoryLocation &) override;
void invalidateInfo(MemoryAccess *) override;
protected:
struct UpwardsMemoryQuery;
MemoryAccess *doCacheLookup(const MemoryAccess *, const UpwardsMemoryQuery &,
const MemoryLocation &);
void doCacheInsert(const MemoryAccess *, MemoryAccess *,
const UpwardsMemoryQuery &, const MemoryLocation &);
void doCacheRemove(const MemoryAccess *, const UpwardsMemoryQuery &,
const MemoryLocation &);
private:
MemoryAccessPair UpwardsDFSWalk(MemoryAccess *, const MemoryLocation &,
UpwardsMemoryQuery &, bool);
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
bool instructionClobbersQuery(const MemoryDef *, UpwardsMemoryQuery &,
const MemoryLocation &Loc) const;
void verifyRemoved(MemoryAccess *);
SmallDenseMap<ConstMemoryAccessPair, MemoryAccess *>
CachedUpwardsClobberingAccess;
DenseMap<const MemoryAccess *, MemoryAccess *> CachedUpwardsClobberingCall;
AliasAnalysis *AA;
DominatorTree *DT;
};
}
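As a usage note (a simplified sketch, not code from this patch): clients still go through the abstract MemorySSAWalker interface returned by MemorySSA::getWalker(); only the concrete type behind it is now the private MemorySSA::CachingWalker. A hypothetical debugging helper that asks the walker for each instruction's clobber, assuming MSSA has already been built for F, could look like:

#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/MemorySSA.h"
using namespace llvm;

// Hypothetical helper; assumes MSSA was already built for F.
static void dumpClobbers(Function &F, MemorySSA &MSSA) {
  MemorySSAWalker *Walker = MSSA.getWalker();
  for (Instruction &I : instructions(F)) {
    // Only memory-touching instructions have a MemoryUse/MemoryDef attached.
    if (!MSSA.getMemoryAccess(&I))
      continue;
    MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(&I);
    errs() << I << "\n;   clobbered by: " << *Clobber << "\n";
  }
}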
namespace {
@@ -238,7 +305,7 @@ MemorySSAWalker *MemorySSA::getWalker() {
if (Walker)
return Walker.get();
Walker = make_unique<CachingMemorySSAWalker>(this, AA, DT);
Walker = make_unique<CachingWalker>(this, AA, DT);
// We create an access to represent "live on entry", for things like
// arguments or users of globals, where the memory they use is defined before
@@ -862,13 +929,13 @@ void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
CachingMemorySSAWalker::CachingMemorySSAWalker(MemorySSA *M, AliasAnalysis *A,
DominatorTree *D)
MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
DominatorTree *D)
: MemorySSAWalker(M), AA(A), DT(D) {}
CachingMemorySSAWalker::~CachingMemorySSAWalker() {}
MemorySSA::CachingWalker::~CachingWalker() {}
struct CachingMemorySSAWalker::UpwardsMemoryQuery {
struct MemorySSA::CachingWalker::UpwardsMemoryQuery {
// True if we saw a phi whose predecessor was a backedge
bool SawBackedgePhi;
// True if our original query started off as a call
@@ -887,15 +954,17 @@ struct CachingMemorySSAWalker::UpwardsMemoryQuery {
SmallVector<const MemoryAccess *, 32> VisitedCalls;
// The MemoryAccess we actually got called with, used to test local domination
const MemoryAccess *OriginalAccess;
// The Datalayout for the module we started in
const DataLayout *DL;
UpwardsMemoryQuery()
: SawBackedgePhi(false), IsCall(false), Inst(nullptr),
OriginalAccess(nullptr), DL(nullptr) {}
OriginalAccess(nullptr) {}
UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
: SawBackedgePhi(false), IsCall(ImmutableCallSite(Inst)), Inst(Inst),
OriginalAccess(Access) {}
};
void CachingMemorySSAWalker::invalidateInfo(MemoryAccess *MA) {
void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
// TODO: We can do much better cache invalidation with differently stored
// caches. For now, for MemoryUses, we simply remove them
@@ -929,19 +998,19 @@ void CachingMemorySSAWalker::invalidateInfo(MemoryAccess *MA) {
#endif
}
void CachingMemorySSAWalker::doCacheRemove(const MemoryAccess *M,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
void MemorySSA::CachingWalker::doCacheRemove(const MemoryAccess *M,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
if (Q.IsCall)
CachedUpwardsClobberingCall.erase(M);
else
CachedUpwardsClobberingAccess.erase({M, Loc});
}
void CachingMemorySSAWalker::doCacheInsert(const MemoryAccess *M,
MemoryAccess *Result,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
void MemorySSA::CachingWalker::doCacheInsert(const MemoryAccess *M,
MemoryAccess *Result,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
// This is fine for Phis, since there are times where we can't optimize them.
// Making a def its own clobber is never correct, though.
assert((Result != M || isa<MemoryPhi>(M)) &&
@@ -953,11 +1022,12 @@ void CachingMemorySSAWalker::doCacheInsert(const MemoryAccess *M,
CachedUpwardsClobberingAccess[{M, Loc}] = Result;
}
MemoryAccess *CachingMemorySSAWalker::doCacheLookup(const MemoryAccess *M,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
MemoryAccess *
MemorySSA::CachingWalker::doCacheLookup(const MemoryAccess *M,
const UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) {
++NumClobberCacheLookups;
MemoryAccess *Result = nullptr;
MemoryAccess *Result;
if (Q.IsCall)
Result = CachedUpwardsClobberingCall.lookup(M);
@@ -969,7 +1039,7 @@ MemoryAccess *CachingMemorySSAWalker::doCacheLookup(const MemoryAccess *M,
return Result;
}
bool CachingMemorySSAWalker::instructionClobbersQuery(
bool MemorySSA::CachingWalker::instructionClobbersQuery(
const MemoryDef *MD, UpwardsMemoryQuery &Q,
const MemoryLocation &Loc) const {
Instruction *DefMemoryInst = MD->getMemoryInst();
@@ -985,7 +1055,7 @@ bool CachingMemorySSAWalker::instructionClobbersQuery(
return I != MRI_NoModRef;
}
MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
MemoryAccessPair MemorySSA::CachingWalker::UpwardsDFSWalk(
MemoryAccess *StartingAccess, const MemoryLocation &Loc,
UpwardsMemoryQuery &Q, bool FollowingBackedge) {
MemoryAccess *ModifyingAccess = nullptr;
@@ -1117,15 +1187,13 @@ MemoryAccessPair CachingMemorySSAWalker::UpwardsDFSWalk(
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
UpwardsMemoryQuery &Q) {
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
return UpwardsDFSWalk(StartingAccess, Q.StartingLoc, Q, false).first;
}
MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
MemoryLocation &Loc) {
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
MemoryAccess *StartingAccess, MemoryLocation &Loc) {
if (isa<MemoryPhi>(StartingAccess))
return StartingAccess;
@@ -1145,7 +1213,6 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
Q.StartingLoc = Loc;
Q.Inst = StartingUseOrDef->getMemoryInst();
Q.IsCall = false;
Q.DL = &Q.Inst->getModule()->getDataLayout();
if (auto CacheResult = doCacheLookup(StartingUseOrDef, Q, Q.StartingLoc))
return CacheResult;
@@ -1168,7 +1235,7 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *StartingAccess,
}
MemoryAccess *
CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
MemorySSA::CachingWalker::getClobberingMemoryAccess(const Instruction *I) {
// There should be no way to lookup an instruction and get a phi as the
// access, since we only map BB's to PHI's. So, this must be a use or def.
auto *StartingAccess = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(I));
@@ -1185,7 +1252,6 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
if (!Q.IsCall)
Q.StartingLoc = MemoryLocation::get(I);
Q.Inst = I;
Q.DL = &Q.Inst->getModule()->getDataLayout();
if (auto CacheResult = doCacheLookup(StartingAccess, Q, Q.StartingLoc))
return CacheResult;
@@ -1219,7 +1285,7 @@ CachingMemorySSAWalker::getClobberingMemoryAccess(const Instruction *I) {
}
// Verify that MA doesn't exist in any of the caches.
void CachingMemorySSAWalker::verifyRemoved(MemoryAccess *MA) {
void MemorySSA::CachingWalker::verifyRemoved(MemoryAccess *MA) {
#ifndef NDEBUG
for (auto &P : CachedUpwardsClobberingAccess)
assert(P.first.first != MA && P.second != MA &&
......