blob: 61efbef96aef67dcba8b12b3c4884e06866b23ee [file] [log] [blame]
//===- ScopBuilder.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the Scops detected by the SCoP
// detection derived from their LLVM-IR code.
//
//===----------------------------------------------------------------------===//
#include "polly/ScopBuilder.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
using namespace llvm;
using namespace polly;
#define DEBUG_TYPE "polly-scops"
// Statistics reported via -stats.
STATISTIC(ScopFound, "Number of valid Scops");
STATISTIC(RichScopFound, "Number of Scops containing a loop");
STATISTIC(InfeasibleScops,
          "Number of SCoPs with statically infeasible context.");

// Storage for the -polly-analyze-read-only-scalars flag below; declared
// extern in the Polly headers so other translation units can read it.
bool polly::ModelReadOnlyScalars;

// The maximal number of dimensions we allow during invariant load construction.
// More complex access ranges will result in very high compile time and are also
// unlikely to result in good code. This value is very high and should only
// trigger for corner cases (e.g., the "dct_luma" function in h264, SPEC2006).
static int const MaxDimensionsInAccessRange = 9;

static cl::opt<bool, true> XModelReadOnlyScalars(
    "polly-analyze-read-only-scalars",
    cl::desc("Model read-only scalar values in the scop description"),
    cl::location(ModelReadOnlyScalars), cl::Hidden, cl::ZeroOrMore,
    cl::init(true), cl::cat(PollyCategory));

// Budget for isl computations during SCoP analysis (0 disables the bound).
static cl::opt<int>
    OptComputeOut("polly-analysis-computeout",
                  cl::desc("Bound the scop analysis by a maximal amount of "
                           "computational steps (0 means no bound)"),
                  cl::Hidden, cl::init(800000), cl::ZeroOrMore,
                  cl::cat(PollyCategory));

static cl::opt<bool> PollyAllowDereferenceOfAllFunctionParams(
    "polly-allow-dereference-of-all-function-parameters",
    cl::desc(
        "Treat all parameters to functions that are pointers as dereferencible."
        " This is useful for invariant load hoisting, since we can generate"
        " less runtime checks. This is only valid if all pointers to functions"
        " are always initialized, so that Polly can choose to hoist"
        " their loads. "),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Limits keeping generated runtime (alias) checks tractable.
static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
    "polly-rtc-max-arrays-per-group",
    cl::desc("The maximal number of arrays to compare in each alias group."),
    cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));

static cl::opt<int> RunTimeChecksMaxAccessDisjuncts(
    "polly-rtc-max-array-disjuncts",
    cl::desc("The maximal number of disjunts allowed in memory accesses to "
             "to build RTCs."),
    cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxParameters(
    "polly-rtc-max-parameters",
    cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
    cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<bool> UnprofitableScalarAccs(
    "polly-unprofitable-scalar-accs",
    cl::desc("Count statements with scalar accesses as not optimizable"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Extra user-provided constraints on the SCoP context, parsed as an isl set.
static cl::opt<std::string> UserContextStr(
    "polly-context", cl::value_desc("isl parameter set"),
    cl::desc("Provide additional constraints on the context parameters"),
    cl::init(""), cl::cat(PollyCategory));

static cl::opt<bool> DetectFortranArrays(
    "polly-detect-fortran-arrays",
    cl::desc("Detect Fortran arrays and use this for code generation"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> DetectReductions("polly-detect-reductions",
                                      cl::desc("Detect and exploit reductions"),
                                      cl::Hidden, cl::ZeroOrMore,
                                      cl::init(true), cl::cat(PollyCategory));

// Multiplicative reductions can be disabled separately as these kind of
// operations can overflow easily. Additive reductions and bit operations
// are in contrast pretty stable.
static cl::opt<bool> DisableMultiplicativeReductions(
    "polly-disable-multiplicative-reductions",
    cl::desc("Disable multiplicative reductions"), cl::Hidden, cl::ZeroOrMore,
    cl::init(false), cl::cat(PollyCategory));

// Strategy for splitting a basic block into (possibly several) ScopStmts.
enum class GranularityChoice { BasicBlocks, ScalarIndependence, Stores };

static cl::opt<GranularityChoice> StmtGranularity(
    "polly-stmt-granularity",
    cl::desc(
        "Algorithm to use for splitting basic blocks into multiple statements"),
    cl::values(clEnumValN(GranularityChoice::BasicBlocks, "bb",
                          "One statement per basic block"),
               clEnumValN(GranularityChoice::ScalarIndependence, "scalar-indep",
                          "Scalar independence heuristic"),
               clEnumValN(GranularityChoice::Stores, "store",
                          "Store-level granularity")),
    cl::init(GranularityChoice::ScalarIndependence), cl::cat(PollyCategory));
/// Helper to treat non-affine regions and basic blocks the same.
///
///{
/// Return the block that represents @p RN: for a subregion node this is the
/// subregion's entry block, otherwise the basic block itself.
static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
  if (RN->isSubRegion())
    return RN->getNodeAs<Region>()->getEntry();
  return RN->getNodeAs<BasicBlock>();
}
/// Return the @p idx'th block that is executed after @p RN.
static inline BasicBlock *
getRegionNodeSuccessor(RegionNode *RN, Instruction *TI, unsigned idx) {
  // Plain basic blocks take their successors from the terminator.
  if (!RN->isSubRegion())
    return TI->getSuccessor(idx);

  // A subregion node has exactly one successor: the region's exit block.
  assert(idx == 0);
  return RN->getNodeAs<Region>()->getExit();
}
/// Return true if @p RN (a basic block or a subregion) contains at least one
/// error block with respect to region @p R.
static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
                               const DominatorTree &DT) {
  if (RN->isSubRegion()) {
    // Scan every block of the subregion.
    for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
      if (isErrorBlock(*BB, R, LI, DT))
        return true;
    return false;
  }
  return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
}
///}
/// Create a map in @p SetSpace that advances dimension @p Dim by one while
/// fixing every other dimension, e.g. for Dim == 2 in a 4-dimensional space:
///
///   [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
static isl::map createNextIterationMap(isl::space SetSpace, unsigned Dim) {
  isl::space MapSpace = SetSpace.map_from_set();
  isl::map Result = isl::map::universe(MapSpace);

  // Equate all input/output dimensions except @p Dim. The dimension count is
  // invariant under equate, so it can be read once up front.
  unsigned NumInDims = Result.dim(isl::dim::in);
  for (unsigned u = 0; u < NumInDims; u++) {
    if (u == Dim)
      continue;
    Result = Result.equate(isl::dim::in, u, isl::dim::out, u);
  }

  // Add the equality in[Dim] + 1 - out[Dim] == 0, i.e. out[Dim] = in[Dim] + 1.
  isl::constraint C =
      isl::constraint::alloc_equality(isl::local_space(MapSpace));
  C = C.set_constant_si(1);
  C = C.set_coefficient_si(isl::dim::in, Dim, 1);
  C = C.set_coefficient_si(isl::dim::out, Dim, -1);
  return Result.add_constraint(C);
}
/// Return the union of all bounded basic sets of @p S.
static isl::set collectBoundedParts(isl::set S) {
  isl::set Bounded = isl::set::empty(S.get_space());
  for (isl::basic_set BSet : S.get_basic_set_list()) {
    if (!BSet.is_bounded())
      continue;
    Bounded = Bounded.unite(isl::set(BSet));
  }
  return Bounded;
}
/// Compute the (un)bounded parts of @p S wrt. to dimension @p Dim.
///
/// @returns A separation of @p S into first an unbounded then a bounded subset,
///          both with regards to the dimension @p Dim.
static std::pair<isl::set, isl::set> partitionSetParts(isl::set S,
                                                       unsigned Dim) {
  // Lower-bound every set dimension by zero before partitioning.
  for (unsigned u = 0, e = S.n_dim(); u < e; u++)
    S = S.lower_bound_si(isl::dim::set, u, 0);

  unsigned NumDimsS = S.n_dim();
  isl::set OnlyDimS = S;

  // Remove dimensions that are greater than Dim as they are not interesting.
  assert(NumDimsS >= Dim + 1);
  OnlyDimS = OnlyDimS.project_out(isl::dim::set, Dim + 1, NumDimsS - Dim - 1);

  // Create artificial parametric upper bounds for dimensions smaller than Dim
  // as we are not interested in them.
  OnlyDimS = OnlyDimS.insert_dims(isl::dim::param, 0, Dim);

  for (unsigned u = 0; u < Dim; u++) {
    // Add the inequality param[u] - set[u] >= 0, i.e. set[u] <= param[u].
    isl::constraint C = isl::constraint::alloc_inequality(
        isl::local_space(OnlyDimS.get_space()));
    C = C.set_coefficient_si(isl::dim::param, u, 1);
    C = C.set_coefficient_si(isl::dim::set, u, -1);
    OnlyDimS = OnlyDimS.add_constraint(C);
  }

  // Collect all bounded parts of OnlyDimS.
  isl::set BoundedParts = collectBoundedParts(OnlyDimS);

  // Create the dimensions greater than Dim again.
  BoundedParts =
      BoundedParts.insert_dims(isl::dim::set, Dim + 1, NumDimsS - Dim - 1);

  // Remove the artificial upper bound parameters again.
  BoundedParts = BoundedParts.remove_dims(isl::dim::param, 0, Dim);

  // Everything not bounded in dimension Dim forms the unbounded part.
  isl::set UnboundedParts = S.subtract(BoundedParts);
  return std::make_pair(UnboundedParts, BoundedParts);
}
/// Create the conditions under which @p L @p Pred @p R is true.
///
/// Unsigned predicates are mapped to the same relational sets as their signed
/// counterparts; callers establish the non-negativity assumptions that make
/// this sound.
static isl::set buildConditionSet(ICmpInst::Predicate Pred, isl::pw_aff L,
                                  isl::pw_aff R) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return L.eq_set(R);
  case ICmpInst::ICMP_NE:
    return L.ne_set(R);
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT:
    return L.lt_set(R);
  case ICmpInst::ICMP_SLE:
  case ICmpInst::ICMP_ULE:
    return L.le_set(R);
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT:
    return L.gt_set(R);
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_UGE:
    return L.ge_set(R);
  default:
    llvm_unreachable("Non integer predicate not supported");
  }
}
/// Adjust the number of set dimensions of @p Dom when control moves from loop
/// @p OldL to loop @p NewL. Domains carry one set dimension per SCoP-relative
/// loop level, so entering/leaving loops adds/drops trailing dimensions.
isl::set ScopBuilder::adjustDomainDimensions(isl::set Dom, Loop *OldL,
                                             Loop *NewL) {
  // If the loops are the same there is nothing to do.
  if (NewL == OldL)
    return Dom;

  int OldDepth = scop->getRelativeLoopDepth(OldL);
  int NewDepth = scop->getRelativeLoopDepth(NewL);

  // If both loops are non-affine loops there is nothing to do.
  if (OldDepth == -1 && NewDepth == -1)
    return Dom;

  // Distinguish three cases:
  //   1) The depth is the same but the loops are not.
  //      => One loop was left one was entered.
  //   2) The depth increased from OldL to NewL.
  //      => One loop was entered, none was left.
  //   3) The depth decreased from OldL to NewL.
  //      => Loops were left were difference of the depths defines how many.
  if (OldDepth == NewDepth) {
    assert(OldL->getParentLoop() == NewL->getParentLoop());
    // Swap the innermost dimension: drop the left loop's dimension and append
    // a fresh, unconstrained dimension for the entered loop.
    Dom = Dom.project_out(isl::dim::set, NewDepth, 1);
    Dom = Dom.add_dims(isl::dim::set, 1);
  } else if (OldDepth < NewDepth) {
    assert(OldDepth + 1 == NewDepth);
    auto &R = scop->getRegion();
    (void)R; // Only used inside the assertion below.
    assert(NewL->getParentLoop() == OldL ||
           ((!OldL || !R.contains(OldL)) && R.contains(NewL)));
    // One loop was entered: append one dimension.
    Dom = Dom.add_dims(isl::dim::set, 1);
  } else {
    assert(OldDepth > NewDepth);
    // Several loops were left: drop one trailing dimension per level.
    int Diff = OldDepth - NewDepth;
    int NumDim = Dom.n_dim();
    assert(NumDim >= Diff);
    Dom = Dom.project_out(isl::dim::set, NumDim - Diff, Diff);
  }

  return Dom;
}
/// Compute the isl representation for the SCEV @p E in this BB.
///
/// @param BB               The BB for which isl representation is to be
///                         computed.
/// @param InvalidDomainMap A map of BB to their invalid domains.
/// @param E                The SCEV that should be translated.
/// @param NonNegative      Flag to indicate the @p E has to be non-negative.
///
/// Note that this function will also adjust the invalid context accordingly.
__isl_give isl_pw_aff *
ScopBuilder::getPwAff(BasicBlock *BB,
                      DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
                      const SCEV *E, bool NonNegative) {
  PWACtx PWAC = scop->getPwAff(E, BB, NonNegative);
  // Conditions under which the translation is invalid are accumulated into
  // the block's invalid domain instead of being reported to the caller.
  InvalidDomainMap[BB] = InvalidDomainMap[BB].unite(PWAC.second);
  // Ownership of the piecewise affine function passes to the caller.
  return PWAC.first.release();
}
/// Build condition sets for unsigned ICmpInst(s).
/// Special handling is required for unsigned operands to ensure that if
/// MSB (aka the Sign bit) is set for an operands in an unsigned ICmpInst
/// it should wrap around.
///
/// @param IsStrictUpperBound holds information on the predicate relation
///        between TestVal and UpperBound, i.e,
///        TestVal < UpperBound  OR  TestVal <= UpperBound
///
/// @returns The set on which 0 <= TestVal and TestVal < UpperBound (or
///          TestVal <= UpperBound) both hold; ownership goes to the caller.
__isl_give isl_set *ScopBuilder::buildUnsignedConditionSets(
    BasicBlock *BB, Value *Condition, __isl_keep isl_set *Domain,
    const SCEV *SCEV_TestVal, const SCEV *SCEV_UpperBound,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    bool IsStrictUpperBound) {
  // Do not take NonNeg assumption on TestVal
  // as it might have MSB (Sign bit) set.
  isl_pw_aff *TestVal = getPwAff(BB, InvalidDomainMap, SCEV_TestVal, false);
  // Take NonNeg assumption on UpperBound.
  isl_pw_aff *UpperBound =
      getPwAff(BB, InvalidDomainMap, SCEV_UpperBound, true);

  // 0 <= TestVal. A copy of TestVal is taken because the comparison below
  // consumes the original.
  isl_set *First =
      isl_pw_aff_le_set(isl_pw_aff_zero_on_domain(isl_local_space_from_space(
                            isl_pw_aff_get_domain_space(TestVal))),
                        isl_pw_aff_copy(TestVal));

  isl_set *Second;
  if (IsStrictUpperBound)
    // TestVal < UpperBound
    Second = isl_pw_aff_lt_set(TestVal, UpperBound);
  else
    // TestVal <= UpperBound
    Second = isl_pw_aff_le_set(TestVal, UpperBound);

  // Conjunction of both constraints; TestVal and UpperBound were consumed by
  // the comparisons above.
  isl_set *ConsequenceCondSet = isl_set_intersect(First, Second);
  return ConsequenceCondSet;
}
/// Build one condition set per successor of switch @p SI.
///
/// ConditionSets[i] receives the (domain-intersected) condition under which
/// the i'th successor is taken. Index 0 is the default destination and gets
/// everything not covered by any case.
bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, SwitchInst *SI, Loop *L, __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  Value *Condition = getConditionFromTerminator(SI);
  assert(Condition && "No condition for switch");

  isl_pw_aff *LHS, *RHS;
  // The switched-on value, evaluated at loop scope L.
  LHS = getPwAff(BB, InvalidDomainMap, SE.getSCEVAtScope(Condition, L));

  unsigned NumSuccessors = SI->getNumSuccessors();
  ConditionSets.resize(NumSuccessors);
  for (auto &Case : SI->cases()) {
    // Case successor indices start at 1; index 0 is the default destination.
    unsigned Idx = Case.getSuccessorIndex();
    ConstantInt *CaseValue = Case.getCaseValue();

    // Condition is: switched value == case value, restricted to Domain.
    RHS = getPwAff(BB, InvalidDomainMap, SE.getSCEV(CaseValue));
    isl_set *CaseConditionSet =
        buildConditionSet(ICmpInst::ICMP_EQ, isl::manage_copy(LHS),
                          isl::manage(RHS))
            .release();
    ConditionSets[Idx] = isl_set_coalesce(
        isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
  }

  assert(ConditionSets[0] == nullptr && "Default condition set was set");
  // The default successor is taken whenever no case matches.
  // NOTE(review): this read of ConditionSets[1] assumes the switch has at
  // least one case (NumSuccessors >= 2); a degenerate case-less switch would
  // index out of bounds here — confirm such switches cannot reach this point.
  isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
  for (unsigned u = 2; u < NumSuccessors; u++)
    ConditionSetUnion =
        isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
  ConditionSets[0] = isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion);

  isl_pw_aff_free(LHS);
  return true;
}
/// Build the condition sets for a branch on @p Condition.
///
/// On success two sets are appended to @p ConditionSets: first the set under
/// which @p Condition holds, then the set under which it does not, both
/// intersected with @p Domain. Returns false (after freeing any sets pushed so
/// far) if the condition cannot be modeled or the result is too complex.
bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, Value *Condition, Instruction *TI, Loop *L,
    __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  isl_set *ConsequenceCondSet = nullptr;

  if (auto Load = dyn_cast<LoadInst>(Condition)) {
    // A load feeding the branch is modeled as the condition "Load <= 0".
    const SCEV *LHSSCEV = SE.getSCEVAtScope(Load, L);
    const SCEV *RHSSCEV = SE.getZero(LHSSCEV->getType());
    bool NonNeg = false;
    isl_pw_aff *LHS = getPwAff(BB, InvalidDomainMap, LHSSCEV, NonNeg);
    isl_pw_aff *RHS = getPwAff(BB, InvalidDomainMap, RHSSCEV, NonNeg);
    ConsequenceCondSet = buildConditionSet(ICmpInst::ICMP_SLE, isl::manage(LHS),
                                           isl::manage(RHS))
                             .release();
  } else if (auto *PHI = dyn_cast<PHINode>(Condition)) {
    // A PHI as branch condition is only expected when it folds to a unique
    // constant over all non-error paths. BUGFIX: the dyn_cast result was
    // previously dereferenced without a null check; bail out gracefully if
    // the value is missing or not a ConstantInt instead of crashing.
    auto *Unique = dyn_cast_or_null<ConstantInt>(
        getUniqueNonErrorValue(PHI, &scop->getRegion(), LI, DT));
    if (!Unique)
      return false;

    if (Unique->isZero())
      ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
    else
      ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
  } else if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
    // A constant condition is trivially always or never true.
    if (CCond->isZero())
      ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
    else
      ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
  } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
    auto Opcode = BinOp->getOpcode();
    assert(Opcode == Instruction::And || Opcode == Instruction::Or);

    // Recurse into both operands; each recursion appends a (consequence,
    // alternative) pair to ConditionSets.
    bool Valid = buildConditionSets(BB, BinOp->getOperand(0), TI, L, Domain,
                                    InvalidDomainMap, ConditionSets) &&
                 buildConditionSets(BB, BinOp->getOperand(1), TI, L, Domain,
                                    InvalidDomainMap, ConditionSets);
    if (!Valid) {
      while (!ConditionSets.empty())
        isl_set_free(ConditionSets.pop_back_val());
      return false;
    }

    // Pop both pairs; the alternative sets are not needed. The consequences
    // are combined commutatively, so the pop order does not matter.
    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart1 = ConditionSets.pop_back_val();

    if (Opcode == Instruction::And)
      ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
    else
      ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
  } else {
    auto *ICond = dyn_cast<ICmpInst>(Condition);
    assert(ICond &&
           "Condition of exiting branch was neither constant nor ICmp!");

    Region &R = scop->getRegion();

    isl_pw_aff *LHS, *RHS;
    // For unsigned comparisons we assumed the signed bit of neither operand
    // to be set. The comparison is equal to a signed comparison under this
    // assumption.
    bool NonNeg = ICond->isUnsigned();
    const SCEV *LeftOperand = SE.getSCEVAtScope(ICond->getOperand(0), L),
               *RightOperand = SE.getSCEVAtScope(ICond->getOperand(1), L);

    LeftOperand = tryForwardThroughPHI(LeftOperand, R, SE, LI, DT);
    RightOperand = tryForwardThroughPHI(RightOperand, R, SE, LI, DT);

    switch (ICond->getPredicate()) {
    case ICmpInst::ICMP_ULT:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, LeftOperand,
                                     RightOperand, InvalidDomainMap, true);
      break;
    case ICmpInst::ICMP_ULE:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, LeftOperand,
                                     RightOperand, InvalidDomainMap, false);
      break;
    case ICmpInst::ICMP_UGT:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, RightOperand,
                                     LeftOperand, InvalidDomainMap, true);
      break;
    case ICmpInst::ICMP_UGE:
      ConsequenceCondSet =
          buildUnsignedConditionSets(BB, Condition, Domain, RightOperand,
                                     LeftOperand, InvalidDomainMap, false);
      break;
    default:
      LHS = getPwAff(BB, InvalidDomainMap, LeftOperand, NonNeg);
      RHS = getPwAff(BB, InvalidDomainMap, RightOperand, NonNeg);
      ConsequenceCondSet = buildConditionSet(ICond->getPredicate(),
                                             isl::manage(LHS), isl::manage(RHS))
                               .release();
      break;
    }
  }

  // If no terminator was given we are only looking for parameter constraints
  // under which @p Condition is true/false.
  if (!TI)
    ConsequenceCondSet = isl_set_params(ConsequenceCondSet);
  assert(ConsequenceCondSet);
  ConsequenceCondSet = isl_set_coalesce(
      isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain)));

  isl_set *AlternativeCondSet = nullptr;
  bool TooComplex =
      isl_set_n_basic_set(ConsequenceCondSet) >= MaxDisjunctsInDomain;

  if (!TooComplex) {
    AlternativeCondSet = isl_set_subtract(isl_set_copy(Domain),
                                          isl_set_copy(ConsequenceCondSet));
    TooComplex =
        isl_set_n_basic_set(AlternativeCondSet) >= MaxDisjunctsInDomain;
  }

  if (TooComplex) {
    // Too many disjuncts: invalidate the SCoP instead of exploding later.
    scop->invalidate(COMPLEXITY, TI ? TI->getDebugLoc() : DebugLoc(),
                     TI ? TI->getParent() : nullptr /* BasicBlock */);
    isl_set_free(AlternativeCondSet);
    isl_set_free(ConsequenceCondSet);
    return false;
  }

  ConditionSets.push_back(ConsequenceCondSet);
  ConditionSets.push_back(isl_set_coalesce(AlternativeCondSet));

  return true;
}
/// Dispatch condition-set construction for terminator @p TI.
///
/// Switches are delegated to the SwitchInst overload; unconditional branches
/// simply inherit @p Domain; conditional branches are handled via the
/// condition-value overload.
bool ScopBuilder::buildConditionSets(
    BasicBlock *BB, Instruction *TI, Loop *L, __isl_keep isl_set *Domain,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
    SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  if (auto *Switch = dyn_cast<SwitchInst>(TI))
    return buildConditionSets(BB, Switch, L, Domain, InvalidDomainMap,
                              ConditionSets);

  assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");

  // An unconditional branch is always taken; the sole successor gets Domain.
  if (TI->getNumSuccessors() == 1) {
    ConditionSets.push_back(isl_set_copy(Domain));
    return true;
  }

  Value *Cond = getConditionFromTerminator(TI);
  assert(Cond && "No condition for Terminator");
  return buildConditionSets(BB, Cond, TI, L, Domain, InvalidDomainMap,
                            ConditionSets);
}
/// Pull domain constraints from the predecessors of every node in @p R.
///
/// Assumes the domain map was already populated (see
/// buildDomainsWithBranchConstraints). Returns false when adding loop bounds
/// to a header domain fails.
bool ScopBuilder::propagateDomainConstraints(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // Iterate over the region R and propagate the domain constrains from the
  // predecessors to the current node. In contrast to the
  // buildDomainsWithBranchConstraints function, this one will pull the domain
  // information from the predecessors instead of pushing it to the successors.
  // Additionally, we assume the domains to be already present in the domain
  // map here. However, we iterate again in reverse post order so we know all
  // predecessors have been visited before a block or non-affine subregion is
  // visited.
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {
    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        if (!propagateDomainConstraints(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    isl::set &Domain = scop->getOrInitEmptyDomain(BB);
    assert(Domain);

    // Under the union of all predecessor conditions we can reach this block.
    isl::set PredDom = getPredecessorDomainConstraints(BB, Domain);
    Domain = Domain.intersect(PredDom).coalesce();
    Domain = Domain.align_params(scop->getParamSpace());

    // Loop headers additionally get their backedge/bound constraints.
    Loop *BBLoop = getRegionNodeLoop(RN, LI);
    if (BBLoop && BBLoop->getHeader() == BB && scop->contains(BBLoop))
      if (!addLoopBoundsToHeaderDomain(BBLoop, InvalidDomainMap))
        return false;
  }

  return true;
}
/// If @p BB is the entry of a region whose exit lies inside the SCoP,
/// propagate @p BB's domain to that exit block and record the exit in
/// @p FinishedExitBlocks.
void ScopBuilder::propagateDomainConstraintsToRegionExit(
    BasicBlock *BB, Loop *BBLoop,
    SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks,
    DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // Check if the block @p BB is the entry of a region. If so we propagate it's
  // domain to the exit block of the region. Otherwise we are done.
  auto *RI = scop->getRegion().getRegionInfo();
  auto *BBReg = RI ? RI->getRegionFor(BB) : nullptr;
  auto *ExitBB = BBReg ? BBReg->getExit() : nullptr;
  if (!BBReg || BBReg->getEntry() != BB || !scop->contains(ExitBB))
    return;

  // Do not propagate the domain if there is a loop backedge inside the region
  // that would prevent the exit block from being executed.
  auto *L = BBLoop;
  while (L && scop->contains(L)) {
    SmallVector<BasicBlock *, 4> LatchBBs;
    // NOTE(review): this queries the latches of BBLoop on every iteration
    // even though L walks up the parent-loop chain; presumably
    // L->getLoopLatches(LatchBBs) was intended — confirm against upstream
    // before changing behavior.
    BBLoop->getLoopLatches(LatchBBs);
    for (auto *LatchBB : LatchBBs)
      if (BB != LatchBB && BBReg->contains(LatchBB))
        return;
    L = L->getParentLoop();
  }

  isl::set Domain = scop->getOrInitEmptyDomain(BB);
  assert(Domain && "Cannot propagate a nullptr");

  // The exit block may sit in a different loop nest than @p BB.
  Loop *ExitBBLoop = getFirstNonBoxedLoopFor(ExitBB, LI, scop->getBoxedLoops());

  // Since the dimensions of @p BB and @p ExitBB might be different we have to
  // adjust the domain before we can propagate it.
  isl::set AdjustedDomain = adjustDomainDimensions(Domain, BBLoop, ExitBBLoop);
  isl::set &ExitDomain = scop->getOrInitEmptyDomain(ExitBB);

  // If the exit domain is not yet created we set it otherwise we "add" the
  // current domain.
  ExitDomain = ExitDomain ? AdjustedDomain.unite(ExitDomain) : AdjustedDomain;

  // Initialize the invalid domain.
  InvalidDomainMap[ExitBB] = ExitDomain.empty(ExitDomain.get_space());

  FinishedExitBlocks.insert(ExitBB);
}
/// Compute the union of the domains of all predecessors of @p BB, i.e. the
/// conditions under which @p BB can be reached.
///
/// @param BB     The block whose predecessor constraints are requested.
/// @param Domain The (partial) domain of @p BB; only its space is used.
isl::set ScopBuilder::getPredecessorDomainConstraints(BasicBlock *BB,
                                                      isl::set Domain) {
  // If @p BB is the ScopEntry we are done.
  if (scop->getRegion().getEntry() == BB)
    return isl::set::universe(Domain.get_space());

  // The region info of this function.
  auto &RI = *scop->getRegion().getRegionInfo();

  Loop *BBLoop = getFirstNonBoxedLoopFor(BB, LI, scop->getBoxedLoops());

  // A domain to collect all predecessor domains, thus all conditions under
  // which the block is executed. To this end we start with the empty domain.
  isl::set PredDom = isl::set::empty(Domain.get_space());

  // Set of regions of which the entry block domain has been propagated to BB.
  // All predecessors inside any of the regions can be skipped.
  SmallSet<Region *, 8> PropagatedRegions;

  for (auto *PredBB : predecessors(BB)) {
    // Skip backedges.
    if (DT.dominates(BB, PredBB))
      continue;

    // If the predecessor is in a region we used for propagation we can skip it.
    auto PredBBInRegion = [PredBB](Region *PR) { return PR->contains(PredBB); };
    if (std::any_of(PropagatedRegions.begin(), PropagatedRegions.end(),
                    PredBBInRegion)) {
      continue;
    }

    // Check if there is a valid region we can use for propagation, thus look
    // for a region that contains the predecessor and has @p BB as exit block.
    // BUGFIX: the result of getParent() was previously discarded, so the walk
    // up the region tree never advanced and looped forever whenever the first
    // region did not already match.
    auto *PredR = RI.getRegionFor(PredBB);
    while (PredR->getExit() != BB && !PredR->contains(BB))
      PredR = PredR->getParent();

    // If a valid region for propagation was found use the entry of that region
    // for propagation, otherwise the PredBB directly.
    if (PredR->getExit() == BB) {
      PredBB = PredR->getEntry();
      PropagatedRegions.insert(PredR);
    }

    // Adjust the predecessor's domain to @p BB's loop nesting and union it in.
    isl::set PredBBDom = scop->getDomainConditions(PredBB);
    Loop *PredBBLoop =
        getFirstNonBoxedLoopFor(PredBB, LI, scop->getBoxedLoops());
    PredBBDom = adjustDomainDimensions(PredBBDom, PredBBLoop, BBLoop);
    PredDom = PredDom.unite(PredBBDom);
  }

  return PredDom;
}
/// Restrict the domain of the header of loop @p L by the loop's
/// backedge-taken conditions.
///
/// The union of all latch conditions that take the backedge is computed,
/// "no further iteration" is propagated to later iterations, and only the
/// bounded part of the resulting header domain is kept. The unbounded part
/// is recorded as an INFINITELOOP restriction unless an <nsw> AddRec already
/// implies boundedness. Returns false if a latch condition set could not be
/// built.
bool ScopBuilder::addLoopBoundsToHeaderDomain(
    Loop *L, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  int LoopDepth = scop->getRelativeLoopDepth(L);
  assert(LoopDepth >= 0 && "Loop in region should have at least depth one");

  BasicBlock *HeaderBB = L->getHeader();
  assert(scop->isDomainDefined(HeaderBB));
  isl::set &HeaderBBDom = scop->getOrInitEmptyDomain(HeaderBB);

  isl::map NextIterationMap =
      createNextIterationMap(HeaderBBDom.get_space(), LoopDepth);

  isl::set UnionBackedgeCondition = HeaderBBDom.empty(HeaderBBDom.get_space());

  SmallVector<BasicBlock *, 4> LatchBlocks;
  L->getLoopLatches(LatchBlocks);

  for (BasicBlock *LatchBB : LatchBlocks) {
    // If the latch is only reachable via error statements we skip it.
    if (!scop->isDomainDefined(LatchBB))
      continue;

    isl::set LatchBBDom = scop->getDomainConditions(LatchBB);

    isl::set BackedgeCondition = nullptr;

    Instruction *TI = LatchBB->getTerminator();
    BranchInst *BI = dyn_cast<BranchInst>(TI);
    assert(BI && "Only branch instructions allowed in loop latches");

    if (BI->isUnconditional())
      BackedgeCondition = LatchBBDom;
    else {
      SmallVector<isl_set *, 8> ConditionSets;
      // idx selects the successor that is the loop header, i.e. the backedge.
      int idx = BI->getSuccessor(0) != HeaderBB;
      if (!buildConditionSets(LatchBB, TI, L, LatchBBDom.get(),
                              InvalidDomainMap, ConditionSets))
        return false;

      // Free the non back edge condition set as we do not need it.
      isl_set_free(ConditionSets[1 - idx]);

      BackedgeCondition = isl::manage(ConditionSets[idx]);
    }

    // Drop dimensions of loops nested inside @p L before taking the union.
    int LatchLoopDepth = scop->getRelativeLoopDepth(LI.getLoopFor(LatchBB));
    assert(LatchLoopDepth >= LoopDepth);
    BackedgeCondition = BackedgeCondition.project_out(
        isl::dim::set, LoopDepth + 1, LatchLoopDepth - LoopDepth);
    UnionBackedgeCondition = UnionBackedgeCondition.unite(BackedgeCondition);
  }

  // Map every iteration to all lexicographically later iterations with the
  // outer dimensions fixed. FIX: lex_le is a static member of isl::map;
  // calling it through the very object being initialized was confusing, so
  // use the explicitly qualified form (identical behavior).
  isl::map ForwardMap = isl::map::lex_le(HeaderBBDom.get_space());
  for (int i = 0; i < LoopDepth; i++)
    ForwardMap = ForwardMap.equate(isl::dim::in, i, isl::dim::out, i);

  // Iterations without a taken backedge cannot be followed by any later
  // iteration of this loop; subtract those "later" iterations.
  isl::set UnionBackedgeConditionComplement =
      UnionBackedgeCondition.complement();
  UnionBackedgeConditionComplement =
      UnionBackedgeConditionComplement.lower_bound_si(isl::dim::set, LoopDepth,
                                                      0);
  UnionBackedgeConditionComplement =
      UnionBackedgeConditionComplement.apply(ForwardMap);
  HeaderBBDom = HeaderBBDom.subtract(UnionBackedgeConditionComplement);
  HeaderBBDom = HeaderBBDom.apply(NextIterationMap);

  // Keep only the part bounded in this loop's dimension.
  auto Parts = partitionSetParts(HeaderBBDom, LoopDepth);
  HeaderBBDom = Parts.second;

  // Check if there is a <nsw> tagged AddRec for this loop and if so do not add
  // the bounded assumptions to the context as they are already implied by the
  // <nsw> tag.
  if (scop->hasNSWAddRecForLoop(L))
    return true;

  isl::set UnboundedCtx = Parts.first.params();
  scop->recordAssumption(INFINITELOOP, UnboundedCtx,
                         HeaderBB->getTerminator()->getDebugLoc(),
                         AS_RESTRICTION);
  return true;
}
void ScopBuilder::buildInvariantEquivalenceClasses() {
DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;
const InvariantLoadsSetTy &RIL = scop->getRequiredInvariantLoads();
for (LoadInst *LInst : RIL) {
const SCEV *PointerSCEV = SE.getSCEV(LInst->getPointerOperand());
Type *Ty = LInst->getType();
LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
if (ClassRep) {
scop->addInvariantLoadMapping(LInst, ClassRep);
continue;
}
ClassRep = LInst;
scop->addInvariantEquivClass(
InvariantEquivClassTy{PointerSCEV, MemoryAccessList(), nullptr, Ty});
}
}
/// Compute the domains for all blocks of region @p R.
///
/// Seeds the entry block with a universe domain of the appropriate loop
/// depth, then builds and propagates domains across the region. Returns false
/// if domain construction determined the region cannot be modeled.
bool ScopBuilder::buildDomains(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  bool IsOnlyNonAffineRegion = scop->isNonAffineSubRegion(R);
  auto *EntryBB = R->getEntry();
  auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
  int LD = scop->getRelativeLoopDepth(L);
  auto *S =
      isl_set_universe(isl_space_set_alloc(scop->getIslCtx().get(), 0, LD + 1));

  InvalidDomainMap[EntryBB] = isl::manage(isl_set_empty(isl_set_get_space(S)));
  // CONSISTENCY: use the plain isl::set spelling like the rest of this file
  // (isl::noexceptions::set names the same type).
  isl::set Domain = isl::manage(S);
  scop->setDomain(EntryBB, Domain);

  if (IsOnlyNonAffineRegion)
    return !containsErrorBlock(R->getNode(), *R, LI, DT);

  if (!buildDomainsWithBranchConstraints(R, InvalidDomainMap))
    return false;

  if (!propagateDomainConstraints(R, InvalidDomainMap))
    return false;

  // Error blocks and blocks dominated by them have been assumed to never be
  // executed. Representing them in the Scop does not add any value. In fact,
  // it is likely to cause issues during construction of the ScopStmts. The
  // contents of error blocks have not been verified to be expressible and
  // will cause problems when building up a ScopStmt for them.
  // Furthermore, basic blocks dominated by error blocks may reference
  // instructions in the error block which, if the error block is not modeled,
  // can themselves not be constructed properly. To this end we will replace
  // the domains of error blocks and those only reachable via error blocks
  // with an empty set. Additionally, we will record for each block under which
  // parameter combination it would be reached via an error block in its
  // InvalidDomain. This information is needed during load hoisting.
  if (!propagateInvalidStmtDomains(R, InvalidDomainMap))
    return false;

  return true;
}
/// Compute the domain of each block in @p R from the branch conditions of its
/// predecessors, propagating constraints in reverse post-order.
///
/// @returns False if construction had to abort (e.g., a domain exceeded
///          MaxDisjunctsInDomain or a condition set could not be built); the
///          SCoP has been invalidated in that case.
bool ScopBuilder::buildDomainsWithBranchConstraints(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  // To create the domain for each block in R we iterate over all blocks and
  // subregions in R and propagate the conditions under which the current
  // region element is executed. To this end we iterate in reverse post order
  // over R as it ensures that we first visit all predecessors of a region
  // node (either a basic block or a subregion) before we visit the region
  // node itself.
  //
  // Initially, only the domain for the SCoP region entry block is set and
  // from there we propagate the current domain to all successors, however we
  // add the condition that the successor is actually executed next.
  //
  // As we are only interested in non-loop carried constraints here we can
  // simply skip loop back edges.

  SmallPtrSet<BasicBlock *, 8> FinishedExitBlocks;
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {
    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        if (!buildDomainsWithBranchConstraints(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    if (containsErrorBlock(RN, scop->getRegion(), LI, DT))
      scop->notifyErrorBlock();

    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    Instruction *TI = BB->getTerminator();

    if (isa<UnreachableInst>(TI))
      continue;

    if (!scop->isDomainDefined(BB))
      continue;
    isl::set Domain = scop->getDomainConditions(BB);

    scop->updateMaxLoopDepth(isl_set_n_dim(Domain.get()));

    auto *BBLoop = getRegionNodeLoop(RN, LI);
    // Propagate the domain from BB directly to blocks that have a superset
    // domain, at the moment only region exit nodes of regions that start in
    // BB.
    propagateDomainConstraintsToRegionExit(BB, BBLoop, FinishedExitBlocks,
                                           InvalidDomainMap);

    // If all successors of BB have been set a domain through the propagation
    // above we do not need to build condition sets but can just skip this
    // block. However, it is important to note that this is a local property
    // with regards to the region @p R. To this end FinishedExitBlocks is a
    // local variable.
    auto IsFinishedRegionExit = [&FinishedExitBlocks](BasicBlock *SuccBB) {
      return FinishedExitBlocks.count(SuccBB);
    };
    if (std::all_of(succ_begin(BB), succ_end(BB), IsFinishedRegionExit))
      continue;

    // Build the condition sets for the successor nodes of the current region
    // node. If it is a non-affine subregion we will always execute the single
    // exit node, hence the single entry node domain is the condition set. For
    // basic blocks we use the helper function buildConditionSets.
    SmallVector<isl_set *, 8> ConditionSets;
    if (RN->isSubRegion())
      ConditionSets.push_back(Domain.copy());
    else if (!buildConditionSets(BB, TI, BBLoop, Domain.get(),
                                 InvalidDomainMap, ConditionSets))
      return false;

    // Now iterate over the successors and set their initial domain based on
    // their condition set. We skip back edges here and have to be careful
    // when we leave a loop not to keep constraints over a dimension that
    // doesn't exist anymore.
    assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
    for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
      isl::set CondSet = isl::manage(ConditionSets[u]);
      BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);

      // Skip blocks outside the region.
      if (!scop->contains(SuccBB))
        continue;

      // If we propagate the domain of some block to "SuccBB" we do not have
      // to adjust the domain.
      if (FinishedExitBlocks.count(SuccBB))
        continue;

      // Skip back edges.
      if (DT.dominates(SuccBB, BB))
        continue;

      Loop *SuccBBLoop =
          getFirstNonBoxedLoopFor(SuccBB, LI, scop->getBoxedLoops());

      CondSet = adjustDomainDimensions(CondSet, BBLoop, SuccBBLoop);

      // Set the domain for the successor or merge it with an existing domain
      // in case there are multiple paths (without loop back edges) to the
      // successor block.
      isl::set &SuccDomain = scop->getOrInitEmptyDomain(SuccBB);

      if (SuccDomain) {
        SuccDomain = SuccDomain.unite(CondSet).coalesce();
      } else {
        // Initialize the invalid domain.
        InvalidDomainMap[SuccBB] = CondSet.empty(CondSet.get_space());
        SuccDomain = CondSet;
      }

      SuccDomain = SuccDomain.detect_equalities();

      // Check if the maximal number of domain disjunctions was reached.
      // In case this happens we will clean up and bail.
      if (SuccDomain.n_basic_set() < MaxDisjunctsInDomain)
        continue;

      scop->invalidate(COMPLEXITY, DebugLoc());
      // Free the raw isl_set pointers not yet wrapped by isl::manage before
      // bailing out, to avoid leaking them.
      while (++u < ConditionSets.size())
        isl_set_free(ConditionSets[u]);
      return false;
    }
  }

  return true;
}
/// Propagate invalid-execution contexts through region @p R and clear the
/// domains of error blocks (and blocks whose whole domain is invalid).
///
/// @returns False if an invalid domain exceeded MaxDisjunctsInDomain (the
///          SCoP is invalidated in that case), true otherwise.
bool ScopBuilder::propagateInvalidStmtDomains(
    Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {

    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!scop->isNonAffineSubRegion(SubRegion)) {
        // Propagate a failed (aborted) recursive build instead of silently
        // dropping the result and reporting success.
        if (!propagateInvalidStmtDomains(SubRegion, InvalidDomainMap))
          return false;
        continue;
      }
    }

    bool ContainsErrorBlock = containsErrorBlock(RN, scop->getRegion(), LI, DT);
    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    isl::set &Domain = scop->getOrInitEmptyDomain(BB);
    assert(Domain && "Cannot propagate a nullptr");

    isl::set InvalidDomain = InvalidDomainMap[BB];

    bool IsInvalidBlock = ContainsErrorBlock || Domain.is_subset(InvalidDomain);

    if (!IsInvalidBlock) {
      InvalidDomain = InvalidDomain.intersect(Domain);
    } else {
      // The block is assumed never to execute: record that assumption on the
      // parameter values of its domain and clear the domain itself.
      InvalidDomain = Domain;
      isl::set DomPar = Domain.params();
      scop->recordAssumption(ERRORBLOCK, DomPar,
                             BB->getTerminator()->getDebugLoc(),
                             AS_RESTRICTION);
      Domain = isl::set::empty(Domain.get_space());
    }

    if (InvalidDomain.is_empty()) {
      InvalidDomainMap[BB] = InvalidDomain;
      continue;
    }

    auto *BBLoop = getRegionNodeLoop(RN, LI);
    auto *TI = BB->getTerminator();
    unsigned NumSuccs = RN->isSubRegion() ? 1 : TI->getNumSuccessors();
    for (unsigned u = 0; u < NumSuccs; u++) {
      auto *SuccBB = getRegionNodeSuccessor(RN, TI, u);

      // Skip successors outside the SCoP.
      if (!scop->contains(SuccBB))
        continue;

      // Skip backedges.
      if (DT.dominates(SuccBB, BB))
        continue;

      Loop *SuccBBLoop =
          getFirstNonBoxedLoopFor(SuccBB, LI, scop->getBoxedLoops());

      auto AdjustedInvalidDomain =
          adjustDomainDimensions(InvalidDomain, BBLoop, SuccBBLoop);

      isl::set SuccInvalidDomain = InvalidDomainMap[SuccBB];
      SuccInvalidDomain = SuccInvalidDomain.unite(AdjustedInvalidDomain);
      SuccInvalidDomain = SuccInvalidDomain.coalesce();

      InvalidDomainMap[SuccBB] = SuccInvalidDomain;

      // Check if the maximal number of domain disjunctions was reached.
      // In case this happens we will bail.
      if (SuccInvalidDomain.n_basic_set() < MaxDisjunctsInDomain)
        continue;

      InvalidDomainMap.erase(BB);
      scop->invalidate(COMPLEXITY, TI->getDebugLoc(), TI->getParent());
      return false;
    }

    InvalidDomainMap[BB] = InvalidDomain;
  }

  return true;
}
void ScopBuilder::buildPHIAccesses(ScopStmt *PHIStmt, PHINode *PHI,
                                   Region *NonAffineSubRegion,
                                   bool IsExitBlock) {
  // PHI nodes in the exit block of the region (IsExitBlock == true) are not
  // modeled as ordinary PHI nodes as they are not part of the region; only
  // their operands in predecessor blocks inside the region are modeled as
  // regular scalar accesses.

  // A synthesizable PHI inside the region needs no explicit modeling at all.
  // Exit-block PHIs are never skipped: their operands must still be modeled.
  Loop *PHILoop = LI.getLoopFor(PHI->getParent());
  if (!IsExitBlock && canSynthesize(PHI, *scop, &SE, PHILoop))
    return;

  // PHI nodes are modeled as if they had been demoted prior to the SCoP
  // detection: the PHI reads a memory location that every incoming block
  // wrote its incoming value to.
  bool AllOpsInNonAffineRegion = true;
  for (unsigned Idx = 0, NumIn = PHI->getNumIncomingValues(); Idx < NumIn;
       ++Idx) {
    Value *IncomingVal = PHI->getIncomingValue(Idx);
    BasicBlock *IncomingBB = PHI->getIncomingBlock(Idx);
    ScopStmt *IncomingStmt = scop->getIncomingStmtFor(PHI->getOperandUse(Idx));

    // Do not build PHI dependences inside a non-affine subregion, but make
    // sure that the necessary scalar values are still made available.
    if (NonAffineSubRegion && NonAffineSubRegion->contains(IncomingBB)) {
      auto *IncomingInst = dyn_cast<Instruction>(IncomingVal);
      if (!IncomingInst || !NonAffineSubRegion->contains(IncomingInst))
        ensureValueRead(IncomingVal, IncomingStmt);
      continue;
    }

    AllOpsInNonAffineRegion = false;
    ensurePHIWrite(PHI, IncomingStmt, IncomingBB, IncomingVal, IsExitBlock);
  }

  if (!IsExitBlock && !AllOpsInNonAffineRegion)
    addPHIReadAccess(PHIStmt, PHI);
}
void ScopBuilder::buildScalarDependences(ScopStmt *UserStmt,
                                         Instruction *Inst) {
  assert(!isa<PHINode>(Inst));

  // Make every operand value available inside UserStmt.
  for (Value *OperandVal : Inst->operand_values())
    ensureValueRead(OperandVal, UserStmt);
}
// Create a sequence of two schedules. Either argument may be null and is
// interpreted as the empty schedule. Can also return null if both schedules
// are empty.
static isl::schedule combineInSequence(isl::schedule Prev, isl::schedule Succ) {
  // A null schedule acts as the neutral element of sequencing.
  if (!Prev || !Succ)
    return Prev ? Prev : Succ;

  return Prev.sequence(Succ);
}
// Create an isl_multi_union_aff that defines an identity mapping from the
// elements of USet to their N-th dimension.
//
// # Example:
//
//   Domain: { A[i,j]; B[i,j,k] }
//   N: 1
//
// Resulting Mapping: { {A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
//
// NOTE(review): the implementation below keeps output dimension N-1, i.e. it
// treats N as 1-based (N == 1 selects the outermost dimension `i`); that
// conflicts with the 0-based reading suggested by the example above — verify
// which reading is intended.
//
// @param USet A union set describing the elements for which to generate a
//             mapping.
// @param N    The dimension to map to.
// @returns    A mapping from USet to its N-th dimension.
static isl::multi_union_pw_aff mapToDimension(isl::union_set USet, int N) {
  assert(N >= 0);
  assert(USet);
  assert(!USet.is_empty());

  auto Result = isl::union_pw_multi_aff::empty(USet.get_space());

  // Handle each set in the union separately, since the sets may have
  // different dimensionality.
  for (isl::set S : USet.get_set_list()) {
    int Dim = S.dim(isl::dim::set);
    // Project away dims [N, Dim), leaving the first N dims as outputs.
    auto PMA = isl::pw_multi_aff::project_out_map(S.get_space(), isl::dim::set,
                                                  N, Dim - N);
    // Then drop all but the last remaining output, i.e. keep dim N-1 only.
    if (N > 1)
      PMA = PMA.drop_dims(isl::dim::out, 0, N - 1);

    Result = Result.add_pw_multi_aff(PMA);
  }

  return isl::multi_union_pw_aff(isl::union_pw_multi_aff(Result));
}
void ScopBuilder::buildSchedule() {
Loop *L = getLoopSurroundingScop(*scop, LI);
LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
buildSchedule(scop->getRegion().getNode(), LoopStack);
assert(LoopStack.size() == 1 && LoopStack.back().L == L);
scop->setScheduleTree(LoopStack[0].Schedule);
}
/// To generate a schedule for the elements in a Region we traverse the Region
/// in reverse-post-order and add the contained RegionNodes in traversal order
/// to the schedule of the loop that is currently at the top of the LoopStack.
/// For loop-free codes, this results in a correct sequential ordering.
///
/// Example:
///            bb1(0)
///           /     \.
///       bb2(1)   bb3(2)
///           \    /  \.
///           bb4(3)  bb5(4)
///              \    /
///              bb6(5)
///
/// Including loops requires additional processing. Whenever a loop header is
/// encountered, the corresponding loop is added to the @p LoopStack. Starting
/// from an empty schedule, we first process all RegionNodes that are within
/// this loop and complete the sequential schedule at this loop-level before
/// processing any other nodes. To implement this loop-nodes-first-processing,
/// the reverse post-order traversal is insufficient. Hence, we additionally
/// check if the traversal yields sub-regions or blocks that are outside the
/// last loop on the @p LoopStack. These region-nodes are then queued and only
/// traversed after all nodes within the current loop have been processed.
void ScopBuilder::buildSchedule(Region *R, LoopStackTy &LoopStack) {
  Loop *OuterScopLoop = getLoopSurroundingScop(*scop, LI);

  ReversePostOrderTraversal<Region *> RTraversal(R);
  std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
  std::deque<RegionNode *> DelayList;
  bool LastRNWaiting = false;

  // Iterate over the region @p R in reverse post-order but queue
  // sub-regions/blocks iff they are not part of the last encountered but not
  // completely traversed loop. The variable LastRNWaiting is a flag to
  // indicate that we queued the last sub-region/block from the reverse
  // post-order iterator. If it is set we have to explore the next
  // sub-region/block from the iterator (if any) to guarantee progress. If it
  // is not set we first try the next queued sub-region/blocks.
  while (!WorkList.empty() || !DelayList.empty()) {
    RegionNode *RN;

    // Take from the delay queue unless forced to advance the RPO work list
    // (either to guarantee progress or because nothing was delayed).
    if ((LastRNWaiting && !WorkList.empty()) || DelayList.empty()) {
      RN = WorkList.front();
      WorkList.pop_front();
      LastRNWaiting = false;
    } else {
      RN = DelayList.front();
      DelayList.pop_front();
    }

    // Nodes in loops not contained in the SCoP are scheduled as if they were
    // in the loop surrounding the SCoP.
    Loop *L = getRegionNodeLoop(RN, LI);
    if (!scop->contains(L))
      L = OuterScopLoop;

    Loop *LastLoop = LoopStack.back().L;
    if (LastLoop != L) {
      // RN is outside the loop currently being completed: delay it until that
      // loop's schedule is finished.
      if (LastLoop && !LastLoop->contains(L)) {
        LastRNWaiting = true;
        DelayList.push_back(RN);
        continue;
      }
      // Otherwise we entered a new (nested) loop: open it on the stack.
      LoopStack.push_back({L, nullptr, 0});
    }
    buildSchedule(RN, LoopStack);
  }
}
/// Append the statements of @p RN to the schedule of the innermost open loop
/// and finalize any loops that become complete with this node.
void ScopBuilder::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack) {
  if (RN->isSubRegion()) {
    auto *LocalRegion = RN->getNodeAs<Region>();
    // Affine subregions are scheduled node by node; only non-affine
    // subregions fall through and are handled as a single unit.
    if (!scop->isNonAffineSubRegion(LocalRegion)) {
      buildSchedule(LocalRegion, LoopStack);
      return;
    }
  }

  assert(LoopStack.rbegin() != LoopStack.rend());
  auto LoopData = LoopStack.rbegin();
  LoopData->NumBlocksProcessed += getNumBlocksInRegionNode(RN);

  // Sequence the statements of this node into the current loop's schedule.
  for (auto *Stmt : scop->getStmtListFor(RN)) {
    isl::union_set UDomain{Stmt->getDomain()};
    auto StmtSchedule = isl::schedule::from_domain(UDomain);
    LoopData->Schedule = combineInSequence(LoopData->Schedule, StmtSchedule);
  }

  // Check if we just processed the last node in this loop. If we did, finalize
  // the loop by:
  //
  //   - adding new schedule dimensions
  //   - folding the resulting schedule into the parent loop schedule
  //   - dropping the loop schedule from the LoopStack.
  //
  // Then continue to check surrounding loops, which might also have been
  // completed by this node.
  size_t Dimension = LoopStack.size();
  while (LoopData->L &&
         LoopData->NumBlocksProcessed == getNumBlocksInLoop(LoopData->L)) {
    isl::schedule Schedule = LoopData->Schedule;
    auto NumBlocksProcessed = LoopData->NumBlocksProcessed;

    assert(std::next(LoopData) != LoopStack.rend());
    // Move to the parent loop's stack entry.
    ++LoopData;
    --Dimension;

    if (Schedule) {
      isl::union_set Domain = Schedule.get_domain();
      isl::multi_union_pw_aff MUPA = mapToDimension(Domain, Dimension);
      Schedule = Schedule.insert_partial_schedule(MUPA);
      LoopData->Schedule = combineInSequence(LoopData->Schedule, Schedule);
    }

    // The parent now accounts for all blocks of the completed loop.
    LoopData->NumBlocksProcessed += NumBlocksProcessed;
  }
  // Now pop all loops processed up there from the LoopStack.
  LoopStack.erase(LoopStack.begin() + Dimension, LoopStack.end());
}
void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
  // Values used after the SCoP need an explicit write access; since escaping
  // users are not visited during statement construction, detect them here.
  if (!scop->isEscaping(Inst))
    return;
  ensureValueWrite(Inst);
}
/// Check that a value is a Fortran Array descriptor.
///
/// We check if V has the following structure:
///   %"struct.array1_real(kind=8)" = type { i8*, i<zz>, i<zz>,
///                                          [<num> x %struct.descriptor_dimension] }
///
///   %struct.descriptor_dimension = type { i<zz>, i<zz>, i<zz> }
///
/// 1. V's type name starts with "struct.array"
/// 2. V's type has layout as shown.
/// 3. Final member of V's type has name "struct.descriptor_dimension",
/// 4. "struct.descriptor_dimension" has layout as shown.
/// 5. Consistent use of i<zz> where <zz> is some fixed integer number.
///
/// We are interested in such types since this is the code that dragonegg
/// generates for Fortran array descriptors.
///
/// @param V the Value to be checked.
///
/// @returns True if V is a Fortran array descriptor, False otherwise.
bool isFortranArrayDescriptor(Value *V) {
  auto *PTy = dyn_cast<PointerType>(V->getType());
  if (!PTy)
    return false;

  Type *Ty = PTy->getElementType();
  assert(Ty && "Ty expected to be initialized");

  // The pointee must be a named struct "struct.array*" with exactly four
  // members (checks 1 and 2).
  auto *ArrDescTy = dyn_cast<StructType>(Ty);
  if (!ArrDescTy || !ArrDescTy->hasName() ||
      !ArrDescTy->getName().startswith("struct.array") ||
      ArrDescTy->getNumElements() != 4)
    return false;

  const ArrayRef<Type *> Members = ArrDescTy->elements();

  // First member: i8* (the data pointer).
  if (Members[0] != Type::getInt8PtrTy(V->getContext()))
    return false;

  // Members 1 and 2 must share one integer type i<zz> (check 5).
  Type *IntTy = Members[1];
  if (Members[2] != IntTy)
    return false;

  // Last member: [<num> x %struct.descriptor_dimension] (check 3).
  auto *DimArrayTy = dyn_cast<ArrayType>(Members[3]);
  if (!DimArrayTy)
    return false;

  // %struct.descriptor_dimension := type { ixx, ixx, ixx } (check 4).
  auto *DimTy = dyn_cast<StructType>(DimArrayTy->getElementType());
  if (!DimTy || !DimTy->hasName() ||
      DimTy->getName() != "struct.descriptor_dimension" ||
      DimTy->getNumElements() != 3)
    return false;

  // All dimension members use the same integer type (check 5).
  for (Type *MemberTy : DimTy->elements())
    if (MemberTy != IntTy)
      return false;

  return true;
}
/// Try to recover the Fortran array descriptor feeding @p Inst when the
/// allocation (malloc) and the store of the allocated pointer into the
/// descriptor are both visible in the IR.
///
/// The numbered "match:" comments appear to refer to steps of the IR pattern
/// dragonegg emits for descriptor-backed arrays — TODO confirm against that
/// pattern's documentation.
///
/// @returns The descriptor value, or nullptr if the pattern does not match.
Value *ScopBuilder::findFADAllocationVisible(MemAccInst Inst) {
  // match: 4.1 & 4.2 store/load
  if (!isa<LoadInst>(Inst) && !isa<StoreInst>(Inst))
    return nullptr;

  // match: 4
  if (Inst.getAlignment() != 8)
    return nullptr;

  Value *Address = Inst.getPointerOperand();

  const BitCastInst *Bitcast = nullptr;
  // [match: 3] The address may be a GEP off the typed memory ...
  if (auto *Slot = dyn_cast<GetElementPtrInst>(Address)) {
    Value *TypedMem = Slot->getPointerOperand();
    // match: 2
    Bitcast = dyn_cast<BitCastInst>(TypedMem);
  } else {
    // ... or the bitcast directly.
    // match: 2
    Bitcast = dyn_cast<BitCastInst>(Address);
  }

  if (!Bitcast)
    return nullptr;

  auto *MallocMem = Bitcast->getOperand(0);

  // match: 1 — the memory must come from a direct call to "malloc".
  auto *MallocCall = dyn_cast<CallInst>(MallocMem);
  if (!MallocCall)
    return nullptr;

  Function *MallocFn = MallocCall->getCalledFunction();
  if (!(MallocFn && MallocFn->hasName() && MallocFn->getName() == "malloc"))
    return nullptr;

  // Find all uses the malloc'd memory.
  // We are looking for a "store" into a struct with the type being the
  // Fortran descriptor type.
  for (auto user : MallocMem->users()) {
    // match: 5
    auto *MallocStore = dyn_cast<StoreInst>(user);
    if (!MallocStore)
      continue;

    auto *DescriptorGEP =
        dyn_cast<GEPOperator>(MallocStore->getPointerOperand());
    if (!DescriptorGEP)
      continue;

    // match: 5 — the GEP must index into a named struct type.
    auto DescriptorType =
        dyn_cast<StructType>(DescriptorGEP->getSourceElementType());
    if (!(DescriptorType && DescriptorType->hasName()))
      continue;

    Value *Descriptor = dyn_cast<Value>(DescriptorGEP->getPointerOperand());

    if (!Descriptor)
      continue;

    // Finally verify the candidate has the full descriptor layout.
    if (!isFortranArrayDescriptor(Descriptor))
      continue;

    return Descriptor;
  }

  return nullptr;
}
/// Try to recover the Fortran array descriptor feeding @p Inst when the
/// allocation itself is not visible: the accessed slot is (a GEP off) a load
/// of a bitcast of the descriptor.
///
/// @returns The descriptor value, or nullptr if the pattern does not match.
Value *ScopBuilder::findFADAllocationInvisible(MemAccInst Inst) {
  // match: 3
  if (!isa<LoadInst>(Inst) && !isa<StoreInst>(Inst))
    return nullptr;

  Value *Slot = Inst.getPointerOperand();

  LoadInst *MemLoad = nullptr;
  // [match: 2] The slot may be a GEP off the loaded pointer ...
  if (auto *SlotGEP = dyn_cast<GetElementPtrInst>(Slot)) {
    // match: 1
    MemLoad = dyn_cast<LoadInst>(SlotGEP->getPointerOperand());
  } else {
    // ... or the load directly.
    // match: 1
    MemLoad = dyn_cast<LoadInst>(Slot);
  }

  if (!MemLoad)
    return nullptr;

  auto *BitcastOperator =
      dyn_cast<BitCastOperator>(MemLoad->getPointerOperand());
  if (!BitcastOperator)
    return nullptr;

  // getOperand() always yields a non-null Value, so the previous
  // dyn_cast<Value> (and its dead null check) were redundant.
  Value *Descriptor = BitcastOperator->getOperand(0);

  if (!isFortranArrayDescriptor(Descriptor))
    return nullptr;

  return Descriptor;
}
/// Add all assumptions recorded during SCoP construction to the SCoP,
/// processing them in reverse recording order.
void ScopBuilder::addRecordedAssumptions() {
  for (auto &AS : llvm::reverse(scop->recorded_assumptions())) {
    if (!AS.BB) {
      // Assumptions without an attached block are added as-is.
      scop->addAssumption(AS.Kind, AS.Set, AS.Loc, AS.Sign,
                          nullptr /* BasicBlock */);
      continue;
    }

    // If the domain was deleted the assumptions are void.
    isl_set *Dom = scop->getDomainConditions(AS.BB).release();
    if (!Dom)
      continue;

    // If a basic block was given use its domain to simplify the assumption.
    // In case of restrictions we know they only have to hold on the domain,
    // thus we can intersect them with the domain of the block. However, for
    // assumptions the domain has to imply them, thus:
    //                 _          _____
    //   Dom => S  <=> A v B  <=> A - B
    //
    // To avoid the complement we will register A - B as a restriction not an
    // assumption.
    isl_set *S = AS.Set.copy();
    if (AS.Sign == AS_RESTRICTION)
      S = isl_set_params(isl_set_intersect(S, Dom));
    else /* (AS.Sign == AS_ASSUMPTION) */
      S = isl_set_params(isl_set_subtract(Dom, S));

    // Note: S and Dom are consumed by the raw isl calls above; only S's
    // result remains and is handed to isl::manage here.
    scop->addAssumption(AS.Kind, isl::manage(S), AS.Loc, AS_RESTRICTION, AS.BB);
  }
  scop->clearRecordedAssumptions();
}
/// Add user-provided assumptions (llvm.assume calls) to the SCoP context.
///
/// Only single-operand assume calls whose condition is an affine constraint
/// and that are contained in or dominate the SCoP are used; each one
/// restricts the SCoP's parameter context.
void ScopBuilder::addUserAssumptions(
    AssumptionCache &AC, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
  for (auto &Assumption : AC.assumptions()) {
    auto *CI = dyn_cast_or_null<CallInst>(Assumption);
    if (!CI || CI->getNumArgOperands() != 1)
      continue;

    // The assume must either be inside the SCoP or dominate it to be usable.
    bool InScop = scop->contains(CI);
    if (!InScop && !scop->isDominatedBy(DT, CI->getParent()))
      continue;

    auto *L = LI.getLoopFor(CI->getParent());
    auto *Val = CI->getArgOperand(0);
    ParameterSetTy DetectedParams;
    auto &R = scop->getRegion();
    if (!isAffineConstraint(Val, &R, L, SE, DetectedParams)) {
      ORE.emit(
          OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreUserAssumption", CI)
          << "Non-affine user assumption ignored.");
      continue;
    }

    // Collect all newly introduced parameters.
    ParameterSetTy NewParams;
    for (auto *Param : DetectedParams) {
      Param = extractConstantFactor(Param, SE).second;
      Param = scop->getRepresentingInvariantLoadSCEV(Param);
      if (scop->isParam(Param))
        continue;
      NewParams.insert(Param);
    }

    SmallVector<isl_set *, 2> ConditionSets;
    auto *TI = InScop ? CI->getParent()->getTerminator() : nullptr;
    BasicBlock *BB = InScop ? CI->getParent() : R.getEntry();
    // Simplify against the block domain when inside the SCoP, otherwise
    // against the SCoP context.
    auto *Dom = InScop ? isl_set_copy(scop->getDomainConditions(BB).get())
                       : isl_set_copy(scop->getContext().get());
    assert(Dom && "Cannot propagate a nullptr.");
    bool Valid = buildConditionSets(BB, Val, TI, L, Dom, InvalidDomainMap,
                                    ConditionSets);
    isl_set_free(Dom);

    if (!Valid)
      continue;

    // NOTE(review): the code treats ConditionSets[1] as the set where the
    // assumed condition does not hold (its complement is kept) — verify
    // against buildConditionSets' ordering.
    isl_set *AssumptionCtx = nullptr;
    if (InScop) {
      AssumptionCtx = isl_set_complement(isl_set_params(ConditionSets[1]));
      isl_set_free(ConditionSets[0]);
    } else {
      AssumptionCtx = isl_set_complement(ConditionSets[1]);
      AssumptionCtx = isl_set_intersect(AssumptionCtx, ConditionSets[0]);
    }

    // Project out newly introduced parameters as they are not otherwise
    // useful.
    if (!NewParams.empty()) {
      for (unsigned u = 0; u < isl_set_n_param(AssumptionCtx); u++) {
        auto *Id = isl_set_get_dim_id(AssumptionCtx, isl_dim_param, u);
        auto *Param = static_cast<const SCEV *>(isl_id_get_user(Id));
        isl_id_free(Id);

        if (!NewParams.count(Param))
          continue;

        // Decrement u so the dim that shifted into this slot is re-examined.
        AssumptionCtx =
            isl_set_project_out(AssumptionCtx, isl_dim_param, u--, 1);
      }
    }

    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "UserAssumption", CI)
             << "Use user assumption: " << stringFromIslObj(AssumptionCtx));
    isl::set newContext =
        scop->getContext().intersect(isl::manage(AssumptionCtx));
    scop->setContext(newContext);
  }
}
/// Try to model @p Inst as an access to a multi-dimensional array whose
/// dimension sizes are compile-time constants, recovered from the GEP index
/// expressions.
///
/// @returns True if an access was added, false if the pattern does not apply
///          and another access builder should be tried.
bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, ScopStmt *Stmt) {
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  Value *Address = Inst.getPointerOperand();
  const SCEV *AccessFunction =
      SE.getSCEVAtScope(Address, LI.getLoopFor(Inst->getParent()));
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  // Look through a bitcast of the address if it preserves the allocation
  // size; the GEP of interest may be behind it.
  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
    auto *Src = BitCast->getOperand(0);
    auto *SrcTy = Src->getType();
    auto *DstTy = BitCast->getType();
    // Do not try to delinearize non-sized (opaque) pointers.
    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
      return false;
    }
    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
            DL.getTypeAllocSize(DstTy->getPointerElementType()))
      Address = Src;
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
  if (!GEP)
    return false;

  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;
  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
  auto *BasePtr = GEP->getOperand(0);

  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
    BasePtr = BasePtrCast->getOperand(0);

  // Check for identical base pointers to ensure that we do not miss index
  // offsets that have been added before this GEP is applied.
  if (BasePtr != BasePointer->getValue())
    return false;

  std::vector<const SCEV *> SizesSCEV;

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  // Every subscript must be affine, and every load it depends on must be one
  // of the SCoP's required invariant loads.
  for (auto *Subscript : Subscripts) {
    InvariantLoadsSetTy AccessILS;
    if (!isAffineExpr(&scop->getRegion(), SurroundingLoop, Subscript, SE,
                      &AccessILS))
      return false;

    for (LoadInst *LInst : AccessILS)
      if (!ScopRIL.count(LInst))
        return false;
  }

  if (Sizes.empty())
    return false;

  // Leading nullptr size: the outermost dimension carries no size bound
  // (same convention as the other addArrayAccess call sites).
  SizesSCEV.push_back(nullptr);

  for (auto V : Sizes)
    SizesSCEV.push_back(SE.getSCEV(
        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));

  addArrayAccess(Stmt, Inst, AccType, BasePointer->getValue(), ElementType,
                 true, Subscripts, SizesSCEV, Val);
  return true;
}
/// Try to model @p Inst as a multi-dimensional array access whose dimension
/// sizes are parametric, taken from a previously computed delinearization.
/// Only active when -polly-delinearize is enabled.
///
/// @returns True if an access was added, false if no delinearization exists
///          for @p Inst and another access builder should be tried.
bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, ScopStmt *Stmt) {
  if (!PollyDelinearize)
    return false;

  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  unsigned ElementSize = DL.getTypeAllocSize(ElementType);
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction =
      SE.getSCEVAtScope(Address, LI.getLoopFor(Inst->getParent()));
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");

  // Look up the delinearized shape computed earlier for this instruction
  // (presumably during SCoP detection — populated elsewhere).
  auto &InsnToMemAcc = scop->getInsnToMemAccMap();
  auto AccItr = InsnToMemAcc.find(Inst);
  if (AccItr == InsnToMemAcc.end())
    return false;

  // Leading nullptr size: the outermost dimension carries no size bound.
  std::vector<const SCEV *> Sizes = {nullptr};

  Sizes.insert(Sizes.end(), AccItr->second.Shape->DelinearizedSizes.begin(),
               AccItr->second.Shape->DelinearizedSizes.end());

  // In case only the element size is contained in the 'Sizes' array, the
  // access does not access a real multi-dimensional array. Hence, we allow
  // the normal single-dimensional access construction to handle this.
  if (Sizes.size() == 1)
    return false;

  // Remove the element size. This information is already provided by the
  // ElementSize parameter. In case the element size of this access and the
  // element size used for delinearization differs the delinearization is
  // incorrect. Hence, we invalidate the scop.
  //
  // TODO: Handle delinearization with differing element sizes.
  auto DelinearizedSize =
      cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
  Sizes.pop_back();
  if (ElementSize != DelinearizedSize)
    scop->invalidate(DELINEARIZATION, Inst->getDebugLoc(), Inst->getParent());

  addArrayAccess(Stmt, Inst, AccType, BasePointer->getValue(), ElementType,
                 true, AccItr->second.DelinearizedSubscripts, Sizes, Val);
  return true;
}
/// Model a memory intrinsic (memset/memcpy/memmove) as byte-wise array
/// accesses: a MUST_WRITE of the destination and, for transfer intrinsics, a
/// READ of the source.
///
/// @returns True if @p Inst is a memory intrinsic (handled here), false
///          otherwise so another access builder can be tried.
bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt) {
  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);

  if (MemIntr == nullptr)
    return false;

  auto *L = LI.getLoopFor(Inst->getParent());
  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
  assert(LengthVal);

  // Check if the length val is actually affine or if we overapproximate it.
  InvariantLoadsSetTy AccessILS;
  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  bool LengthIsAffine = isAffineExpr(&scop->getRegion(), SurroundingLoop,
                                     LengthVal, SE, &AccessILS);
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      LengthIsAffine = false;
  // A null length marks the overapproximated case for addArrayAccess below.
  if (!LengthIsAffine)
    LengthVal = nullptr;

  auto *DestPtrVal = MemIntr->getDest();
  assert(DestPtrVal);

  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
  assert(DestAccFunc);
  // Ignore accesses to "NULL".
  // TODO: We could use this to optimize the region further, e.g., intersect
  //       the context with
  //       isl_set_complement(isl_set_params(getDomain()))
  //       as we know it would be undefined to execute this instruction anyway.
  if (DestAccFunc->isZero())
    return true;

  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
  assert(DestPtrSCEV);
  // Offsets are expressed relative to the base pointer, with i8 elements.
  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
  addArrayAccess(Stmt, Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(DestPtrVal->getContext()),
                 LengthIsAffine, {DestAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  // Only transfer intrinsics (memcpy/memmove) additionally read a source.
  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
  if (!MemTrans)
    return true;

  auto *SrcPtrVal = MemTrans->getSource();
  assert(SrcPtrVal);

  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
  assert(SrcAccFunc);
  // Ignore accesses to "NULL".
  // TODO: See above TODO
  if (SrcAccFunc->isZero())
    return true;

  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
  assert(SrcPtrSCEV);
  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
  addArrayAccess(Stmt, Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(SrcPtrVal->getContext()),
                 LengthIsAffine, {SrcAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  return true;
}
/// Model the memory effects of a call instruction based on its alias-analysis
/// mod/ref behavior.
///
/// @returns True if @p Inst is a call and was handled here, false otherwise
///          (including calls whose behavior cannot be modeled) so another
///          access builder can be tried.
bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, ScopStmt *Stmt) {
  auto *CI = dyn_cast_or_null<CallInst>(Inst);

  if (CI == nullptr)
    return false;

  // Calls without memory effects, ignored intrinsics and debug calls need no
  // access modeling.
  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI) || isDebugCall(CI))
    return true;

  bool ReadOnly = false;
  // Zero offset: argument-pointee accesses are modeled at the base pointer.
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
  auto *CalledFunction = CI->getCalledFunction();
  switch (AA.getModRefBehavior(CalledFunction)) {
  case FMRB_UnknownModRefBehavior:
    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
  case FMRB_DoesNotAccessMemory:
    return true;
  case FMRB_DoesNotReadMemory:
  case FMRB_OnlyAccessesInaccessibleMem:
  case FMRB_OnlyAccessesInaccessibleOrArgMem:
    // Not modeled here; hand the instruction back to the caller.
    return false;
  case FMRB_OnlyReadsMemory:
    // Record for deferred processing via GlobalReads.
    GlobalReads.emplace_back(Stmt, CI);
    return true;
  case FMRB_OnlyReadsArgumentPointees:
    ReadOnly = true;
    LLVM_FALLTHROUGH;
  case FMRB_OnlyAccessesArgumentPointees: {
    // Add one access per (non-null) pointer argument.
    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
    Loop *L = LI.getLoopFor(Inst->getParent());
    for (const auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;

      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
      if (ArgSCEV->isZero())
        continue;

      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
      addArrayAccess(Stmt, Inst, AccType, ArgBasePtr->getValue(),
                     ArgBasePtr->getType(), false, {AF}, {nullptr}, CI);
    }
    return true;
  }
  }

  return true;
}
void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, ScopStmt *Stmt) {
  // Fallback builder: model Inst as a one-dimensional access relative to its
  // SCEV base pointer.
  Value *Ptr = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElemTy = Val->getType();
  enum MemoryAccess::AccessType Kind =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFn =
      SE.getSCEVAtScope(Ptr, LI.getLoopFor(Inst->getParent()));
  const auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
  assert(Base && "Could not find base pointer");
  AccessFn = SE.getMinusSCEV(AccessFn, Base);

  // Check if the access depends on a loop contained in a non-affine
  // subregion.
  SetVector<const Loop *> AccessLoops;
  findLoops(AccessFn, AccessLoops);
  bool VariantInNonAffineLoop = false;
  for (const Loop *L : AccessLoops) {
    if (Stmt->contains(L)) {
      VariantInNonAffineLoop = true;
      break;
    }
  }

  InvariantLoadsSetTy AccessILS;
  Loop *SurroundingLoop = Stmt->getSurroundingLoop();
  bool IsAffine = !VariantInNonAffineLoop &&
                  isAffineExpr(&scop->getRegion(), SurroundingLoop, AccessFn,
                               SE, &AccessILS);

  // Loads the access depends on must be among the required invariant loads.
  const InvariantLoadsSetTy &RequiredILS = scop->getRequiredInvariantLoads();
  for (LoadInst *LInst : AccessILS)
    if (!RequiredILS.count(LInst))
      IsAffine = false;

  // Non-affine writes are over-approximated as MAY_WRITE.
  if (!IsAffine && Kind == MemoryAccess::MUST_WRITE)
    Kind = MemoryAccess::MAY_WRITE;

  addArrayAccess(Stmt, Inst, Kind, Base->getValue(), ElemTy, IsAffine,
                 {AccessFn}, {nullptr}, Val);
}
void ScopBuilder::buildMemoryAccess(MemAccInst Inst, ScopStmt *Stmt) {
  // Try the specialized access builders in order of precedence; if none of
  // them handled the instruction, fall back to a single-dimensional access.
  if (!buildAccessMemIntrinsic(Inst, Stmt) &&
      !buildAccessCallInst(Inst, Stmt) &&
      !buildAccessMultiDimFixed(Inst, Stmt) &&
      !buildAccessMultiDimParam(Inst, Stmt))
    buildAccessSingleDim(Inst, Stmt);
}
void ScopBuilder::buildAccessFunctions() {
for (auto &Stmt : *scop) {
if (Stmt.isBlockStmt()) {
buildAccessFunctions(&Stmt, *Stmt.getBasicBlock());
continue;
}
Region *R = Stmt.getRegion();
for (BasicBlock *BB : R->blocks())
buildAccessFunctions(&Stmt, *BB, R);
}
// Build write accesses for values that are used after the SCoP.
// The instructions defining them might be synthesizable and therefore not
// contained in any statement, hence we iterate over the original instructions
// to identify all escaping values.
for (BasicBlock *BB : scop->getRegion().blocks()) {
for (Instruction &Inst : *BB)
buildEscapingDependences(&Inst);
}
}
/// Should an instruction be modeled in a statement?
///
/// Terminators and ignored intrinsics are never modeled; neither are values
/// that can be synthesized from their SCEV expression within loop @p L.
bool ScopBuilder::shouldModelInst(Instruction *Inst, Loop *L) {
  if (Inst->isTerminator() || isIgnoredIntrinsic(Inst))
    return false;
  return !canSynthesize(Inst, *scop, &SE, L);
}
/// Generate a name for a statement.
///
/// @param BB     The basic block the statement will represent.
/// @param BBIdx  The index of the @p BB relative to other BBs/regions.
/// @param Count  The index of the created statement in @p BB.
/// @param IsMain Whether this is the main of all statement for @p BB. If true,
///               no suffix will be added.
/// @param IsLast Uses a special indicator for the last statement of a BB.
static std::string makeStmtName(BasicBlock *BB, long BBIdx, int Count,
                                bool IsMain, bool IsLast = false) {
  std::string Suffix;
  if (!IsMain) {
    if (UseInstructionNames)
      Suffix = '_';
    if (IsLast) {
      Suffix += "last";
    } else if (Count < 26) {
      // The first 26 sub-statements get single-letter suffixes a..z.
      Suffix += static_cast<char>('a' + Count);
    } else {
      Suffix += std::to_string(Count);
    }
  }
  return getIslCompatibleName("Stmt", BB, BBIdx, Suffix, UseInstructionNames);
}
/// Generate a name for a statement that represents a non-affine subregion.
///
/// @param R    The region the statement will represent.
/// @param RIdx The index of the @p R relative to other BBs/regions.
static std::string makeStmtName(Region *R, long RIdx) {
  std::string RegionName = R->getNameStr();
  return getIslCompatibleName("Stmt", RegionName, RIdx, "",
                              UseInstructionNames);
}
/// Create statements for @p BB by splitting it at explicit split markers and,
/// if @p SplitOnStore is set, after every store instruction.
void ScopBuilder::buildSequentialBlockStmts(BasicBlock *BB, bool SplitOnStore) {
  Loop *SurroundingLoop = LI.getLoopFor(BB);
  long BBIdx = scop->getNextStmtIdx();
  int StmtCount = 0;
  std::vector<Instruction *> Pending;

  // Emit the instructions accumulated so far as one statement and start a
  // fresh one. The first statement of the BB is the "main" one.
  auto FlushStmt = [&]() {
    std::string StmtName = makeStmtName(BB, BBIdx, StmtCount, StmtCount == 0);
    scop->addScopStmt(BB, StmtName, SurroundingLoop, Pending);
    StmtCount++;
    Pending.clear();
  };

  for (Instruction &Inst : *BB) {
    if (shouldModelInst(&Inst, SurroundingLoop))
      Pending.push_back(&Inst);

    // Split either at an explicit "polly_split_after" marker or (optionally)
    // after every store.
    bool HasSplitMarker = Inst.getMetadata("polly_split_after") != nullptr;
    if (HasSplitMarker || (SplitOnStore && isa<StoreInst>(Inst)))
      FlushStmt();
  }

  // The trailing statement holds whatever remains after the last split point
  // (possibly the whole block if no split occurred).
  std::string StmtName = makeStmtName(BB, BBIdx, StmtCount, StmtCount == 0);
  scop->addScopStmt(BB, StmtName, SurroundingLoop, Pending);
}
/// Is @p Inst an ordered instruction?
///
/// An unordered instruction is an instruction, such that a sequence of
/// unordered instructions can be permuted without changing semantics. Any
/// instruction for which this is not always the case is ordered.
static bool isOrderedInstruction(Instruction *Inst) {
  // Side effects or any memory access pin the instruction's position.
  if (Inst->mayHaveSideEffects())
    return true;
  return Inst->mayReadOrWriteMemory();
}
/// Join instructions into the same statement if one uses the scalar result of
/// the other.
static void joinOperandTree(EquivalenceClasses<Instruction *> &UnionFind,
                            ArrayRef<Instruction *> ModeledInsts) {
  for (Instruction *Inst : ModeledInsts) {
    // Do not pull a PHI's operand definitions into the PHI's statement.
    if (isa<PHINode>(Inst))
      continue;

    for (Value *Op : Inst->operand_values()) {
      auto *DefInst = dyn_cast<Instruction>(Op);
      if (!DefInst)
        continue;

      // Only unite with operands that are themselves modeled instructions of
      // this basic block; anything else is not in the union-find structure.
      if (UnionFind.findValue(DefInst) == UnionFind.end())
        continue;

      UnionFind.unionSets(Inst, DefInst);
    }
  }
}
/// Ensure that the order of ordered instructions does not change.
///
/// If we encounter an ordered instruction enclosed in instructions belonging to
/// a different statement (which might as well contain ordered instructions, but
/// this is not tested here), join them.
///
/// @param UnionFind    Equivalence classes of instructions; classes are merged
///                     in place.
/// @param ModeledInsts The modeled instructions of the basic block, in program
///                     order.
static void
joinOrderedInstructions(EquivalenceClasses<Instruction *> &UnionFind,
                        ArrayRef<Instruction *> ModeledInsts) {
  // Leaders of statements already encountered, in encounter order.
  SetVector<Instruction *> SeenLeaders;
  for (Instruction *Inst : ModeledInsts) {
    if (!isOrderedInstruction(Inst))
      continue;

    Instruction *Leader = UnionFind.getLeaderValue(Inst);
    // Since previous iterations might have merged sets, some items in
    // SeenLeaders are not leaders anymore. However, The new leader of
    // previously merged instructions must be one of the former leaders of
    // these merged instructions.
    bool Inserted = SeenLeaders.insert(Leader);
    if (Inserted)
      continue;

    // Merge statements to close holes. Say, we have already seen statements A
    // and B, in this order. Then we see an instruction of A again and we would
    // see the pattern "A B A". This function joins all statements until the
    // only seen occurrence of A.
    for (Instruction *Prev : reverse(SeenLeaders)) {
      // We are backtracking from the last element until we see Inst's leader
      // in SeenLeaders and merge all into one set. Although leaders of
      // instructions change during the execution of this loop, it's irrelevant
      // as we are just searching for the element that we already confirmed is
      // in the list.
      if (Prev == Leader)
        break;
      UnionFind.unionSets(Prev, Leader);
    }
  }
}
/// If the BasicBlock has an edge from itself, ensure that the PHI WRITEs for
/// the incoming values from this block are executed after the PHI READ.
///
/// Otherwise it could overwrite the incoming value from before the BB with the
/// value for the next execution. This can happen if the PHI WRITE is added to
/// the statement with the instruction that defines the incoming value (instead
/// of the last statement of the same BB). To ensure that the PHI READ and WRITE
/// are in order, we put both into the statement. PHI WRITEs are always executed
/// after PHI READs when they are in the same statement.
///
/// TODO: This is an overpessimization. We only have to ensure that the PHI
/// WRITE is not put into a statement containing the PHI itself. That could also
/// be done by
/// - having all (strongly connected) PHIs in a single statement,
/// - unite only the PHIs in the operand tree of the PHI WRITE (because it only
///   has a chance of being lifted before a PHI by being in a statement with a
///   PHI that comes before in the basic block), or
/// - when uniting statements, ensure that no (relevant) PHIs are overtaken.
static void joinOrderedPHIs(EquivalenceClasses<Instruction *> &UnionFind,
                            ArrayRef<Instruction *> ModeledInsts) {
  for (Instruction *Inst : ModeledInsts) {
    auto *PHI = dyn_cast<PHINode>(Inst);
    if (!PHI)
      continue;

    // Only PHIs with a self-loop edge (BB -> BB) are relevant here.
    int SelfEdgeIdx = PHI->getBasicBlockIndex(PHI->getParent());
    if (SelfEdgeIdx < 0)
      continue;

    // Non-instruction incoming values (constants, arguments) need no merging.
    auto *IncomingDef =
        dyn_cast<Instruction>(PHI->getIncomingValue(SelfEdgeIdx));
    if (IncomingDef)
      UnionFind.unionSets(PHI, IncomingDef);
  }
}
/// Create statements for @p BB by partitioning its modeled instructions into
/// equivalence classes: instructions connected through scalar operands,
/// ordered (memory-touching) instructions that must keep their relative
/// order, and self-loop PHIs with their incoming values end up in the same
/// statement. A possibly-empty epilogue statement is always appended to hold
/// PHI writes for successor blocks.
void ScopBuilder::buildEqivClassBlockStmts(BasicBlock *BB) {
  Loop *L = LI.getLoopFor(BB);

  // Extracting out modeled instructions saves us from checking
  // shouldModelInst() repeatedly.
  SmallVector<Instruction *, 32> ModeledInsts;
  EquivalenceClasses<Instruction *> UnionFind;
  Instruction *MainInst = nullptr, *MainLeader = nullptr;
  for (Instruction &Inst : *BB) {
    if (!shouldModelInst(&Inst, L))
      continue;
    ModeledInsts.push_back(&Inst);
    UnionFind.insert(&Inst);

    // When a BB is split into multiple statements, the main statement is the
    // one containing the 'main' instruction. We select the first instruction
    // that is unlikely to be removed (because it has side-effects) as the main
    // one. It is used to ensure that at least one statement from the bb has the
    // same name as with -polly-stmt-granularity=bb.
    if (!MainInst && (isa<StoreInst>(Inst) ||
                      (isa<CallInst>(Inst) && !isa<IntrinsicInst>(Inst))))
      MainInst = &Inst;
  }

  // Merge equivalence classes in three passes; each pass may only grow
  // classes, so the order of the passes is not semantically relevant.
  joinOperandTree(UnionFind, ModeledInsts);
  joinOrderedInstructions(UnionFind, ModeledInsts);
  joinOrderedPHIs(UnionFind, ModeledInsts);

  // The list of instructions for statement (statement represented by the leader
  // instruction).
  MapVector<Instruction *, std::vector<Instruction *>> LeaderToInstList;

  // The order of statements must be preserved w.r.t. their ordered
  // instructions. Without this explicit scan, we would also use non-ordered
  // instructions (whose order is arbitrary) to determine statement order.
  for (Instruction &Inst : *BB) {
    if (!isOrderedInstruction(&Inst))
      continue;

    auto LeaderIt = UnionFind.findLeader(&Inst);
    if (LeaderIt == UnionFind.member_end())
      continue;

    // Insert element for the leader instruction.
    (void)LeaderToInstList[*LeaderIt];
  }

  // Collect the instructions of all leaders. UnionFind's member iterator
  // unfortunately are not in any specific order.
  for (Instruction &Inst : *BB) {
    auto LeaderIt = UnionFind.findLeader(&Inst);
    if (LeaderIt == UnionFind.member_end())
      continue;

    if (&Inst == MainInst)
      MainLeader = *LeaderIt;
    std::vector<Instruction *> &InstList = LeaderToInstList[*LeaderIt];
    InstList.push_back(&Inst);
  }

  // Finally build the statements.
  int Count = 0;
  long BBIdx = scop->getNextStmtIdx();
  for (auto &Instructions : LeaderToInstList) {
    std::vector<Instruction *> &InstList = Instructions.second;

    // If there is no main instruction, make the first statement the main.
    bool IsMain = (MainInst ? MainLeader == Instructions.first : Count == 0);

    std::string Name = makeStmtName(BB, BBIdx, Count, IsMain);
    scop->addScopStmt(BB, Name, L, std::move(InstList));
    Count += 1;
  }

  // Unconditionally add an epilogue (last statement). It contains no
  // instructions, but holds the PHI write accesses for successor basic blocks,
  // if the incoming value is not defined in another statement if the same BB.
  // The epilogue becomes the main statement only if there is no other
  // statement that could become main.
  // The epilogue will be removed if no PHIWrite is added to it.
  std::string EpilogueName = makeStmtName(BB, BBIdx, Count, Count == 0, true);
  scop->addScopStmt(BB, EpilogueName, L, {});
}
/// Create the ScopStmts for region @p SR, recursing into subregions.
///
/// A non-affine subregion is represented by a single region statement; all
/// other basic blocks are split into statements according to the selected
/// statement granularity.
void ScopBuilder::buildStmts(Region &SR) {
  if (scop->isNonAffineSubRegion(&SR)) {
    Loop *SurroundingLoop =
        getFirstNonBoxedLoopFor(SR.getEntry(), LI, scop->getBoxedLoops());

    // Only the entry block's instructions are attached to the region
    // statement here.
    std::vector<Instruction *> Instructions;
    for (Instruction &Inst : *SR.getEntry())
      if (shouldModelInst(&Inst, SurroundingLoop))
        Instructions.push_back(&Inst);

    long RIdx = scop->getNextStmtIdx();
    scop->addScopStmt(&SR, makeStmtName(&SR, RIdx), SurroundingLoop,
                      Instructions);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I) {
    if (I->isSubRegion()) {
      buildStmts(*I->getNodeAs<Region>());
      continue;
    }

    BasicBlock *BB = I->getNodeAs<BasicBlock>();
    switch (StmtGranularity) {
    case GranularityChoice::BasicBlocks:
      buildSequentialBlockStmts(BB);
      break;
    case GranularityChoice::ScalarIndependence:
      buildEqivClassBlockStmts(BB);
      break;
    case GranularityChoice::Stores:
      buildSequentialBlockStmts(BB, true);
      break;
    }
  }
}
/// Build the access functions of all instructions of @p BB that belong to
/// statement @p Stmt.
void ScopBuilder::buildAccessFunctions(ScopStmt *Stmt, BasicBlock &BB,
                                       Region *NonAffineSubRegion) {
  assert(
      Stmt &&
      "The exit BB is the only one that cannot be represented by a statement");
  assert(Stmt->represents(&BB));

  // We do not build access functions for error blocks, as they may contain
  // instructions we can not model.
  if (isErrorBlock(BB, scop->getRegion(), LI, DT))
    return;

  auto BuildAccessesForInst = [this, Stmt,
                               NonAffineSubRegion](Instruction *Inst) {
    PHINode *PHI = dyn_cast<PHINode>(Inst);
    if (PHI)
      buildPHIAccesses(Stmt, PHI, NonAffineSubRegion, false);

    if (auto MemInst = MemAccInst::dyn_cast(*Inst)) {
      assert(Stmt && "Cannot build access function in non-existing statement");
      buildMemoryAccess(MemInst, Stmt);
    }

    // PHI nodes have already been modeled above and terminators that are
    // not part of a non-affine subregion are fully modeled and regenerated
    // from the polyhedral domains. Hence, they do not need to be modeled as
    // explicit data dependences.
    if (!PHI)
      buildScalarDependences(Stmt, Inst);
  };

  // For the entry block only the statement's own instruction list is
  // processed (plus the terminator for region statements).
  if (Stmt->getEntryBlock() == &BB) {
    for (Instruction *Inst : Stmt->getInstructions())
      BuildAccessesForInst(Inst);
    if (Stmt->isRegionStmt())
      BuildAccessesForInst(BB.getTerminator());
    return;
  }

  const InvariantLoadsSetTy &RIL = scop->getRequiredInvariantLoads();
  for (Instruction &Inst : BB) {
    if (isIgnoredIntrinsic(&Inst))
      continue;

    // Invariant loads already have been processed.
    if (auto *Load = dyn_cast<LoadInst>(&Inst))
      if (RIL.count(Load))
        continue;

    BuildAccessesForInst(&Inst);
  }
}
/// Create a MemoryAccess, register it with the SCoP and attach it to @p Stmt.
///
/// A MUST_WRITE is demoted to a MAY_WRITE unless the access is provably
/// executed whenever the statement executes.
MemoryAccess *ScopBuilder::addMemoryAccess(
    ScopStmt *Stmt, Instruction *Inst, MemoryAccess::AccessType AccType,
    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
    MemoryKind Kind) {
  // Accesses in single-basic block statements are always executed.
  bool MustExecute = Stmt->isBlockStmt();

  // Accesses that dominate the exit block of a non-affine region are always
  // executed. In non-affine regions there may exist MemoryKind::Values that
  // do not dominate the exit. MemoryKind::Values will always dominate the
  // exit and MemoryKind::PHIs only if there is at most one PHI_WRITE in the
  // non-affine region.
  if (Stmt->isRegionStmt() && Inst &&
      DT.dominates(Inst->getParent(), Stmt->getRegion()->getExit()))
    MustExecute = true;

  // Non-affine PHI writes do not "happen" at a particular instruction, but
  // after exiting the statement. Therefore they are guaranteed to execute and
  // overwrite the old value.
  if (Kind == MemoryKind::PHI || Kind == MemoryKind::ExitPHI)
    MustExecute = true;

  if (!MustExecute && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  auto *Access = new MemoryAccess(Stmt, Inst, AccType, BaseAddress, ElementType,
                                  Affine, Subscripts, Sizes, AccessValue, Kind);

  scop->addAccessFunction(Access);
  Stmt->addAccess(Access);
  return Access;
}
/// Create a MemoryKind::Array access for @p MemAccInst and, if Fortran array
/// detection is enabled, attach a Fortran array descriptor when one is found.
void ScopBuilder::addArrayAccess(ScopStmt *Stmt, MemAccInst MemAccInst,
                                 MemoryAccess::AccessType AccType,
                                 Value *BaseAddress, Type *ElementType,
                                 bool IsAffine,
                                 ArrayRef<const SCEV *> Subscripts,
                                 ArrayRef<const SCEV *> Sizes,
                                 Value *AccessValue) {
  ArrayBasePointers.insert(BaseAddress);
  MemoryAccess *Access =
      addMemoryAccess(Stmt, MemAccInst, AccType, BaseAddress, ElementType,
                      IsAffine, AccessValue, Subscripts, Sizes,
                      MemoryKind::Array);

  if (!DetectFortranArrays)
    return;

  // Prefer the "invisible" allocation; fall back to the visible one.
  Value *FAD = findFADAllocationInvisible(MemAccInst);
  if (!FAD)
    FAD = findFADAllocationVisible(MemAccInst);
  if (FAD)
    Access->setFortranArrayDescriptor(FAD);
}
/// Check if @p Expr is divisible by @p Size.
static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {
assert(Size != 0);
if (Size == 1)
return true;
// Only one factor needs to be divisible.
if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
for (auto *FactorExpr : MulExpr->operands())
if (isDivisible(FactorExpr, Size, SE))
return true;
return false;
}
// For other n-ary expressions (Add, AddRec, Max,...) all operands need
// to be divisible.
if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
for (auto *OpExpr : NAryExpr->operands())
if (!isDivisible(OpExpr, Size, SE))
return false;
return true;
}
auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
return MulSCEV == Expr;
}
/// Fold constant strides discovered in the accessed elements of
/// multi-dimensional arrays into the size of the innermost (right-most)
/// dimension, and rewrite all access relations accordingly.
void ScopBuilder::foldSizeConstantsToRight() {
  isl::union_set Accessed = scop->getAccesses().range();

  for (auto Array : scop->arrays()) {
    // One-dimensional arrays have no outer dimension to fold from.
    if (Array->getNumberOfDimensions() <= 1)
      continue;

    isl::space Space = Array->getSpace();
    Space = Space.align_params(Accessed.get_space());

    if (!Accessed.contains(Space))
      continue;

    isl::set Elements = Accessed.extract_set(Space);
    isl::map Transform = isl::map::universe(Array->getSpace().map_from_set());

    // Per-dimension stride; 1 means "no fold information" and 0 means the
    // dimension is always accessed at index zero.
    std::vector<int> Int;
    int Dims = Elements.dim(isl::dim::set);
    for (int i = 0; i < Dims; i++) {
      // Project Elements down to the single dimension i.
      isl::set DimOnly = isl::set(Elements).project_out(isl::dim::set, 0, i);
      DimOnly = DimOnly.project_out(isl::dim::set, 1, Dims - i - 1);
      DimOnly = DimOnly.lower_bound_si(isl::dim::set, 0, 0);

      isl::basic_set DimHull = DimOnly.affine_hull();

      // The innermost dimension is kept as-is; strides fold into it.
      if (i == Dims - 1) {
        Int.push_back(1);
        Transform = Transform.equate(isl::dim::in, i, isl::dim::out, i);
        continue;
      }

      // A single existentially quantified div in the hull indicates a
      // constant stride in this dimension.
      if (DimHull.dim(isl::dim::div) == 1) {
        isl::aff Diff = DimHull.get_div(0);
        isl::val Val = Diff.get_denominator_val();

        int ValInt = 1;
        // Only use the stride if it fits into a signed 32-bit integer;
        // otherwise keep the neutral value 1 (no folding for this dim).
        if (Val.is_int()) {
          auto ValAPInt = APIntFromVal(Val);
          if (ValAPInt.isSignedIntN(32))
            ValInt = ValAPInt.getSExtValue();
        }

        Int.push_back(ValInt);

        // Constrain the transform: out[i] * stride == in[i].
        isl::constraint C = isl::constraint::alloc_equality(
            isl::local_space(Transform.get_space()));
        C = C.set_coefficient_si(isl::dim::out, i, ValInt);
        C = C.set_coefficient_si(isl::dim::in, i, -1);
        Transform = Transform.add_constraint(C);
        continue;
      }

      // If the dimension is always zero, record stride 0; either way map it
      // through unchanged.
      isl::basic_set ZeroSet = isl::basic_set(DimHull);
      ZeroSet = ZeroSet.fix_si(isl::dim::set, 0, 0);

      int ValInt = 1;
      if (ZeroSet.is_equal(DimHull)) {
        ValInt = 0;
      }

      Int.push_back(ValInt);
      Transform = Transform.equate(isl::dim::in, i, isl::dim::out, i);
    }

    // The transform must be defined for every accessed element, otherwise
    // applying it would drop accesses.
    isl::set MappedElements = isl::map(Transform).domain();
    if (!Elements.is_subset(MappedElements))
      continue;

    // Fold only if the outermost stride is > 1 and every other folded
    // dimension agrees with it (0 entries are compatible with any stride).
    bool CanFold = true;
    if (Int[0] <= 1)
      CanFold = false;

    unsigned NumDims = Array->getNumberOfDimensions();
    for (unsigned i = 1; i < NumDims - 1; i++)
      if (Int[0] != Int[i] && Int[i])
        CanFold = false;

    if (!CanFold)
      continue;

    // Rewrite every access to this array through the fold transform.
    for (auto &Access : scop->access_functions())
      if (Access->getScopArrayInfo() == Array)
        Access->setAccessRelation(
            Access->getAccessRelation().apply_range(Transform));

    // Grow the innermost dimension's size by the folded stride.
    std::vector<const SCEV *> Sizes;
    for (unsigned i = 0; i < NumDims; i++) {
      auto Size = Array->getDimensionSize(i);

      if (i == NumDims - 1)
        Size = SE.getMulExpr(Size, SE.getConstant(Size->getType(), Int[0]));
      Sizes.push_back(Size);
    }

    Array->updateSizes(Sizes, false /* CheckConsistency */);
  }
}
void ScopBuilder::markFortranArrays() {
for (ScopStm