1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with LLVM-IR.
//
//===----------------------------------------------------------------------===//
#ifndef POLLY_SUPPORT_IRHELPER_H
#define POLLY_SUPPORT_IRHELPER_H
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
namespace llvm {
class LoopInfo;
class Loop;
class ScalarEvolution;
class SCEV;
class Region;
class Pass;
class DominatorTree;
class RegionInfo;
class RegionNode;
} // namespace llvm
namespace polly {
class Scop;
class ScopStmt;
/// Type to remap values.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
llvm::AssertingVH<llvm::Value>>;
/// Type for a set of invariant loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;
/// Set type for parameters.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;
/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;
/// Utility proxy to wrap the common members of LoadInst and StoreInst.
///
/// This works like the LLVM utility class CallSite, i.e. it forwards all calls
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
/// MemTransferInst, etc. in that it offers a common interface, but does not act
/// as a fake base class.
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
/// referenced object and should be passed by-value as it is small enough.
///
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
/// nullptr (only creatable using the default constructor); never an Instruction
/// that is neither of the above mentioned. When representing a nullptr, only
/// the following methods are defined:
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
/// operator bool(), operator!()
///
/// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
/// those from llvm/Support/Casting.h. Partial template function specialization
/// is currently not supported in C++ such that those cannot be used directly.
/// (llvm::isa could, but then llvm::cast etc. would not have the expected
/// behavior)
class MemAccInst {
private:
  // The wrapped instruction. nullptr only when default-constructed; otherwise
  // always a LoadInst, StoreInst, MemIntrinsic or CallInst.
  llvm::Instruction *I;

public:
  MemAccInst() : I(nullptr) {}
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
  // The explicit constructors verify (in asserts builds only) that the
  // instruction is one of the kinds this proxy may represent.
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }

  /// Return true iff @p V is an instruction kind this proxy can represent.
  static bool isa(const llvm::Value &V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static bool isa(const llvm::Value *V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  /// Wrap @p V, which must be a representable instruction (llvm::cast asserts
  /// in debug builds, mirroring llvm/Support/Casting.h semantics).
  static MemAccInst cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  // A reference can never be null, so this overload behaves like cast().
  static MemAccInst cast_or_null(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  /// Like cast(), but a nullptr @p V yields a null MemAccInst.
  static MemAccInst cast_or_null(llvm::Value *V) {
    if (!V)
      return MemAccInst();
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  /// Wrap @p V if representable, otherwise return a null MemAccInst.
  static MemAccInst dyn_cast(llvm::Value &V) {
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }
  // Like llvm::dyn_cast, a non-null argument is required; use cast_or_null for
  // possibly-null values.
  static MemAccInst dyn_cast(llvm::Value *V) {
    assert(V);
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }

  MemAccInst &operator=(const MemAccInst &Inst) {
    I = Inst.I;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst &LI) {
    I = &LI;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst *LI) {
    I = LI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst &SI) {
    I = &SI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst *SI) {
    I = SI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
    I = &MI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
    I = MI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst &CI) {
    I = &CI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst *CI) {
    I = CI;
    return *this;
  }

  /// Return the wrapped instruction; must not be called on a null proxy.
  llvm::Instruction *get() const {
    assert(I && "Unexpected nullptr!");
    return I;
  }
  // Implicit conversion may yield nullptr; operator-> requires non-null.
  operator llvm::Instruction *() const { return asInstruction(); }
  llvm::Instruction *operator->() const { return get(); }

  explicit operator bool() const { return isInstruction(); }
  bool operator!() const { return isNull(); }

  /// Return the value read or written by this access.
  ///
  /// For a load, the instruction itself is returned (its result is the read
  /// value). For mem intrinsics and calls no single value operand exists, so
  /// nullptr is returned.
  llvm::Value *getValueOperand() const {
    if (isLoad())
      return asLoad();
    if (isStore())
      return asStore()->getValueOperand();
    if (isMemIntrinsic())
      return nullptr;
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  /// Return the accessed address; for mem intrinsics this is the destination
  /// pointer, for plain calls nullptr.
  llvm::Value *getPointerOperand() const {
    if (isLoad())
      return asLoad()->getPointerOperand();
    if (isStore())
      return asStore()->getPointerOperand();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getRawDest();
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  /// Return the alignment of the access (0 for plain calls).
  unsigned getAlignment() const {
    if (isLoad())
      return asLoad()->getAlignment();
    if (isStore())
      return asStore()->getAlignment();
    // For memcpy/memmove both source and destination matter, so report the
    // weaker of the two alignments.
    if (isMemTransferInst())
      return std::min(asMemTransferInst()->getDestAlignment(),
                      asMemTransferInst()->getSourceAlignment());
    if (isMemIntrinsic())
      return asMemIntrinsic()->getDestAlignment();
    if (isCallInst())
      return 0;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isVolatile() const {
    if (isLoad())
      return asLoad()->isVolatile();
    if (isStore())
      return asStore()->isVolatile();
    if (isMemIntrinsic())
      return asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return false;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isSimple() const {
    if (isLoad())
      return asLoad()->isSimple();
    if (isStore())
      return asStore()->isSimple();
    // Mem intrinsics are never atomic; only volatility disqualifies them.
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::AtomicOrdering getOrdering() const {
    if (isLoad())
      return asLoad()->getOrdering();
    if (isStore())
      return asStore()->getOrdering();
    if (isMemIntrinsic())
      return llvm::AtomicOrdering::NotAtomic;
    if (isCallInst())
      return llvm::AtomicOrdering::NotAtomic;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isUnordered() const {
    if (isLoad())
      return asLoad()->isUnordered();
    if (isStore())
      return asStore()->isUnordered();
    // Copied from the Load/Store implementation of isUnordered:
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }

  bool isNull() const { return !I; }
  bool isInstruction() const { return I; }

  // Unlike get(), this may return nullptr.
  llvm::Instruction *asInstruction() const { return I; }

private:
  // Kind queries; all are null-safe and return false on a null proxy.
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
  bool isMemTransferInst() const {
    return I && llvm::isa<llvm::MemTransferInst>(I);
  }

  // Checked downcasts; the caller must have verified the kind first.
  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
  llvm::MemIntrinsic *asMemIntrinsic() const {
    return llvm::cast<llvm::MemIntrinsic>(I);
  }
  llvm::MemSetInst *asMemSetInst() const {
    return llvm::cast<llvm::MemSetInst>(I);
  }
  llvm::MemTransferInst *asMemTransferInst() const {
    return llvm::cast<llvm::MemTransferInst>(I);
  }
};
} // namespace polly
namespace llvm {
/// Specialize simplify_type for MemAccInst so that llvm::dyn_cast, llvm::cast
/// etc. can be applied to a MemAccInst object: the casting machinery first
/// "simplifies" the proxy to the wrapped llvm::Instruction pointer (which may
/// be nullptr for a null proxy).
template <> struct simplify_type<polly::MemAccInst> {
  // `using` instead of `typedef` for consistency with the alias declarations
  // in the polly namespace above.
  using SimpleType = Instruction *;
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
    return I.asInstruction();
  }
};
} // namespace llvm
namespace polly {
/// Simplify the region to have a single unconditional entry edge and a
/// single exit edge.
///
/// Although this function allows DT and RI to be null, regions only work
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
/// up-to-date.
///
/// @param R The region to be simplified
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
llvm::LoopInfo *LI, llvm::RegionInfo *RI);
/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param P             The pass that is currently running.
///
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);
/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
llvm::DominatorTree *DT, llvm::LoopInfo *LI,
llvm::RegionInfo *RI);
/// Wrapper for SCEVExpander extended to all Polly features.
///
/// This wrapper will internally call the SCEVExpander but also makes sure that
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
/// black boxes but can be part of the function) will be expanded correctly.
///
/// The parameters are the same as for the creation of a SCEVExpander as well
/// as the call to SCEVExpander::expandCodeFor:
///
/// @param S The current Scop.
/// @param SE The Scalar Evolution pass.
/// @param DL The module data layout.
/// @param Name The suffix added to the new instruction names.
/// @param E The expression for which code is actually generated.
/// @param Ty The type of the resulting code.
/// @param IP The insertion point for the new code.
/// @param VMap A remapping of values used in @p E.
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
/// instructions in rare cases.
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
const llvm::DataLayout &DL, const char *Name,
const llvm::SCEV *E, llvm::Type *Ty,
llvm::Instruction *IP, ValueMapT *VMap,
llvm::BasicBlock *RTCBB);
/// Check if the block is an error block.
///
/// An error block is currently any block that fulfills at least one of
/// the following conditions:
///
/// - It is terminated by an unreachable instruction
/// - It contains a call to a non-pure function that is not immediately
/// dominated by a loop header and that does not dominate the region exit.
/// This is a heuristic to pick only error blocks that are conditionally
/// executed and can be assumed to be not executed at all without the domains
/// being available.
///
/// @param BB The block to check.
/// @param R The analyzed region.
/// @param LI The loop info analysis.
/// @param DT The dominator tree of the function.
///
/// @return True if the block is an error block, false otherwise.
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
llvm::LoopInfo &LI, const llvm::DominatorTree &DT);
/// Return the condition for the terminator @p TI.
///
/// For unconditional branches the "i1 true" condition will be returned.
///
/// @param TI The terminator to get the condition from.
///
/// @return The condition of @p TI and nullptr if none could be extracted.
llvm::Value *getConditionFromTerminator(llvm::Instruction *TI);
/// Get the smallest loop that contains @p S but is not in @p S.
llvm::Loop *getLoopSurroundingScop(Scop &S, llvm::LoopInfo &LI);
/// Get the number of blocks in @p L.
///
/// The number of blocks in a loop is the number of basic blocks actually
/// belonging to the loop, as well as all single basic blocks that the loop
/// exits to and which terminate in an unreachable instruction. We do not
/// allow such basic blocks in the exit of a scop, hence they belong to the
/// scop and represent run-time conditions which we want to model and
/// subsequently speculate away.
///
/// @see getRegionNodeLoop for additional details.
unsigned getNumBlocksInLoop(llvm::Loop *L);
/// Get the number of blocks in @p RN.
unsigned getNumBlocksInRegionNode(llvm::RegionNode *RN);
/// Return the smallest loop surrounding @p RN.
llvm::Loop *getRegionNodeLoop(llvm::RegionNode *RN, llvm::LoopInfo &LI);
/// Check if @p LInst can be hoisted in @p R.
///
/// @param LInst The load to check.
/// @param R The analyzed region.
/// @param LI The loop info.
/// @param SE The scalar evolution analysis.
/// @param DT The dominator tree of the function.
/// @param KnownInvariantLoads The invariant load set.
///
/// @return True if @p LInst can be hoisted in @p R.
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT,
const InvariantLoadsSetTy &KnownInvariantLoads);
/// Return true iff @p V is an intrinsic that we ignore during code
/// generation.
bool isIgnoredIntrinsic(const llvm::Value *V);
/// Check whether a value can be synthesized by the code generator.
///
/// Some values will be recalculated only from information that is code generated
/// from the polyhedral representation. For such instructions we do not need to
/// ensure that their operands are available during code generation.
///
/// @param V The value to check.
/// @param S The current SCoP.
/// @param SE The scalar evolution database.
/// @param Scope Location where the value would be synthesized.
/// @return If the instruction I can be regenerated from its
/// scalar evolution representation, return true,
/// otherwise return false.
bool canSynthesize(const llvm::Value *V, const Scop &S,
llvm::ScalarEvolution *SE, llvm::Loop *Scope);
/// Return the block in which a value is used.
///
/// For normal instructions, this is the instruction's parent block. For PHI
/// nodes, this is the incoming block of that use, because this is where the
/// operand must be defined (i.e. its definition dominates this block).
/// Non-instructions do not use operands at a specific point such that in this
/// case this function returns nullptr.
llvm::BasicBlock *getUseBlock(const llvm::Use &U);
/// Derive the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
/// array. If this is actually true, this function returns a list of array
/// subscript expressions as SCEV as well as a list of integers describing
/// the size of the individual array dimensions. Both lists have either equal
/// length or the size list is one element shorter in case there is no known
/// size available for the outermost array dimension.
///
/// @param GEP The GetElementPtr instruction to analyze.
///
/// @return A tuple with the subscript expressions and the dimension sizes.
std::tuple<std::vector<const llvm::SCEV *>, std::vector<int>>
getIndexExpressionsFromGEP(llvm::GetElementPtrInst *GEP,
llvm::ScalarEvolution &SE);
// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
// for Polly. If the loop is affine, return the loop itself.
//
// @param L Pointer to the Loop object to analyze.
// @param LI Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
const BoxedLoopsSetTy &BoxedLoops);
// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
// first non-boxed surrounding loop for Polly. If the loop is affine, return
// the loop itself.
//
// @param BB Pointer to the Basic Block to analyze.
// @param LI Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
const BoxedLoopsSetTy &BoxedLoops);
/// Is the given instruction a call to a debug function?
///
/// A debug function can be used to insert output in Polly-optimized code which
/// normally does not allow function calls with side-effects. For instance, a
/// printf can be inserted to check whether a value still has the expected value
/// after Polly generated code:
///
/// int sum = 0;
/// for (int i = 0; i < 16; i+=1) {
/// sum += i;
/// printf("The value of sum at i=%d is %d\n", sum, i);
/// }
bool isDebugCall(llvm::Instruction *Inst);
/// Does the statement contain a call to a debug function?
///
/// Such a statement must not be removed, even if it has no side-effects.
bool hasDebugCall(ScopStmt *Stmt);
} // namespace polly
#endif