//===--------------------- Support.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements a few helper functions used by various pipeline
/// components.
///
//===----------------------------------------------------------------------===//
#include "llvm/MCA/Support.h"
#include "llvm/MC/MCSchedule.h"
namespace llvm {
namespace mca {
#define DEBUG_TYPE "llvm-mca"
ResourceCycles &ResourceCycles::operator+=(const ResourceCycles &RHS) {
if (Denominator == RHS.Denominator)
Numerator += RHS.Numerator;
else {
// Create a common denominator for LHS and RHS by calculating the least
// common multiple from the GCD.
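    // For example (illustrative values, not taken from the source): adding
    // 1/2 and 1/3 gives GCD = 1, LCM = 6, and the resulting fraction is
    // (1 * 3 + 1 * 2) / 6 = 5/6.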
unsigned GCD = GreatestCommonDivisor64(Denominator, RHS.Denominator);
unsigned LCM = (Denominator * RHS.Denominator) / GCD;
unsigned LHSNumerator = Numerator * (LCM / Denominator);
unsigned RHSNumerator = RHS.Numerator * (LCM / RHS.Denominator);
Numerator = LHSNumerator + RHSNumerator;
Denominator = LCM;
}
return *this;
}
void computeProcResourceMasks(const MCSchedModel &SM,
MutableArrayRef<uint64_t> Masks) {
unsigned ProcResourceID = 0;
assert(Masks.size() == SM.getNumProcResourceKinds() &&
"Invalid number of elements");
// Resource at index 0 is the 'InvalidUnit'. Set an invalid mask for it.
Masks[0] = 0;
// Create a unique bitmask for every processor resource unit.
for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
const MCProcResourceDesc &Desc = *SM.getProcResource(I);
if (Desc.SubUnitsIdxBegin)
continue;
Masks[I] = 1ULL << ProcResourceID;
ProcResourceID++;
}
// Create a unique bitmask for every processor resource group.
for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
const MCProcResourceDesc &Desc = *SM.getProcResource(I);
if (!Desc.SubUnitsIdxBegin)
continue;
Masks[I] = 1ULL << ProcResourceID;
for (unsigned U = 0; U < Desc.NumUnits; ++U) {
uint64_t OtherMask = Masks[Desc.SubUnitsIdxBegin[U]];
Masks[I] |= OtherMask;
}
ProcResourceID++;
}
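  // As an illustration (hypothetical resources, not taken from any particular
  // scheduling model): given two units P0 and P1 followed by a group P01 that
  // contains both, the loops above assign Masks[P0] = 0x1, Masks[P1] = 0x2,
  // and Masks[P01] = (1ULL << 2) | 0x1 | 0x2 = 0x7; a group mask is its own
  // bit OR-ed with the bits of all of its sub-units.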
#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "\nProcessor resource masks:\n");
  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    const MCProcResourceDesc &Desc = *SM.getProcResource(I);
    LLVM_DEBUG(dbgs() << '[' << format_decimal(I, 2) << "] " << " - "
                      << format_hex(Masks[I], 16) << " - " << Desc.Name
                      << '\n');
}
#endif
}
double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
unsigned NumMicroOps,
ArrayRef<unsigned> ProcResourceUsage) {
  // The block throughput is bounded from above by the hardware dispatch
  // throughput. That is because DispatchWidth is an upper bound on the number
  // of micro opcodes that can be part of a single dispatch group.
  double Max = static_cast<double>(NumMicroOps) / DispatchWidth;
// The block throughput is also limited by the amount of hardware parallelism.
// The number of available resource units affects the resource pressure
// distribution, as well as how many blocks can be executed every cycle.
for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
unsigned ResourceCycles = ProcResourceUsage[I];
if (!ResourceCycles)
continue;
const MCProcResourceDesc &MCDesc = *SM.getProcResource(I);
double Throughput = static_cast<double>(ResourceCycles) / MCDesc.NumUnits;
Max = std::max(Max, Throughput);
}
  // The block reciprocal throughput is computed as the MAX of:
  //  - (NumMicroOps / DispatchWidth)
  //  - (ResourceCycles / NumUnits) for every consumed processor resource.
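  // Worked example (hypothetical numbers, not taken from a real model): with
  // NumMicroOps = 6 and DispatchWidth = 4, the dispatch bound is 1.5 cycles;
  // if one consumed resource reports 4 resource cycles spread over 2 units,
  // its bound is 4 / 2 = 2.0 cycles, so the block reciprocal throughput is
  // max(1.5, 2.0) = 2.0 cycles per block iteration.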
return Max;
}
} // namespace mca
} // namespace llvm