reference, declaration, definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

lib/Analysis/ScalarEvolution.cpp
 2912   assert(!Ops.empty() && "Cannot get empty mul!");
 2913   if (Ops.size() == 1) return Ops[0];
 2913   if (Ops.size() == 1) return Ops[0];
 2915   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
 2916   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
 2917     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
 2922   GroupByComplexity(Ops, &LI, DT);
 2924   Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
 2927   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
 2928     return getOrCreateMulExpr(Ops, Flags);
 2932   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
 2934     if (Ops.size() == 2)
 2936       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
 2951     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
 2955       Ops[0] = getConstant(Fold);
 2956       Ops.erase(Ops.begin()+1);  // Erase the folded element
 2956       Ops.erase(Ops.begin()+1);  // Erase the folded element
 2957       if (Ops.size() == 1) return Ops[0];
 2957       if (Ops.size() == 1) return Ops[0];
 2958       LHSC = cast<SCEVConstant>(Ops[0]);
 2962     if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
 2963       Ops.erase(Ops.begin());
 2963       Ops.erase(Ops.begin());
 2965     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
 2967       return Ops[0];
 2968     } else if (Ops[0]->isAllOnesValue()) {
 2971       if (Ops.size() == 2) {
 2972         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
 2976             const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
 2983         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
 2987             Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
 2996     if (Ops.size() == 1)
 2997       return Ops[0];
 3001   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
 3001   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
 3005   if (Idx < Ops.size()) {
 3007     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
 3008       if (Ops.size() > MulOpsInlineThreshold)
 3012       Ops.erase(Ops.begin()+Idx);
 3012       Ops.erase(Ops.begin()+Idx);
 3013       Ops.append(Mul->op_begin(), Mul->op_end());
 3021       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
 3027   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
 3027   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
 3031   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
 3031   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
 3035     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
 3037     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
 3038       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
 3039         LIOps.push_back(Ops[i]);
 3040         Ops.erase(Ops.begin()+i);
 3040         Ops.erase(Ops.begin()+i);
 3063       if (Ops.size() == 1) return NewRec;
 3067         if (Ops[i] == AddRec) {
 3068           Ops[i] = NewRec;
 3071       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
 3090          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
 3090          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
 3093         dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
 3136         if (Ops.size() == 2) return NewAddRec;
 3137         Ops[Idx] = NewAddRec;
 3138         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
 3138         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
 3146       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
 3154   return getOrCreateMulExpr(Ops, Flags);