Matches for TII in SIShrinkInstructions.cpp, one hit per line (numbers are the source file's line numbers):

582   if (isReverseInlineImm(TII, Src, ReverseImm)) {
583     MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
592   if (auto *NextMI = matchSwap(MI, MRI, TII)) {
637   if (TII->commuteInstruction(MI, false, 1, 2))
651   if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
655     MI.setDesc(TII->get(Opc));
662   if (MI.isCompare() && TII->isSOPC(MI)) {
663     shrinkScalarCompare(TII, MI);
674   if (isKImmOperand(TII, Src))
675     MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
676   else if (isReverseInlineImm(TII, Src, ReverseImm)) {
677     MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
689   if (shrinkScalarLogicOp(ST, MRI, TII, MI))
693   if (TII->isMIMG(MI.getOpcode()) &&
701   if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
704   if (!TII->canShrink(MI, MRI)) {
707   if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
708       !TII->canShrink(MI, MRI))
714   if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
719   if (TII->isVOPC(Op32)) {
741   TII->getNamedOperand(MI, AMDGPU::OpName::src2);
754   const MachineOperand *SDst = TII->getNamedOperand(MI,
758   const MachineOperand *Src2 = TII->getNamedOperand(MI,
785   MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
792   foldImmediates(*Inst32, TII, MRI);
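For context, these hits are from LLVM's AMDGPU SIShrinkInstructions pass, which consults TII (the target instruction info) to rewrite 64-bit VALU encodings as 32-bit ones and to massage immediates so they qualify as inline constants. The hits at lines 582-583 and 676-677 show one such trick: a 32-bit literal that is not an inline immediate may become one after reversing its bits, in which case the move is replaced by a bit-reverse instruction (V_BFREV_B32 / S_BREV_B32) of the reversed value. Below is a minimal standalone sketch of that check; the helper names mirror the source, but the inline-immediate test is simplified to the integer range [-16, 64] and the code is an illustration, not the pass's exact implementation.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for TII->isInlineConstant(): AMDGPU encodings accept
// small integers in [-16, 64] (and some FP constants, ignored here) for free.
static bool isInlineImm(int32_t Imm) {
  return Imm >= -16 && Imm <= 64;
}

// Reverse the bit order of a 32-bit value.
static uint32_t reverseBits32(uint32_t V) {
  uint32_t R = 0;
  for (int I = 0; I < 32; ++I) {
    R = (R << 1) | (V & 1);
    V >>= 1;
  }
  return R;
}

// Mirrors the shape of isReverseInlineImm(TII, Src, ReverseImm): true when the
// literal itself is not inline but its bit reversal is, so the pass can emit
// "v_bfrev_b32 dst, ReverseImm" instead of a move with a 32-bit literal.
static bool isReverseInlineImm(int32_t Imm, int32_t &ReverseImm) {
  ReverseImm = static_cast<int32_t>(reverseBits32(static_cast<uint32_t>(Imm)));
  return !isInlineImm(Imm) && isInlineImm(ReverseImm);
}

int main() {
  // 0x40000000 is not an inline immediate, but its bit reversal is 2, which is.
  int32_t Rev;
  if (isReverseInlineImm(0x40000000, Rev))
    std::printf("v_bfrev_b32 v0, %d  ; materializes 0x40000000\n", Rev);
  return 0;
}

The payoff is encoding size: the literal move needs an extra 32-bit dword for the constant, while the bfrev form encodes its operand inline, which is exactly the kind of shrink the surrounding hits (hasVALU32BitEncoding, canShrink, buildShrunkInst, foldImmediates) implement for other instruction shapes.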