reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

lib/Target/AMDGPU/SIInstrInfo.cpp
 3192   uint16_t Opcode = MI.getOpcode();
 3193   if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
 3196   const MachineFunction *MF = MI.getParent()->getParent();
 3206       Desc.getNumOperands() != MI.getNumExplicitOperands()) {
 3211   if (MI.isInlineAsm()) {
 3213     for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
 3215       const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
 3219       const MachineOperand &Op = MI.getOperand(I);
 3235     if (MI.getOperand(i).isFPImm()) {
 3245       if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
 3245       if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
 3263       const MachineOperand &MO = MI.getOperand(i);
 3264       if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
 3275       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
 3275       if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
 3284     if (!MI.getOperand(i).isReg())
 3288       Register Reg = MI.getOperand(i).getReg();
 3301   if (isSDWA(MI)) {
 3314       const MachineOperand &MO = MI.getOperand(OpIdx);
 3333       const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
 3345         const MachineOperand &Dst = MI.getOperand(DstIdx);
 3352         const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
 3359         const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
 3367     const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
 3370       const MachineOperand &Dst = MI.getOperand(DstIdx);
 3377           MI.getOperand(MI.findTiedOperandIdx(DstIdx));
 3377           MI.getOperand(MI.findTiedOperandIdx(DstIdx));
 3391   if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
 3391   if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
 3394     const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
 3398           isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
 3399       const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
 3400       const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
 3401       const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
 3412           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
 3413       const MachineOperand &Dst = MI.getOperand(DstIdx);
 3415         const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
 3428       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
 3428       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
 3428       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
 3428       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
 3428       && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
 3441     unsigned SGPRUsed = findImplicitSGPRRead(MI);
 3450       const MachineOperand &MO = MI.getOperand(OpIdx);
 3451       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
 3475     if (isVOP3(MI) && LiteralCount) {
 3497       const MachineOperand &MO = MI.getOperand(OpIdx);
 3499       if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
 3516     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
 3517     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
 3518     const MachineOperand &Src2 = MI.getOperand(Src2Idx);
 3528   if (isSOP2(MI) || isSOPC(MI)) {
 3528   if (isSOP2(MI) || isSOPC(MI)) {
 3529     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
 3530     const MachineOperand &Src1 = MI.getOperand(Src1Idx);
 3546   if (isSOPK(MI)) {
 3547     auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
 3555       if (sopkIsZext(MI)) {
 3583     if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
 3588     const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
 3596       if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
 3603     const MachineOperand &Src0 = MI.getOperand(Src0Idx);
 3605       = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
 3615   if (shouldReadExec(MI)) {
 3616     if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
 3622   if (isSMRD(MI)) {
 3623     if (MI.mayStore()) {
 3626       const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
 3634   if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) {
 3635     const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
 3642   if (isMIMG(MI)) {
 3643     const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
 3669         const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
 3689   const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);