reference, declaration, definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

lib/Target/AMDGPU/SIInstrInfo.cpp
 4515   MachineFunction &MF = *MI.getParent()->getParent();
 4519   if (isVOP2(MI) || isVOPC(MI)) {
 4519   if (isVOP2(MI) || isVOPC(MI)) {
 4520     legalizeOperandsVOP2(MRI, MI);
 4525   if (isVOP3(MI)) {
 4526     legalizeOperandsVOP3(MRI, MI);
 4531   if (isSMRD(MI)) {
 4532     legalizeOperandsSMRD(MRI, MI);
 4539   if (MI.getOpcode() == AMDGPU::PHI) {
 4541     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
 4542       if (!MI.getOperand(i).isReg() ||
 4543           !Register::isVirtualRegister(MI.getOperand(i).getReg()))
 4546           MRI.getRegClass(MI.getOperand(i).getReg());
 4557     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
 4560         if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
 4563           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
 4567           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
 4577     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
 4578       MachineOperand &Op = MI.getOperand(I);
 4583       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
 4588       legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
 4595   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
 4596     MachineBasicBlock *MBB = MI.getParent();
 4597     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
 4602       for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
 4603         MachineOperand &Op = MI.getOperand(I);
 4612         legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
 4612         legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
 4622   if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
 4623     Register Dst = MI.getOperand(0).getReg();
 4624     Register Src0 = MI.getOperand(1).getReg();
 4628       MachineBasicBlock *MBB = MI.getParent();
 4629       MachineOperand &Op = MI.getOperand(1);
 4630       legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
 4630       legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
 4636   if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
 4637     MachineOperand &Src = MI.getOperand(0);
 4639       Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
 4648   if (isMIMG(MI) ||
 4650        (isMUBUF(MI) || isMTBUF(MI)))) {
 4650        (isMUBUF(MI) || isMTBUF(MI)))) {
 4651     MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
 4653       unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
 4657     MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
 4659       unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
 4667       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
 4670     MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
 4671     unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
 4692     MachineBasicBlock &MBB = *MI.getParent();
 4694     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
 4695     if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
 4707       std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
 4710       const DebugLoc &DL = MI.getDebugLoc();
 4711       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e64), NewVAddrLo)
 4718       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
 4726       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
 4726       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
 4742       std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
 4745       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
 4746       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
 4747       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
 4748       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
 4752       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
 4758             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
 4758             BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
 4767                 getNamedOperand(MI, AMDGPU::OpName::glc)) {
 4771                 getNamedOperand(MI, AMDGPU::OpName::dlc)) {
 4775         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
 4778                 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
 4782         MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
 4784         MIB.cloneMemRefs(MI);
 4788         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
 4788         Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
 4795                      .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
 4796                      .cloneMemRefs(MI);
 4799       MI.removeFromParent();
 4811       loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);