lib/Target/X86/X86ISelLowering.cpp

  110  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  111  X86ScalarSSEf64 = Subtarget.hasSSE2();
  112  X86ScalarSSEf32 = Subtarget.hasSSE1();
  125  if (Subtarget.isAtom())
  127  else if (Subtarget.is64Bit())
  131  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  136  if (Subtarget.hasSlowDivide32())
  138  if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
  142  if (Subtarget.isTargetWindowsMSVC() ||
  143  Subtarget.isTargetWindowsItanium()) {
  157  if (Subtarget.isTargetDarwin()) {
  161  } else if (Subtarget.isTargetWindowsGNU()) {
  174  if (!Subtarget.hasCmpxchg8b())
  181  if (Subtarget.is64Bit())
  206  if (Subtarget.hasCMov()) {
  216  if (Subtarget.is64Bit())
  226  if (!Subtarget.useSoftFloat()) {
  242  if (!Subtarget.useSoftFloat()) {
  262  if (!Subtarget.useSoftFloat()) {
  282  if (!Subtarget.useSoftFloat()) {
  291  if (Subtarget.is64Bit()) {
  296  } else if (!Subtarget.is64Bit())
  325  if (Subtarget.is64Bit())
  341  if (!Subtarget.hasBMI()) {
  346  if (Subtarget.is64Bit()) {
  352  if (Subtarget.hasLZCNT()) {
  364  if (Subtarget.is64Bit()) {
  373  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
  395  if (Subtarget.hasPOPCNT()) {
  401  if (Subtarget.is64Bit())
  409  if (!Subtarget.hasMOVBE())
  420  if (VT == MVT::i64 && !Subtarget.is64Bit())
  441  if (VT == MVT::i64 && !Subtarget.is64Bit())
  453  if (VT == MVT::i64 && !Subtarget.is64Bit())
  460  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
  476  if (!Subtarget.is64Bit())
  479  if (Subtarget.hasCmpxchg16b()) {
  484  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
  485  !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
  502  bool Is64Bit = Subtarget.is64Bit();
  515  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
  518  addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
  520  addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
  665  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
  666  addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
  806  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
  811  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
  812  addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
  831  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
  832  addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
  837  addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
  839  addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
  841  addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
  843  addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
  920  if (VT == MVT::v2i64 && !Subtarget.is64Bit())
  981  if (!Subtarget.hasAVX512())
 1009  if (!Subtarget.hasAVX512())
 1013  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
 1030  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
 1077  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
 1091  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
 1092  bool HasInt256 = Subtarget.hasInt256();
 1094  addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1096  addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1098  addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1100  addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1102  addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1104  addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
 1128  if (!Subtarget.hasAVX512())
 1149  if (!Subtarget.hasBWI())
 1181  if (Subtarget.hasAnyFMA()) {
 1250  setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
 1291  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
 1310  if (!Subtarget.hasDQI()) {
 1357  if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
 1400  if (!Subtarget.hasVLX()) {
 1470  if (Subtarget.hasDQI()) {
 1479  if (Subtarget.hasCDI()) {
 1486  if (Subtarget.hasVPOPCNTDQ()) {
 1511  if (!Subtarget.hasBWI()) {
 1521  if (Subtarget.hasVBMI2()) {
 1532  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
 1564  if (Subtarget.hasDQI()) {
 1575  if (Subtarget.hasCDI()) {
 1581  if (Subtarget.hasVPOPCNTDQ()) {
 1590  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
 1629  if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
 1699  if (Subtarget.hasBITALG()) {
 1704  if (Subtarget.hasVBMI2()) {
 1710  if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
 1712  setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
 1713  setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
 1720  if (Subtarget.hasBITALG()) {
 1726  if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
 1739  if (Subtarget.hasDQI()) {
 1750  if (Subtarget.hasBWI()) {
 1755  if (Subtarget.hasVBMI2()) {
 1773  if (!Subtarget.is64Bit()) {
 1784  if (VT == MVT::i64 && !Subtarget.is64Bit())
 1800  if (!Subtarget.is64Bit()) {
 1815  if (Subtarget.isTargetWin64()) {
 1828  if (Subtarget.is32Bit() &&
 1829  (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
 1879  computeRegisterProperties(Subtarget.getRegisterInfo());
 1899  PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
 1908  return Subtarget.isTargetMachO() && Subtarget.is64Bit();
 1913  return Subtarget.getTargetTriple().isOSMSVCRT();
 1919  unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
 1926  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
 1940  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
 1944  Subtarget.hasAVX512() &&
 1946  (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
 1947  (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
 1951  Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
 1960  if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
 1964  Subtarget.hasAVX512() &&
 1966  (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
 1967  (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
 1971  Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
 1981  Subtarget.hasAVX512() &&
 1983  (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
 1984  (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
 2001  if (Subtarget.hasAVX512()) {
 2013  if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
 2018  if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
 2057  if (Subtarget.is64Bit()) {
 2066  if (Subtarget.hasSSE1())
 2089  if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
 2093  if (Size >= 64 && Subtarget.hasAVX512() &&
 2094  (Subtarget.getPreferVectorWidth() >= 512)) {
 2095  return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
 2098  if (Size >= 32 && Subtarget.hasAVX() &&
 2099  (Subtarget.getPreferVectorWidth() >= 256)) {
 2107  if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
 2111  if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
 2112  (Subtarget.getPreferVectorWidth() >= 128))
 2115  !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
 2128  if (Subtarget.is64Bit() && Size >= 8)
 2151  *Fast = !Subtarget.isUnalignedMem16Slow();
 2154  *Fast = !Subtarget.isUnalignedMem32Slow();
 2166  return (Align < 16 || !Subtarget.hasSSE41());
 2179  if (isPositionIndependent() && Subtarget.isPICStyleGOT())
 2187  return Subtarget.useSoftFloat();
 2194  if (Subtarget.is64Bit())
 2222  assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
 2232  if (!Subtarget.is64Bit())
 2246  if (Subtarget.isPICStyleRIPRel())
 2262  RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
 2281  if (Subtarget.is64Bit())
 2302  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
 2303  if (Subtarget.isTargetFuchsia()) {
 2309  unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
 2319  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
 2320  Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
 2336  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
 2343  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
 2344  Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
 2352  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
 2353  Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
 2360  if (Subtarget.getTargetTriple().isOSContiki())
 2366  if (Subtarget.isTargetAndroid()) {
 2369  unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
 2374  if (Subtarget.isTargetFuchsia()) {
 2527  (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
 2531  (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
 2554  if (Subtarget.is64Bit()) {
 2562  if (!Subtarget.hasSSE2())
 2575  Subtarget);
 2631  = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
 2645  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
 2710  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
 2826  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
 2829  bool Is64Bit = Subtarget.is64Bit();
 2850  ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
 2854  (Is64Bit && !Subtarget.hasSSE2())) {
 2864  if (!Subtarget.hasX87())
 2875  getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
 3179  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
 3182  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
 3187  bool Is64Bit = Subtarget.is64Bit();
 3188  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
 3231  getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
 3243  RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
 3245  RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
 3253  RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
 3255  RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
 3353  assert(!(Subtarget.useSoftFloat() &&
 3361  ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
 3362  ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
 3365  assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
 3446  if (Subtarget.useAVX512Regs() &&
 3450  else if (Subtarget.hasAVX())
 3452  else if (Subtarget.hasSSE2())
 3494  !Subtarget.getTargetTriple().isOSMSVCRT() &&
 3495  argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
 3614  bool Is64Bit = Subtarget.is64Bit();
 3615  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
 3616  StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
 3640  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
 3752  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
 3823  Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
 3855  if (Subtarget.isPICStyleGOT()) {
 3896  assert((Subtarget.hasSSE1() || !NumXMMRegs)
 4006  } else if (Subtarget.isTarget64BitILP32() &&
 4064  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
 4120  !Subtarget.getTargetTriple().isOSMSVCRT() &&
 4188  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
 4189  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
 4322  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
 4323  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
 4344  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
 4397  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
 4427  const X86InstrInfo *TII = Subtarget.getInstrInfo();
 4448  if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
 4477  X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
 4580  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
 4911  return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
 4917  if (VT.isVector() && Subtarget.hasAVX512())
 4955  return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
 5000  return Subtarget.hasBMI();
 5005  return Subtarget.hasLZCNT();
 5011  if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
 5015  if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
 5034  unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
 5039  if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
 5045  return Subtarget.hasFastLZCNT();
 5059  if (!Subtarget.hasBMI())
 5077  if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
 5083  return Subtarget.hasSSE2();
 5107  if (Subtarget.hasAVX2())
 5121  if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
 5122  (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
 5139  if (VT == MVT::i64 && !Subtarget.is64Bit())
 5148  !Subtarget.isOSWindows())
 9591  if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
 9592  return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
 9594  if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
 9598  if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
 9600  if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
 9602  if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
 9688  assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
 9711  (EltVT == MVT::i64 && Subtarget.is64Bit())) {
 9717  return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
 9726  Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
 9752  return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
 9776  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
 9783  EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
 9790  if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
 9835  return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
 9843  DAG, Subtarget))
 9848  DAG, Subtarget))
 9853  if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
 9862  Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
 9903  if (Subtarget.hasSSE41()) {
17209  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17220  if (!Subtarget.hasSSE41())
17263  if (Subtarget.hasAVX2())
17380  return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17443  !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17454  if (Subtarget.hasSSE41())
17552  return InsertBitToMaskVector(Op, DAG, Subtarget);
17570  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17575  SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17589  if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17590  (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17619  return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17624  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17627  assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17631  assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17642  if (Subtarget.hasSSE41()) {
17769  if (Subtarget.isPICStyleRIPRel() &&
17792  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17814  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17839  Subtarget.classifyBlockAddressReference();
17877  OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
17879  OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18093  if (Subtarget.isTargetELF()) {
18097  if (Subtarget.is64Bit())
18102  Subtarget.is64Bit());
18105  return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18111  if (Subtarget.isTargetDarwin()) {
18114  unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18119  bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18153  unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18157  if (Subtarget.isOSWindows()) {
18175  Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18181  SDValue TlsArray = Subtarget.is64Bit()
18183  : (Subtarget.isTargetWindowsGNU()
18196  if (Subtarget.is64Bit())
18428  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18447  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
18450  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18455  !Subtarget.is64Bit())
18803  return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
18805  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18808  if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
18809  (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
18816  if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
18821  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18825  return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
18827  return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
18828  if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
18845  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
19380  assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19400  return LowerTruncateVecI1(Op, DAG, Subtarget);
19403  if (Subtarget.hasAVX512()) {
19408  if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19409  Subtarget.canExtendTo512DQ())
19414  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19422  truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19429  truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19437  if (Subtarget.hasInt256()) {
19457  if (Subtarget.hasInt256()) {
19537  if (!IsSigned && !Subtarget.hasVLX()) {
19552  assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
19568  if (Subtarget.hasAVX512())
19578  if (Subtarget.is64Bit()) {
19585  if (!Subtarget.hasSSE3())
19735  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
20137  return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
20149  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20192  if (Subtarget.hasCMov() ||
20210  assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20223  return Subtarget.hasFastVectorFSQRT();
20224  return Subtarget.hasFastScalarFSQRT();
20244  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20245  (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20246  (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20247  (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20248  (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20274  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20275  (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20276  (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20277  (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20317  if (!Subtarget.hasCMov())
20324  !(Subtarget.is64Bit() && VT == MVT::i64))
21018  if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
21023  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
21061  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21214  ((Subtarget.hasSSE2() && VT == MVT::f64) ||
21215  (Subtarget.hasSSE1() && VT == MVT::f32)) &&
21221  if (Subtarget.hasAVX512()) {
21229  if (SSECC < 8 || Subtarget.hasAVX()) {
21245  if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
21269  if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
21275  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
21276  assert(Subtarget.hasBWI() && "Expected BWI to be legal");
21363  } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
21502  if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
22196  bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
22211  bool Is64Bit = Subtarget.is64Bit();
22223  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
22254  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
22283  if (!Subtarget.is64Bit() ||
22284  Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
22323  Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
22327  MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
22333  assert(Subtarget.is64Bit() &&
22338  if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
22368  assert(!Subtarget.useSoftFloat() &&
22370  Subtarget.hasSSE1());
22889  Mask, PassThru, Subtarget, DAG);
22894  Mask, PassThru, Subtarget, DAG);
22911  Mask, PassThru, Subtarget, DAG);
22931  Mask, passThru, Subtarget, DAG);
22937  Mask, passThru, Subtarget, DAG);
22953  Mask, passThru, Subtarget, DAG);
22972  return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
22989  Mask, passThru, Subtarget, DAG);
23008  return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
23026  Mask, PassThru, Subtarget, DAG);
23044  Mask, PassThru, Subtarget, DAG);
23062  Mask, PassThru, Subtarget, DAG);
23093  Subtarget, DAG);
23139  Subtarget, DAG);
23210  Op.getOperand(1), Op.getOperand(2), Subtarget,
23235  : getZeroVector(VT, Subtarget, DAG, dl);
23249  return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
23251  return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
23317  Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23332  Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23578  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24193  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24216  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24254  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24269  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24285  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24292  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
24294  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
24301  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
24305  return Subtarget.isTargetWin64();
24315  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24343  if (!Subtarget.is64Bit()) {
24344  const X86InstrInfo *TII = Subtarget.getInstrInfo();
24379  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
24381  if (Subtarget.is64Bit()) {
24540  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
25384  assert(Subtarget.isTargetWin64() && "Unexpected target");
26440  return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
26442  return Subtarget.hasCmpxchg16b();
26454  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26455  !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2()
26472  if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26473  !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
26474  (Subtarget.hasSSE2() || Subtarget.hasX87())
26483  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26524  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26568  if (!Subtarget.hasMFence())
27663  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
27665  return LowerCMP_SWAP(Op, Subtarget, DAG);
27666  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
27671  case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
27672  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
27673  case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
27675  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
27676  case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
27680  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
27681  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
27682  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
27692  case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
27696  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
27697  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
27698  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
27701  return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
27707  case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
27708  case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
27724  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
27727  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
27743  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
27745  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
27746  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
27748  case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
27750  case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
27753  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
27760  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
27761  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
27765  case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
27769  case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
27774  case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
27775  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
27776  case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
27777  case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
27778  case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
27779  case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
27870  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28005  if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
28007  if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
28019  if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
28049  if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
28157  assert((IsSigned || Subtarget.hasAVX512()) &&
28159  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28163  if (!IsSigned && !Subtarget.hasVLX()) {
28183  if (Subtarget.hasDQI() && VT == MVT::i64 &&
28185  assert(!Subtarget.is64Bit() && "i64 should be legal");
28186  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
28208  assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
28216  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28222  if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
28259  return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
28262  return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
28265  expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
28269  expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
28275  return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
28281  assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
28309  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
28366  if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
28368  if (Subtarget.hasSSE2()) {
28381  if (Subtarget.hasX87()) {
28437  assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28443  if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
28444  assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
28481  (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28494  if (!Subtarget.hasVLX()) {
28523  if (Subtarget.hasSSE2()) {
28524  MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
28537  assert(Subtarget.hasSSE1() && "Expected SSE");
28913  unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
28926  Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
28962  if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
28968  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
28972  if (Subtarget.hasBWI() && Bits == 16)
29057  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
29062  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
29104  if (!Subtarget.hasAnyFMA())
29153  if (!Subtarget.hasAVX2())
29163  if (Subtarget.useRetpolineIndirectBranches())
29293  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29544  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29551  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
29564  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
29718  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29810  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29873  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29965  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30016  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30022  const bool Is64Bit = Subtarget.is64Bit();
30023  const bool IsLP64 = Subtarget.isTarget64BitLP64();
30058  IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
30090  Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
30150  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30159  if (!Subtarget.is32Bit())
30185  if (IsSEH && Subtarget.is32Bit()) {
30186  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30202  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30231  const X86InstrInfo *TII = Subtarget.getInstrInfo();
30234  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
30241  Subtarget.is64Bit() ?
30242  Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
30243  Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
30244  if (Subtarget.is64Bit()) {
30363  const X86InstrInfo *TII = Subtarget.getInstrInfo();
30374  if (Subtarget.is64Bit())
30399  const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
30426  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30469  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30470  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30542  if (Subtarget.is64Bit()) {
30555  .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
30582  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30601  Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30629  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30805  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30820  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30890  const X86InstrInfo *TII = Subtarget.getInstrInfo();
30909  if (Subtarget.is64Bit())
30921  .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
30926  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
30939  const X86InstrInfo *TII = Subtarget.getInstrInfo();
31016  Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
31034  Subtarget.is64Bit() ? 8 : 4);
31040  if (Subtarget.is64Bit()) {
31165  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31318  return emitXBegin(MI, BB, Subtarget.getInstrInfo());
31353  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
31365  if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
34498  Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
34676  Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
35029  return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
41614  if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
41654  if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
44901  return combineExtractVectorElt(N, DAG, DCI, Subtarget);
44903  return combineConcatVectors(N, DAG, DCI, Subtarget);
44905  return combineInsertSubvector(N, DAG, DCI, Subtarget);
44907  return combineExtractSubvector(N, DAG, DCI, Subtarget);
44910  case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
44911  case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
44912  case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
44914  case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
44915  case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
44920  case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
44924  case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
44925  case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
44926  case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
44927  case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
44928  case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
44929  case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
44930  case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
44931  case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
44932  case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, DCI, Subtarget);
44933  case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
44935  case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
44936  case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
44937  case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
44939  case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
44940  case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
44941  case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
44943  case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
44947  case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
44956  case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
44957  case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
44958  case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
44962  Subtarget);
44963  case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
44964  case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
44965  case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
44967  case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
44971  return combineVectorShiftVar(N, DAG, DCI, Subtarget);
44975  return combineVectorShiftImm(N, DAG, DCI, Subtarget);
44977  case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
45010  case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
45018  case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
45023  case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
45029  case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
45031  case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
45189  Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
45194  if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
45456  if (type->isX86_MMXTy() && Subtarget.hasMMX())
45471  if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
45476  if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45481  if (type->isX86_MMXTy() && Subtarget.hasMMX())
45488  if (!Subtarget.hasSSE2())
45496  if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
45500  if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
45501  ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX())
45506  if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45576  if (Subtarget.hasSSE2())
45578  if (Subtarget.hasSSE1())
45655  (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
45733  if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
45742  Subtarget.classifyGlobalReference(GA->getGlobal())))
45799  if (Subtarget.is64Bit())
45801  assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
45809  if (Subtarget.hasAVX512()) {
45817  if (Subtarget.hasBWI()) {
45825  if (Subtarget.is64Bit()) {
45854  if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
45862  if (VT == MVT::i32 || !Subtarget.is64Bit())
45874  if (!Subtarget.hasMMX()) break;
45877  if (!Subtarget.hasSSE2()) break;
45881  if (!Subtarget.hasSSE1()) break;
45889  if (VConstraint && Subtarget.hasVLX())
45894  if (VConstraint && Subtarget.hasVLX())
45906  if (VConstraint && Subtarget.hasVLX())
45916  if (VConstraint && Subtarget.hasVLX())
45918  if (Subtarget.hasAVX())
45925  if (!Subtarget.hasAVX512()) break;
45941  if (!Subtarget.hasMMX()) break;
45945  if (!Subtarget.hasSSE1()) break;
45949  if (Subtarget.hasAVX512()) {
45957  if (Subtarget.hasBWI()) {
46011  if (!Subtarget.is64Bit() &&
46019  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
46045  bool is64Bit = Subtarget.is64Bit();
46165  if (!Subtarget.is64Bit())
46177  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
46182  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
46214  return Subtarget.is64Bit();
46227  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
46233  if (Subtarget.is64Bit())
46234  return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
46235  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
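Taken together, the X86ISelLowering.cpp entries above show one recurring idiom: every register class, operation action, and lowering shortcut is gated on a Subtarget feature predicate. The following is a condensed sketch of that idiom, reassembled from entries 831-843 and 3255 above; it is not a verbatim excerpt, and the enclosing X86TargetLowering constructor plus the else-branches (which this index omits because they do not mention Subtarget) are assumed:

    // Sketch (assumed context: body of the X86TargetLowering constructor).
    // SSE2 vector types are registered only when the feature bits allow it,
    // and AVX-512VL swaps in the EVEX-encodable VR128X register class in
    // place of plain VR128 (cf. entries 831-843 and 3255).
    if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
      addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
      addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
      addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
      addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
      addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                      : &X86::VR128RegClass);
    }

The same shape repeats for the AVX 256-bit classes (entries 1091-1104) and for masked loads and stores, where AVX-512VL decides between Legal and Custom handling (entries 1250 and 1712-1713).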
lib/Target/X86/X86InterleavedAccess.cpp

  817  X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
  843  X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,