Declarations

include/llvm/IR/Instruction.def
  176 HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
   46 class AtomicCmpXchgInst;
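
A minimal sketch of how an AtomicCmpXchgInst is typically created through the IRBuilder interface referenced below (include/llvm/IR/IRBuilder.h), assuming a tree matching this listing in which CreateAtomicCmpXchg takes success and failure orderings and no explicit alignment; the helper name emitSeqCstCmpXchg is hypothetical:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // Hypothetical helper: build a sequentially consistent cmpxchg.
  AtomicCmpXchgInst *emitSeqCstCmpXchg(IRBuilder<> &Builder, Value *Ptr,
                                       Value *Expected, Value *Desired) {
    // getStrongestFailureOrdering returns the strongest failure ordering the
    // given success ordering permits (seq_cst here).
    return Builder.CreateAtomicCmpXchg(
        Ptr, Expected, Desired, AtomicOrdering::SequentiallyConsistent,
        AtomicCmpXchgInst::getStrongestFailureOrdering(
            AtomicOrdering::SequentiallyConsistent));
  }

The same CreateAtomicCmpXchg / getStrongestFailureOrdering pairing appears in lib/CodeGen/AtomicExpandPass.cpp and tools/clang/lib/CodeGen/CGAtomic.cpp below.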

References

include/llvm/Analysis/AliasAnalysis.h
  564   ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
  568   ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
  711   ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
include/llvm/Analysis/MemoryLocation.h
  202   static MemoryLocation get(const AtomicCmpXchgInst *CXI);
  216       return get(cast<AtomicCmpXchgInst>(Inst));
include/llvm/CodeGen/TargetLowering.h
 1763       IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
 1852   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
include/llvm/IR/IRBuilder.h
  845   InstTy *Insert(InstTy *I, const Twine &Name = "") const {
 1677   AtomicCmpXchgInst *
 1682     return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
include/llvm/IR/InstVisitor.h
  173   RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
include/llvm/IR/Instructions.h
  545   AtomicCmpXchgInst *cloneImpl() const;
  686     public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
include/llvm/IR/OperandTraits.h
   31   static Use *op_begin(SubClass* U) {
   33         !std::is_polymorphic<SubClass>::value,
   37   static Use *op_end(SubClass* U) {
include/llvm/IR/User.h
  127   template <int Idx, typename U> static Use &OpFrom(const U *that) {
  129       ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx]
  130       : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx];
include/llvm/Support/Casting.h
   58     return To::classof(&Val);
   77     return isa_impl<To, From>::doit(Val);
   92     return isa_impl<To, From>::doit(*Val);
  106     return isa_impl<To, From>::doit(*Val);
  122     return isa_impl_wrap<To, SimpleFrom,
  132     return isa_impl_cl<To,FromTy>::doit(Val);
  142   return isa_impl_wrap<X, const Y,
  168   using ret_type = const To &; // Normal case, return Ty&
  172   using ret_type = To *;       // Pointer arg case, return Ty*
  176   using ret_type = const To *; // Constant pointer arg case, return const Ty*
  198   using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
  204   using ret_type = typename cast_retty_impl<To,FromTy>::ret_type;
  210       To, From, typename simplify_type<From>::SimpleType>::ret_type;
  218   static typename cast_retty<To, From>::ret_type doit(From &Val) {
  219     return cast_convert_val<To, SimpleFrom,
  227   static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
  228     typename cast_retty<To, FromTy>::ret_type Res2
  248                                typename cast_retty<X, const Y>::ret_type>::type
  256 inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
  258   return cast_convert_val<X, Y,
  263 inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
  265   return cast_convert_val<X, Y*,
  331                             typename cast_retty<X, const Y>::ret_type>::type
  337 LLVM_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(Y &Val) {
  338   return isa<X>(Val) ? cast<X>(Val) : nullptr;
  342 LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type dyn_cast(Y *Val) {
  343   return isa<X>(Val) ? cast<X>(Val) : nullptr;
  366 LLVM_NODISCARD inline typename cast_retty<X, Y *>::ret_type
  368   return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
lib/Analysis/AliasAnalysis.cpp
  575 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
  581 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
lib/Analysis/CFLGraph.h
  299     void visitAtomicCmpXchgInst(AtomicCmpXchgInst &Inst) {
lib/Analysis/CaptureTracking.cpp
  334       auto *ACXI = cast<AtomicCmpXchgInst>(I);
lib/Analysis/MemoryDependenceAnalysis.cpp
  322   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
lib/Analysis/MemoryLocation.cpp
   63 MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
lib/Analysis/ValueTracking.cpp
 4362       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
lib/AsmParser/LLParser.cpp
 7069   AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
lib/Bitcode/Reader/BitcodeReader.cpp
 4927             AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);
 4931       I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
 4934       cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
 4944         cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]);
lib/Bitcode/Writer/BitcodeWriter.cpp
 2964     Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
 2966         getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
 2968         getEncodedSyncScopeID(cast<AtomicCmpXchgInst>(I).getSyncScopeID()));
 2970         getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
 2971     Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
lib/CodeGen/AtomicExpandPass.cpp
   91     void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
   93     void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
   95     AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  101     bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  103     bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  115     void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);
  149 static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  181 static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
  191 static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  222     auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
  442       AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  515       AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  809 void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  891   AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
  973 void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
 1053 AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
 1069   auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
 1091 bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
 1407 bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
 1494 void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
 1591       AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
 1593           AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
lib/CodeGen/CodeGenPrepare.cpp
 4463     if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
 4465       if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
 7004   if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
lib/CodeGen/GlobalISel/IRTranslator.cpp
  252   } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
 1942   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
 4550 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
  727   void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
lib/CodeGen/StackProtector.cpp
  170       if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
lib/IR/AsmWriter.cpp
 3694   if (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isWeak())
 3700       (isa<AtomicCmpXchgInst>(I) && cast<AtomicCmpXchgInst>(I).isVolatile()) ||
 4067   } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
lib/IR/Core.cpp
 3651   return cast<AtomicCmpXchgInst>(P)->isVolatile();
 3662   return cast<AtomicCmpXchgInst>(P)->setVolatile(isVolatile);
 3666   return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->isWeak();
 3670   return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->setWeak(isWeak);
 3944   return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
 3954   return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
 3959   return mapToLLVMOrdering(cast<AtomicCmpXchgInst>(P)->getSuccessOrdering());
 3967   return cast<AtomicCmpXchgInst>(P)->setSuccessOrdering(O);
 3972   return mapToLLVMOrdering(cast<AtomicCmpXchgInst>(P)->getFailureOrdering());
 3980   return cast<AtomicCmpXchgInst>(P)->setFailureOrdering(O);
lib/IR/Instruction.cpp
  422   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
  423     return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
  424            CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
  426                cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
  428                cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
  430                cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
lib/IR/Instructions.cpp
 1475           AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
 1476           OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
 1487           AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
 1488           OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
 4150 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
 4151   AtomicCmpXchgInst *Result =
 4152     new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
lib/IR/Verifier.cpp
  480   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
 3483 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
lib/Target/AArch64/AArch64FastISel.cpp
  179   bool selectAtomicCmpXchg(const AtomicCmpXchgInst *I);
 5082 bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
 5223     return selectAtomicCmpXchg(cast<AtomicCmpXchgInst>(I));
lib/Target/AArch64/AArch64ISelLowering.cpp
12155     AtomicCmpXchgInst *AI) const {
lib/Target/AArch64/AArch64ISelLowering.h
  431   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
  131   if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
  559     } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
  565   if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
lib/Target/ARM/ARMISelLowering.cpp
16512 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
lib/Target/ARM/ARMISelLowering.h
  567     shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
lib/Target/Hexagon/HexagonISelLowering.cpp
 3300     AtomicCmpXchgInst *AI) const {
lib/Target/Hexagon/HexagonISelLowering.h
  326     shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
lib/Target/RISCV/RISCVISelLowering.cpp
 2849     AtomicCmpXchgInst *CI) const {
 2857     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
lib/Target/RISCV/RISCVISelLowering.h
  207   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  209   emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, AtomicCmpXchgInst *CI,
lib/Target/SystemZ/SystemZISelLowering.cpp
 3854   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
lib/Target/X86/X86ISelLowering.cpp
26545   auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
lib/Target/XCore/XCoreISelLowering.cpp
 1012   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
lib/Transforms/IPO/Attributor.cpp
  294   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
 1313     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
 1314     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
 1373     return cast<AtomicCmpXchgInst>(I)->isVolatile();
lib/Transforms/InstCombine/InstCombineCompares.cpp
 5576       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
lib/Transforms/InstCombine/InstCombineSelect.cpp
 1939     return dyn_cast<AtomicCmpXchgInst>(Extract->getAggregateOperand());
 1952   auto *CmpXchg = isExtractFromCmpXchg(SI.getCondition(), 1);
 1959   if (auto *X = isExtractFromCmpXchg(SI.getTrueValue(), 0))
 1968   if (auto *X = isExtractFromCmpXchg(SI.getFalseValue(), 0))
lib/Transforms/Instrumentation/AddressSanitizer.cpp
 1375   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
lib/Transforms/Instrumentation/BoundsChecking.cpp
  161     } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
  538   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
  571   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
lib/Transforms/Instrumentation/MemorySanitizer.cpp
 1838     if (isa<AtomicCmpXchgInst>(I))
 1852   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
lib/Transforms/Instrumentation/ThreadSanitizer.cpp
  421   if (isa<AtomicCmpXchgInst>(I))
  676   } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
lib/Transforms/Scalar/InferAddressSpaces.cpp
  366     else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
  776   if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
  777     return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
lib/Transforms/Scalar/LoopStrengthReduce.cpp
  829   } else if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
  847   } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
lib/Transforms/Scalar/LowerAtomic.cpp
   23 static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
  123     else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))
lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
  571   if (isa<AtomicCmpXchgInst>(I))
lib/Transforms/Utils/FunctionComparator.cpp
  615   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
  617                              cast<AtomicCmpXchgInst>(R)->isVolatile()))
  620                              cast<AtomicCmpXchgInst>(R)->isWeak()))
  624                          cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
  628                          cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
  631                       cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
lib/Transforms/Utils/InlineFunction.cpp
  991       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
lib/Transforms/Utils/SimplifyCFG.cpp
 4188       } else if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(BBI)) {
tools/clang/lib/CodeGen/CGAtomic.cpp
  368   llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
  438           llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
 1607   auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
 1653     Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
 1739   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
 1768   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
 1825   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
 1850   auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
tools/clang/lib/CodeGen/CGBuiltin.cpp
  288   auto *Result = CGF.Builder.CreateAtomicCmpXchg(
12285     AtomicCmpXchgInst *CXI =
unittests/Analysis/AliasAnalysisTest.cpp
  181   auto *CmpXChg1 = new AtomicCmpXchgInst(
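
Most of the references above share one recurring pattern: dyn_cast an Instruction to AtomicCmpXchgInst, then query its pointer operand, weak/volatile flags, and the two orderings. A minimal sketch of that pattern, assuming only the accessors shown in the listing; the function name dumpCASInfo is hypothetical:

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/AtomicOrdering.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  // Hypothetical helper: print the fields the passes above most often test.
  static void dumpCASInfo(const Instruction &I) {
    auto *CXI = dyn_cast<AtomicCmpXchgInst>(&I);
    if (!CXI)
      return;
    errs() << "cmpxchg on" << *CXI->getPointerOperand()
           << " weak=" << CXI->isWeak()
           << " volatile=" << CXI->isVolatile()
           << " success=" << toIRString(CXI->getSuccessOrdering())
           << " failure=" << toIRString(CXI->getFailureOrdering()) << "\n";
  }

The creation sites (IRBuilder.h, AtomicExpandPass.cpp, CGAtomic.cpp) use the complementary pattern sketched after the Declarations section.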