1//===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the function verifier interface, that can be used for some
10// sanity checking of input to the system.
11//
12// Note that this does not provide full `Java style' security and verifications,
13// instead it just tries to ensure that code is well-formed.
14//
15//  * Both of a binary operator's parameters are of the same type
16//  * Verify that the indices of mem access instructions match other operands
17//  * Verify that arithmetic and other things are only performed on first-class
18//    types.  Verify that shifts & logicals only happen on integrals f.e.
19//  * All of the constants in a switch statement are of the correct type
20//  * The code is in valid SSA form
21//  * It should be illegal to put a label into any other type (like a structure)
22//    or to return one. [except constant arrays!]
23//  * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24//  * PHI nodes must have an entry for each predecessor, with no extras.
25//  * PHI nodes must be the first thing in a basic block, all grouped together
26//  * PHI nodes must have at least one entry
27//  * All basic blocks should only end with terminator insts, not contain them
28//  * The entry node to a function must not have predecessors
29//  * All Instructions must be embedded into a basic block
30//  * Functions cannot take a void-typed parameter
31//  * Verify that a function's argument list agrees with it's declared type.
32//  * It is illegal to specify a name for a void value.
33//  * It is illegal to have a internal global value with no initializer
34//  * It is illegal to have a ret instruction that returns a value that does not
35//    agree with the function return value type.
36//  * Function call argument types match the function prototype
37//  * A landing pad is defined by a landingpad instruction, and can be jumped to
38//    only by the unwind edge of an invoke instruction.
39//  * A landingpad instruction must be the first non-PHI instruction in the
40//    block.
41//  * Landingpad instructions must be in a function with a personality function.
42//  * All other things that are tested by asserts spread about the code...
43//
44//===----------------------------------------------------------------------===//
45
46#include "llvm/IR/Verifier.h"
47#include "llvm/ADT/APFloat.h"
48#include "llvm/ADT/APInt.h"
49#include "llvm/ADT/ArrayRef.h"
50#include "llvm/ADT/DenseMap.h"
51#include "llvm/ADT/MapVector.h"
52#include "llvm/ADT/Optional.h"
53#include "llvm/ADT/STLExtras.h"
54#include "llvm/ADT/SmallPtrSet.h"
55#include "llvm/ADT/SmallSet.h"
56#include "llvm/ADT/SmallVector.h"
57#include "llvm/ADT/StringExtras.h"
58#include "llvm/ADT/StringMap.h"
59#include "llvm/ADT/StringRef.h"
60#include "llvm/ADT/Twine.h"
61#include "llvm/ADT/ilist.h"
62#include "llvm/BinaryFormat/Dwarf.h"
63#include "llvm/IR/Argument.h"
64#include "llvm/IR/Attributes.h"
65#include "llvm/IR/BasicBlock.h"
66#include "llvm/IR/CFG.h"
67#include "llvm/IR/CallingConv.h"
68#include "llvm/IR/Comdat.h"
69#include "llvm/IR/Constant.h"
70#include "llvm/IR/ConstantRange.h"
71#include "llvm/IR/Constants.h"
72#include "llvm/IR/DataLayout.h"
73#include "llvm/IR/DebugInfo.h"
74#include "llvm/IR/DebugInfoMetadata.h"
75#include "llvm/IR/DebugLoc.h"
76#include "llvm/IR/DerivedTypes.h"
77#include "llvm/IR/Dominators.h"
78#include "llvm/IR/Function.h"
79#include "llvm/IR/GlobalAlias.h"
80#include "llvm/IR/GlobalValue.h"
81#include "llvm/IR/GlobalVariable.h"
82#include "llvm/IR/InlineAsm.h"
83#include "llvm/IR/InstVisitor.h"
84#include "llvm/IR/InstrTypes.h"
85#include "llvm/IR/Instruction.h"
86#include "llvm/IR/Instructions.h"
87#include "llvm/IR/IntrinsicInst.h"
88#include "llvm/IR/Intrinsics.h"
89#include "llvm/IR/IntrinsicsWebAssembly.h"
90#include "llvm/IR/LLVMContext.h"
91#include "llvm/IR/Metadata.h"
92#include "llvm/IR/Module.h"
93#include "llvm/IR/ModuleSlotTracker.h"
94#include "llvm/IR/PassManager.h"
95#include "llvm/IR/Statepoint.h"
96#include "llvm/IR/Type.h"
97#include "llvm/IR/Use.h"
98#include "llvm/IR/User.h"
99#include "llvm/IR/Value.h"
100#include "llvm/InitializePasses.h"
101#include "llvm/Pass.h"
102#include "llvm/Support/AtomicOrdering.h"
103#include "llvm/Support/Casting.h"
104#include "llvm/Support/CommandLine.h"
105#include "llvm/Support/Debug.h"
106#include "llvm/Support/ErrorHandling.h"
107#include "llvm/Support/MathExtras.h"
108#include "llvm/Support/raw_ostream.h"
109#include <algorithm>
110#include <cassert>
111#include <cstdint>
112#include <memory>
113#include <string>
114#include <utility>
115
116using namespace llvm;
117
118namespace llvm {
119
120struct VerifierSupport {
121  raw_ostream *OS;
122  const Module &M;
123  ModuleSlotTracker MST;
124  Triple TT;
125  const DataLayout &DL;
126  LLVMContext &Context;
127
128  /// Track the brokenness of the module while recursively visiting.
129  bool Broken = false;
130  /// Broken debug info can be "recovered" from by stripping the debug info.
131  bool BrokenDebugInfo = false;
132  /// Whether to treat broken debug info as an error.
133  bool TreatBrokenDebugInfoAsError = true;
134
135  explicit VerifierSupport(raw_ostream *OS, const Module &M)
136      : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
137        Context(M.getContext()) {}
138
139private:
140  void Write(const Module *M) {
141    *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
142  }
143
144  void Write(const Value *V) {
145    if (V)
146      Write(*V);
147  }
148
149  void Write(const Value &V) {
150    if (isa<Instruction>(V)) {
151      V.print(*OS, MST);
152      *OS << '\n';
153    } else {
154      V.printAsOperand(*OS, true, MST);
155      *OS << '\n';
156    }
157  }
158
159  void Write(const Metadata *MD) {
160    if (!MD)
161      return;
162    MD->print(*OS, MST, &M);
163    *OS << '\n';
164  }
165
166  template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
167    Write(MD.get());
168  }
169
170  void Write(const NamedMDNode *NMD) {
171    if (!NMD)
172      return;
173    NMD->print(*OS, MST);
174    *OS << '\n';
175  }
176
177  void Write(Type *T) {
178    if (!T)
179      return;
180    *OS << ' ' << *T;
181  }
182
183  void Write(const Comdat *C) {
184    if (!C)
185      return;
186    *OS << *C;
187  }
188
189  void Write(const APInt *AI) {
190    if (!AI)
191      return;
192    *OS << *AI << '\n';
193  }
194
195  void Write(const unsigned i) { *OS << i << '\n'; }
196
197  template <typename T> void Write(ArrayRef<T> Vs) {
198    for (const T &V : Vs)
199      Write(V);
200  }
201
202  template <typename T1, typename... Ts>
203  void WriteTs(const T1 &V1, const Ts &... Vs) {
204    Write(V1);
205    WriteTs(Vs...);
206  }
207
208  template <typename... Ts> void WriteTs() {}
209
210public:
211  /// A check failed, so printout out the condition and the message.
212  ///
213  /// This provides a nice place to put a breakpoint if you want to see why
214  /// something is not correct.
215  void CheckFailed(const Twine &Message) {
216    if (OS)
217      *OS << Message << '\n';
218    Broken = true;
219  }
220
221  /// A check failed (with values to print).
222  ///
223  /// This calls the Message-only version so that the above is easier to set a
224  /// breakpoint on.
225  template <typename T1, typename... Ts>
226  void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
227    CheckFailed(Message);
228    if (OS)
229      WriteTs(V1, Vs...);
230  }
231
232  /// A debug info check failed.
233  void DebugInfoCheckFailed(const Twine &Message) {
234    if (OS)
235      *OS << Message << '\n';
236    Broken |= TreatBrokenDebugInfoAsError;
237    BrokenDebugInfo = true;
238  }
239
240  /// A debug info check failed (with values to print).
241  template <typename T1, typename... Ts>
242  void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
243                            const Ts &... Vs) {
244    DebugInfoCheckFailed(Message);
245    if (OS)
246      WriteTs(V1, Vs...);
247  }
248};
249
250} // namespace llvm
251
252namespace {
253
254class Verifier : public InstVisitor<Verifier>, VerifierSupport {
255  friend class InstVisitor<Verifier>;
256
257  DominatorTree DT;
258
259  /// When verifying a basic block, keep track of all of the
260  /// instructions we have seen so far.
261  ///
262  /// This allows us to do efficient dominance checks for the case when an
263  /// instruction has an operand that is an instruction in the same block.
264  SmallPtrSet<Instruction *, 16> InstsInThisBlock;
265
266  /// Keep track of the metadata nodes that have been checked already.
267  SmallPtrSet<const Metadata *, 32> MDNodes;
268
269  /// Keep track which DISubprogram is attached to which function.
270  DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
271
272  /// Track all DICompileUnits visited.
273  SmallPtrSet<const Metadata *, 2> CUVisited;
274
275  /// The result type for a landingpad.
276  Type *LandingPadResultTy;
277
278  /// Whether we've seen a call to @llvm.localescape in this function
279  /// already.
280  bool SawFrameEscape;
281
282  /// Whether the current function has a DISubprogram attached to it.
283  bool HasDebugInfo = false;
284
285  /// Whether source was present on the first DIFile encountered in each CU.
286  DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
287
288  /// Stores the count of how many objects were passed to llvm.localescape for a
289  /// given function and the largest index passed to llvm.localrecover.
290  DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
291
292  // Maps catchswitches and cleanuppads that unwind to siblings to the
293  // terminators that indicate the unwind, used to detect cycles therein.
294  MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
295
296  /// Cache of constants visited in search of ConstantExprs.
297  SmallPtrSet<const Constant *, 32> ConstantExprVisited;
298
299  /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
300  SmallVector<const Function *, 4> DeoptimizeDeclarations;
301
302  // Verify that this GlobalValue is only used in this module.
303  // This map is used to avoid visiting uses twice. We can arrive at a user
304  // twice, if they have multiple operands. In particular for very large
305  // constant expressions, we can arrive at a particular user many times.
306  SmallPtrSet<const Value *, 32> GlobalValueVisited;
307
308  // Keeps track of duplicate function argument debug info.
309  SmallVector<const DILocalVariable *, 16> DebugFnArgs;
310
311  TBAAVerifier TBAAVerifyHelper;
312
313  void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
314
315public:
316  explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
317                    const Module &M)
318      : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
319        SawFrameEscape(false), TBAAVerifyHelper(this) {
320    TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
321  }
322
323  bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
324
325  bool verify(const Function &F) {
326    assert(F.getParent() == &M &&
327           "An instance of this class only works with a specific module!");
328
329    // First ensure the function is well-enough formed to compute dominance
330    // information, and directly compute a dominance tree. We don't rely on the
331    // pass manager to provide this as it isolates us from a potentially
332    // out-of-date dominator tree and makes it significantly more complex to run
333    // this code outside of a pass manager.
334    // FIXME: It's really gross that we have to cast away constness here.
335    if (!F.empty())
336      DT.recalculate(const_cast<Function &>(F));
337
338    for (const BasicBlock &BB : F) {
339      if (!BB.empty() && BB.back().isTerminator())
340        continue;
341
342      if (OS) {
343        *OS << "Basic Block in function '" << F.getName()
344            << "' does not have terminator!\n";
345        BB.printAsOperand(*OS, true, MST);
346        *OS << "\n";
347      }
348      return false;
349    }
350
351    Broken = false;
352    // FIXME: We strip const here because the inst visitor strips const.
353    visit(const_cast<Function &>(F));
354    verifySiblingFuncletUnwinds();
355    InstsInThisBlock.clear();
356    DebugFnArgs.clear();
357    LandingPadResultTy = nullptr;
358    SawFrameEscape = false;
359    SiblingFuncletInfo.clear();
360
361    return !Broken;
362  }
363
364  /// Verify the module that this instance of \c Verifier was initialized with.
365  bool verify() {
366    Broken = false;
367
368    // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
369    for (const Function &F : M)
370      if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
371        DeoptimizeDeclarations.push_back(&F);
372
373    // Now that we've visited every function, verify that we never asked to
374    // recover a frame index that wasn't escaped.
375    verifyFrameRecoverIndices();
376    for (const GlobalVariable &GV : M.globals())
377      visitGlobalVariable(GV);
378
379    for (const GlobalAlias &GA : M.aliases())
380      visitGlobalAlias(GA);
381
382    for (const NamedMDNode &NMD : M.named_metadata())
383      visitNamedMDNode(NMD);
384
385    for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
386      visitComdat(SMEC.getValue());
387
388    visitModuleFlags(M);
389    visitModuleIdents(M);
390    visitModuleCommandLines(M);
391
392    verifyCompileUnits();
393
394    verifyDeoptimizeCallingConvs();
395    DISubprogramAttachments.clear();
396    return !Broken;
397  }
398
399private:
400  /// Whether a metadata node is allowed to be, or contain, a DILocation.
401  enum class AreDebugLocsAllowed { No, Yes };
402
403  // Verification methods...
404  void visitGlobalValue(const GlobalValue &GV);
405  void visitGlobalVariable(const GlobalVariable &GV);
406  void visitGlobalAlias(const GlobalAlias &GA);
407  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
408  void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
409                           const GlobalAlias &A, const Constant &C);
410  void visitNamedMDNode(const NamedMDNode &NMD);
411  void visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs);
412  void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
413  void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
414  void visitComdat(const Comdat &C);
415  void visitModuleIdents(const Module &M);
416  void visitModuleCommandLines(const Module &M);
417  void visitModuleFlags(const Module &M);
418  void visitModuleFlag(const MDNode *Op,
419                       DenseMap<const MDString *, const MDNode *> &SeenIDs,
420                       SmallVectorImpl<const MDNode *> &Requirements);
421  void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
422  void visitFunction(const Function &F);
423  void visitBasicBlock(BasicBlock &BB);
424  void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
425  void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
426  void visitProfMetadata(Instruction &I, MDNode *MD);
427
428  template <class Ty> bool isValidMetadataArray(const MDTuple &N);
429#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
430#include "llvm/IR/Metadata.def"
431  void visitDIScope(const DIScope &N);
432  void visitDIVariable(const DIVariable &N);
433  void visitDILexicalBlockBase(const DILexicalBlockBase &N);
434  void visitDITemplateParameter(const DITemplateParameter &N);
435
436  void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
437
438  // InstVisitor overrides...
439  using InstVisitor<Verifier>::visit;
440  void visit(Instruction &I);
441
442  void visitTruncInst(TruncInst &I);
443  void visitZExtInst(ZExtInst &I);
444  void visitSExtInst(SExtInst &I);
445  void visitFPTruncInst(FPTruncInst &I);
446  void visitFPExtInst(FPExtInst &I);
447  void visitFPToUIInst(FPToUIInst &I);
448  void visitFPToSIInst(FPToSIInst &I);
449  void visitUIToFPInst(UIToFPInst &I);
450  void visitSIToFPInst(SIToFPInst &I);
451  void visitIntToPtrInst(IntToPtrInst &I);
452  void visitPtrToIntInst(PtrToIntInst &I);
453  void visitBitCastInst(BitCastInst &I);
454  void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
455  void visitPHINode(PHINode &PN);
456  void visitCallBase(CallBase &Call);
457  void visitUnaryOperator(UnaryOperator &U);
458  void visitBinaryOperator(BinaryOperator &B);
459  void visitICmpInst(ICmpInst &IC);
460  void visitFCmpInst(FCmpInst &FC);
461  void visitExtractElementInst(ExtractElementInst &EI);
462  void visitInsertElementInst(InsertElementInst &EI);
463  void visitShuffleVectorInst(ShuffleVectorInst &EI);
464  void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
465  void visitCallInst(CallInst &CI);
466  void visitInvokeInst(InvokeInst &II);
467  void visitGetElementPtrInst(GetElementPtrInst &GEP);
468  void visitLoadInst(LoadInst &LI);
469  void visitStoreInst(StoreInst &SI);
470  void verifyDominatesUse(Instruction &I, unsigned i);
471  void visitInstruction(Instruction &I);
472  void visitTerminator(Instruction &I);
473  void visitBranchInst(BranchInst &BI);
474  void visitReturnInst(ReturnInst &RI);
475  void visitSwitchInst(SwitchInst &SI);
476  void visitIndirectBrInst(IndirectBrInst &BI);
477  void visitCallBrInst(CallBrInst &CBI);
478  void visitSelectInst(SelectInst &SI);
479  void visitUserOp1(Instruction &I);
480  void visitUserOp2(Instruction &I) { visitUserOp1(I); }
481  void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
482  void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
483  void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
484  void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
485  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
486  void visitAtomicRMWInst(AtomicRMWInst &RMWI);
487  void visitFenceInst(FenceInst &FI);
488  void visitAllocaInst(AllocaInst &AI);
489  void visitExtractValueInst(ExtractValueInst &EVI);
490  void visitInsertValueInst(InsertValueInst &IVI);
491  void visitEHPadPredecessors(Instruction &I);
492  void visitLandingPadInst(LandingPadInst &LPI);
493  void visitResumeInst(ResumeInst &RI);
494  void visitCatchPadInst(CatchPadInst &CPI);
495  void visitCatchReturnInst(CatchReturnInst &CatchReturn);
496  void visitCleanupPadInst(CleanupPadInst &CPI);
497  void visitFuncletPadInst(FuncletPadInst &FPI);
498  void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
499  void visitCleanupReturnInst(CleanupReturnInst &CRI);
500
501  void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
502  void verifySwiftErrorValue(const Value *SwiftErrorVal);
503  void verifyMustTailCall(CallInst &CI);
504  bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
505                        unsigned ArgNo, std::string &Suffix);
506  bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
507  void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
508                            const Value *V);
509  void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
510  void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
511                           const Value *V, bool IsIntrinsic);
512  void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
513
514  void visitConstantExprsRecursively(const Constant *EntryC);
515  void visitConstantExpr(const ConstantExpr *CE);
516  void verifyStatepoint(const CallBase &Call);
517  void verifyFrameRecoverIndices();
518  void verifySiblingFuncletUnwinds();
519
520  void verifyFragmentExpression(const DbgVariableIntrinsic &I);
521  template <typename ValueOrMetadata>
522  void verifyFragmentExpression(const DIVariable &V,
523                                DIExpression::FragmentInfo Fragment,
524                                ValueOrMetadata *Desc);
525  void verifyFnArgs(const DbgVariableIntrinsic &I);
526  void verifyNotEntryValue(const DbgVariableIntrinsic &I);
527
528  /// Module-level debug info verification...
529  void verifyCompileUnits();
530
531  /// Module-level verification that all @llvm.experimental.deoptimize
532  /// declarations share the same calling convention.
533  void verifyDeoptimizeCallingConvs();
534
535  /// Verify all-or-nothing property of DIFile source attribute within a CU.
536  void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
537};
538
539} // end anonymous namespace
540
541/// We know that cond should be true, if not print an error message.
542#define Assert(C, ...) \
543  do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
544
545/// We know that a debug info condition should be true, if not print
546/// an error message.
547#define AssertDI(C, ...) \
548  do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
549
550void Verifier::visit(Instruction &I) {
551  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
552    Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
553  InstVisitor<Verifier>::visit(I);
554}
555
556// Helper to recursively iterate over indirect users. By
557// returning false, the callback can ask to stop recursing
558// further.
559static void forEachUser(const Value *User,
560                        SmallPtrSet<const Value *, 32> &Visited,
561                        llvm::function_ref<bool(const Value *)> Callback) {
562  if (!Visited.insert(User).second)
563    return;
564  for (const Value *TheNextUser : User->materialized_users())
565    if (Callback(TheNextUser))
566      forEachUser(TheNextUser, Visited, Callback);
567}
568
569void Verifier::visitGlobalValue(const GlobalValue &GV) {
570  Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
571         "Global is external, but doesn't have external or weak linkage!", &GV);
572
573  if (const GlobalObject *GO = dyn_cast<GlobalObject>(&GV))
574    Assert(GO->getAlignment() <= Value::MaximumAlignment,
575           "huge alignment values are unsupported", GO);
576  Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
577         "Only global variables can have appending linkage!", &GV);
578
579  if (GV.hasAppendingLinkage()) {
580    const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
581    Assert(GVar && GVar->getValueType()->isArrayTy(),
582           "Only global arrays can have appending linkage!", GVar);
583  }
584
585  if (GV.isDeclarationForLinker())
586    Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
587
588  if (GV.hasDLLImportStorageClass()) {
589    Assert(!GV.isDSOLocal(),
590           "GlobalValue with DLLImport Storage is dso_local!", &GV);
591
592    Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
593               GV.hasAvailableExternallyLinkage(),
594           "Global is marked as dllimport, but not external", &GV);
595  }
596
597  if (GV.isImplicitDSOLocal())
598    Assert(GV.isDSOLocal(),
599           "GlobalValue with local linkage or non-default "
600           "visibility must be dso_local!",
601           &GV);
602
603  forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
604    if (const Instruction *I = dyn_cast<Instruction>(V)) {
605      if (!I->getParent() || !I->getParent()->getParent())
606        CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
607                    I);
608      else if (I->getParent()->getParent()->getParent() != &M)
609        CheckFailed("Global is referenced in a different module!", &GV, &M, I,
610                    I->getParent()->getParent(),
611                    I->getParent()->getParent()->getParent());
612      return false;
613    } else if (const Function *F = dyn_cast<Function>(V)) {
614      if (F->getParent() != &M)
615        CheckFailed("Global is used by function in a different module", &GV, &M,
616                    F, F->getParent());
617      return false;
618    }
619    return true;
620  });
621}
622
623void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
624  if (GV.hasInitializer()) {
625    Assert(GV.getInitializer()->getType() == GV.getValueType(),
626           "Global variable initializer type does not match global "
627           "variable type!",
628           &GV);
629    // If the global has common linkage, it must have a zero initializer and
630    // cannot be constant.
631    if (GV.hasCommonLinkage()) {
632      Assert(GV.getInitializer()->isNullValue(),
633             "'common' global must have a zero initializer!", &GV);
634      Assert(!GV.isConstant(), "'common' global may not be marked constant!",
635             &GV);
636      Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
637    }
638  }
639
640  if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
641                       GV.getName() == "llvm.global_dtors")) {
642    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
643           "invalid linkage for intrinsic global variable", &GV);
644    // Don't worry about emitting an error for it not being an array,
645    // visitGlobalValue will complain on appending non-array.
646    if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
647      StructType *STy = dyn_cast<StructType>(ATy->getElementType());
648      PointerType *FuncPtrTy =
649          FunctionType::get(Type::getVoidTy(Context), false)->
650          getPointerTo(DL.getProgramAddressSpace());
651      Assert(STy &&
652                 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
653                 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
654                 STy->getTypeAtIndex(1) == FuncPtrTy,
655             "wrong type for intrinsic global variable", &GV);
656      Assert(STy->getNumElements() == 3,
657             "the third field of the element type is mandatory, "
658             "specify i8* null to migrate from the obsoleted 2-field form");
659      Type *ETy = STy->getTypeAtIndex(2);
660      Assert(ETy->isPointerTy() &&
661                 cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
662             "wrong type for intrinsic global variable", &GV);
663    }
664  }
665
666  if (GV.hasName() && (GV.getName() == "llvm.used" ||
667                       GV.getName() == "llvm.compiler.used")) {
668    Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
669           "invalid linkage for intrinsic global variable", &GV);
670    Type *GVType = GV.getValueType();
671    if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
672      PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
673      Assert(PTy, "wrong type for intrinsic global variable", &GV);
674      if (GV.hasInitializer()) {
675        const Constant *Init = GV.getInitializer();
676        const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
677        Assert(InitArray, "wrong initalizer for intrinsic global variable",
678               Init);
679        for (Value *Op : InitArray->operands()) {
680          Value *V = Op->stripPointerCasts();
681          Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
682                     isa<GlobalAlias>(V),
683                 "invalid llvm.used member", V);
684          Assert(V->hasName(), "members of llvm.used must be named", V);
685        }
686      }
687    }
688  }
689
690  // Visit any debug info attachments.
691  SmallVector<MDNode *, 1> MDs;
692  GV.getMetadata(LLVMContext::MD_dbg, MDs);
693  for (auto *MD : MDs) {
694    if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
695      visitDIGlobalVariableExpression(*GVE);
696    else
697      AssertDI(false, "!dbg attachment of global variable must be a "
698                      "DIGlobalVariableExpression");
699  }
700
701  // Scalable vectors cannot be global variables, since we don't know
702  // the runtime size. If the global is a struct or an array containing
703  // scalable vectors, that will be caught by the isValidElementType methods
704  // in StructType or ArrayType instead.
705  Assert(!isa<ScalableVectorType>(GV.getValueType()),
706         "Globals cannot contain scalable vectors", &GV);
707
708  if (!GV.hasInitializer()) {
709    visitGlobalValue(GV);
710    return;
711  }
712
713  // Walk any aggregate initializers looking for bitcasts between address spaces
714  visitConstantExprsRecursively(GV.getInitializer());
715
716  visitGlobalValue(GV);
717}
718
719void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
720  SmallPtrSet<const GlobalAlias*, 4> Visited;
721  Visited.insert(&GA);
722  visitAliaseeSubExpr(Visited, GA, C);
723}
724
725void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
726                                   const GlobalAlias &GA, const Constant &C) {
727  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
728    Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
729           &GA);
730
731    if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
732      Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
733
734      Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
735             &GA);
736    } else {
737      // Only continue verifying subexpressions of GlobalAliases.
738      // Do not recurse into global initializers.
739      return;
740    }
741  }
742
743  if (const auto *CE = dyn_cast<ConstantExpr>(&C))
744    visitConstantExprsRecursively(CE);
745
746  for (const Use &U : C.operands()) {
747    Value *V = &*U;
748    if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
749      visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
750    else if (const auto *C2 = dyn_cast<Constant>(V))
751      visitAliaseeSubExpr(Visited, GA, *C2);
752  }
753}
754
755void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
756  Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
757         "Alias should have private, internal, linkonce, weak, linkonce_odr, "
758         "weak_odr, or external linkage!",
759         &GA);
760  const Constant *Aliasee = GA.getAliasee();
761  Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
762  Assert(GA.getType() == Aliasee->getType(),
763         "Alias and aliasee types should match!", &GA);
764
765  Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
766         "Aliasee should be either GlobalValue or ConstantExpr", &GA);
767
768  visitAliaseeSubExpr(GA, *Aliasee);
769
770  visitGlobalValue(GA);
771}
772
773void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
774  // There used to be various other llvm.dbg.* nodes, but we don't support
775  // upgrading them and we want to reserve the namespace for future uses.
776  if (NMD.getName().startswith("llvm.dbg."))
777    AssertDI(NMD.getName() == "llvm.dbg.cu",
778             "unrecognized named metadata node in the llvm.dbg namespace",
779             &NMD);
780  for (const MDNode *MD : NMD.operands()) {
781    if (NMD.getName() == "llvm.dbg.cu")
782      AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
783
784    if (!MD)
785      continue;
786
787    visitMDNode(*MD, AreDebugLocsAllowed::Yes);
788  }
789}
790
791void Verifier::visitMDNode(const MDNode &MD, AreDebugLocsAllowed AllowLocs) {
792  // Only visit each node once.  Metadata can be mutually recursive, so this
793  // avoids infinite recursion here, as well as being an optimization.
794  if (!MDNodes.insert(&MD).second)
795    return;
796
797  switch (MD.getMetadataID()) {
798  default:
799    llvm_unreachable("Invalid MDNode subclass");
800  case Metadata::MDTupleKind:
801    break;
802#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
803  case Metadata::CLASS##Kind:                                                  \
804    visit##CLASS(cast<CLASS>(MD));                                             \
805    break;
806#include "llvm/IR/Metadata.def"
807  }
808
809  for (const Metadata *Op : MD.operands()) {
810    if (!Op)
811      continue;
812    Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
813           &MD, Op);
814    AssertDI(!isa<DILocation>(Op) || AllowLocs == AreDebugLocsAllowed::Yes,
815             "DILocation not allowed within this metadata node", &MD, Op);
816    if (auto *N = dyn_cast<MDNode>(Op)) {
817      visitMDNode(*N, AllowLocs);
818      continue;
819    }
820    if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
821      visitValueAsMetadata(*V, nullptr);
822      continue;
823    }
824  }
825
826  // Check these last, so we diagnose problems in operands first.
827  Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
828  Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
829}
830
831void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
832  Assert(MD.getValue(), "Expected valid value", &MD);
833  Assert(!MD.getValue()->getType()->isMetadataTy(),
834         "Unexpected metadata round-trip through values", &MD, MD.getValue());
835
836  auto *L = dyn_cast<LocalAsMetadata>(&MD);
837  if (!L)
838    return;
839
840  Assert(F, "function-local metadata used outside a function", L);
841
842  // If this was an instruction, bb, or argument, verify that it is in the
843  // function that we expect.
844  Function *ActualF = nullptr;
845  if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
846    Assert(I->getParent(), "function-local metadata not in basic block", L, I);
847    ActualF = I->getParent()->getParent();
848  } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
849    ActualF = BB->getParent();
850  else if (Argument *A = dyn_cast<Argument>(L->getValue()))
851    ActualF = A->getParent();
852  assert(ActualF && "Unimplemented function local metadata case!");
853
854  Assert(ActualF == F, "function-local metadata used in wrong function", L);
855}
856
857void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
858  Metadata *MD = MDV.getMetadata();
859  if (auto *N = dyn_cast<MDNode>(MD)) {
860    visitMDNode(*N, AreDebugLocsAllowed::No);
861    return;
862  }
863
864  // Only visit each node once.  Metadata can be mutually recursive, so this
865  // avoids infinite recursion here, as well as being an optimization.
866  if (!MDNodes.insert(MD).second)
867    return;
868
869  if (auto *V = dyn_cast<ValueAsMetadata>(MD))
870    visitValueAsMetadata(*V, F);
871}
872
873static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
874static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
875static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
876
877void Verifier::visitDILocation(const DILocation &N) {
878  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
879           "location requires a valid scope", &N, N.getRawScope());
880  if (auto *IA = N.getRawInlinedAt())
881    AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
882  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
883    AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
884}
885
886void Verifier::visitGenericDINode(const GenericDINode &N) {
887  AssertDI(N.getTag(), "invalid tag", &N);
888}
889
890void Verifier::visitDIScope(const DIScope &N) {
891  if (auto *F = N.getRawFile())
892    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
893}
894
895void Verifier::visitDISubrange(const DISubrange &N) {
896  AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
897  AssertDI(N.getRawCountNode() || N.getRawUpperBound(),
898           "Subrange must contain count or upperBound", &N);
899  AssertDI(!N.getRawCountNode() || !N.getRawUpperBound(),
900           "Subrange can have any one of count or upperBound", &N);
901  AssertDI(!N.getRawCountNode() || N.getCount(),
902           "Count must either be a signed constant or a DIVariable", &N);
903  auto Count = N.getCount();
904  AssertDI(!Count || !Count.is<ConstantInt *>() ||
905               Count.get<ConstantInt *>()->getSExtValue() >= -1,
906           "invalid subrange count", &N);
907  auto *LBound = N.getRawLowerBound();
908  AssertDI(!LBound || isa<ConstantAsMetadata>(LBound) ||
909               isa<DIVariable>(LBound) || isa<DIExpression>(LBound),
910           "LowerBound must be signed constant or DIVariable or DIExpression",
911           &N);
912  auto *UBound = N.getRawUpperBound();
913  AssertDI(!UBound || isa<ConstantAsMetadata>(UBound) ||
914               isa<DIVariable>(UBound) || isa<DIExpression>(UBound),
915           "UpperBound must be signed constant or DIVariable or DIExpression",
916           &N);
917  auto *Stride = N.getRawStride();
918  AssertDI(!Stride || isa<ConstantAsMetadata>(Stride) ||
919               isa<DIVariable>(Stride) || isa<DIExpression>(Stride),
920           "Stride must be signed constant or DIVariable or DIExpression", &N);
921}
922
923void Verifier::visitDIEnumerator(const DIEnumerator &N) {
924  AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
925}
926
927void Verifier::visitDIBasicType(const DIBasicType &N) {
928  AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
929               N.getTag() == dwarf::DW_TAG_unspecified_type,
930           "invalid tag", &N);
931  AssertDI(!(N.isBigEndian() && N.isLittleEndian()) ,
932            "has conflicting flags", &N);
933}
934
935void Verifier::visitDIDerivedType(const DIDerivedType &N) {
936  // Common scope checks.
937  visitDIScope(N);
938
939  AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
940               N.getTag() == dwarf::DW_TAG_pointer_type ||
941               N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
942               N.getTag() == dwarf::DW_TAG_reference_type ||
943               N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
944               N.getTag() == dwarf::DW_TAG_const_type ||
945               N.getTag() == dwarf::DW_TAG_volatile_type ||
946               N.getTag() == dwarf::DW_TAG_restrict_type ||
947               N.getTag() == dwarf::DW_TAG_atomic_type ||
948               N.getTag() == dwarf::DW_TAG_member ||
949               N.getTag() == dwarf::DW_TAG_inheritance ||
950               N.getTag() == dwarf::DW_TAG_friend,
951           "invalid tag", &N);
952  if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
953    AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
954             N.getRawExtraData());
955  }
956
957  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
958  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
959           N.getRawBaseType());
960
961  if (N.getDWARFAddressSpace()) {
962    AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
963                 N.getTag() == dwarf::DW_TAG_reference_type ||
964                 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
965             "DWARF address space only applies to pointer or reference types",
966             &N);
967  }
968}
969
970/// Detect mutually exclusive flags.
971static bool hasConflictingReferenceFlags(unsigned Flags) {
972  return ((Flags & DINode::FlagLValueReference) &&
973          (Flags & DINode::FlagRValueReference)) ||
974         ((Flags & DINode::FlagTypePassByValue) &&
975          (Flags & DINode::FlagTypePassByReference));
976}
977
978void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
979  auto *Params = dyn_cast<MDTuple>(&RawParams);
980  AssertDI(Params, "invalid template params", &N, &RawParams);
981  for (Metadata *Op : Params->operands()) {
982    AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
983             &N, Params, Op);
984  }
985}
986
987void Verifier::visitDICompositeType(const DICompositeType &N) {
988  // Common scope checks.
989  visitDIScope(N);
990
991  AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
992               N.getTag() == dwarf::DW_TAG_structure_type ||
993               N.getTag() == dwarf::DW_TAG_union_type ||
994               N.getTag() == dwarf::DW_TAG_enumeration_type ||
995               N.getTag() == dwarf::DW_TAG_class_type ||
996               N.getTag() == dwarf::DW_TAG_variant_part,
997           "invalid tag", &N);
998
999  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1000  AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
1001           N.getRawBaseType());
1002
1003  AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
1004           "invalid composite elements", &N, N.getRawElements());
1005  AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
1006           N.getRawVTableHolder());
1007  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1008           "invalid reference flags", &N);
1009  unsigned DIBlockByRefStruct = 1 << 4;
1010  AssertDI((N.getFlags() & DIBlockByRefStruct) == 0,
1011           "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
1012
1013  if (N.isVector()) {
1014    const DINodeArray Elements = N.getElements();
1015    AssertDI(Elements.size() == 1 &&
1016             Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
1017             "invalid vector, expected one element of type subrange", &N);
1018  }
1019
1020  if (auto *Params = N.getRawTemplateParams())
1021    visitTemplateParams(N, *Params);
1022
1023  if (N.getTag() == dwarf::DW_TAG_class_type ||
1024      N.getTag() == dwarf::DW_TAG_union_type) {
1025    AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
1026             "class/union requires a filename", &N, N.getFile());
1027  }
1028
1029  if (auto *D = N.getRawDiscriminator()) {
1030    AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1031             "discriminator can only appear on variant part");
1032  }
1033
1034  if (N.getRawDataLocation()) {
1035    AssertDI(N.getTag() == dwarf::DW_TAG_array_type,
1036             "dataLocation can only appear in array type");
1037  }
1038}
1039
1040void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1041  AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1042  if (auto *Types = N.getRawTypeArray()) {
1043    AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1044    for (Metadata *Ty : N.getTypeArray()->operands()) {
1045      AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1046    }
1047  }
1048  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1049           "invalid reference flags", &N);
1050}
1051
1052void Verifier::visitDIFile(const DIFile &N) {
1053  AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1054  Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1055  if (Checksum) {
1056    AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1057             "invalid checksum kind", &N);
1058    size_t Size;
1059    switch (Checksum->Kind) {
1060    case DIFile::CSK_MD5:
1061      Size = 32;
1062      break;
1063    case DIFile::CSK_SHA1:
1064      Size = 40;
1065      break;
1066    case DIFile::CSK_SHA256:
1067      Size = 64;
1068      break;
1069    }
1070    AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1071    AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1072             "invalid checksum", &N);
1073  }
1074}
1075
1076void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1077  AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1078  AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1079
1080  // Don't bother verifying the compilation directory or producer string
1081  // as those could be empty.
1082  AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1083           N.getRawFile());
1084  AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1085           N.getFile());
1086
1087  verifySourceDebugInfo(N, *N.getFile());
1088
1089  AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1090           "invalid emission kind", &N);
1091
1092  if (auto *Array = N.getRawEnumTypes()) {
1093    AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1094    for (Metadata *Op : N.getEnumTypes()->operands()) {
1095      auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1096      AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1097               "invalid enum type", &N, N.getEnumTypes(), Op);
1098    }
1099  }
1100  if (auto *Array = N.getRawRetainedTypes()) {
1101    AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1102    for (Metadata *Op : N.getRetainedTypes()->operands()) {
1103      AssertDI(Op && (isa<DIType>(Op) ||
1104                      (isa<DISubprogram>(Op) &&
1105                       !cast<DISubprogram>(Op)->isDefinition())),
1106               "invalid retained type", &N, Op);
1107    }
1108  }
1109  if (auto *Array = N.getRawGlobalVariables()) {
1110    AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1111    for (Metadata *Op : N.getGlobalVariables()->operands()) {
1112      AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1113               "invalid global variable ref", &N, Op);
1114    }
1115  }
1116  if (auto *Array = N.getRawImportedEntities()) {
1117    AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1118    for (Metadata *Op : N.getImportedEntities()->operands()) {
1119      AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1120               &N, Op);
1121    }
1122  }
1123  if (auto *Array = N.getRawMacros()) {
1124    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1125    for (Metadata *Op : N.getMacros()->operands()) {
1126      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1127    }
1128  }
1129  CUVisited.insert(&N);
1130}
1131
1132void Verifier::visitDISubprogram(const DISubprogram &N) {
1133  AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1134  AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1135  if (auto *F = N.getRawFile())
1136    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1137  else
1138    AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1139  if (auto *T = N.getRawType())
1140    AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1141  AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1142           N.getRawContainingType());
1143  if (auto *Params = N.getRawTemplateParams())
1144    visitTemplateParams(N, *Params);
1145  if (auto *S = N.getRawDeclaration())
1146    AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1147             "invalid subprogram declaration", &N, S);
1148  if (auto *RawNode = N.getRawRetainedNodes()) {
1149    auto *Node = dyn_cast<MDTuple>(RawNode);
1150    AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1151    for (Metadata *Op : Node->operands()) {
1152      AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1153               "invalid retained nodes, expected DILocalVariable or DILabel",
1154               &N, Node, Op);
1155    }
1156  }
1157  AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1158           "invalid reference flags", &N);
1159
1160  auto *Unit = N.getRawUnit();
1161  if (N.isDefinition()) {
1162    // Subprogram definitions (not part of the type hierarchy).
1163    AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1164    AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1165    AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1166    if (N.getFile())
1167      verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1168  } else {
1169    // Subprogram declarations (part of the type hierarchy).
1170    AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1171  }
1172
1173  if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1174    auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1175    AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1176    for (Metadata *Op : ThrownTypes->operands())
1177      AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1178               Op);
1179  }
1180
1181  if (N.areAllCallsDescribed())
1182    AssertDI(N.isDefinition(),
1183             "DIFlagAllCallsDescribed must be attached to a definition");
1184}
1185
1186void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1187  AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1188  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1189           "invalid local scope", &N, N.getRawScope());
1190  if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1191    AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1192}
1193
1194void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1195  visitDILexicalBlockBase(N);
1196
1197  AssertDI(N.getLine() || !N.getColumn(),
1198           "cannot have column info without line info", &N);
1199}
1200
1201void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1202  visitDILexicalBlockBase(N);
1203}
1204
1205void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1206  AssertDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1207  if (auto *S = N.getRawScope())
1208    AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1209  if (auto *S = N.getRawDecl())
1210    AssertDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1211}
1212
1213void Verifier::visitDINamespace(const DINamespace &N) {
1214  AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1215  if (auto *S = N.getRawScope())
1216    AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1217}
1218
1219void Verifier::visitDIMacro(const DIMacro &N) {
1220  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1221               N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1222           "invalid macinfo type", &N);
1223  AssertDI(!N.getName().empty(), "anonymous macro", &N);
1224  if (!N.getValue().empty()) {
1225    assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1226  }
1227}
1228
1229void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1230  AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1231           "invalid macinfo type", &N);
1232  if (auto *F = N.getRawFile())
1233    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1234
1235  if (auto *Array = N.getRawElements()) {
1236    AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1237    for (Metadata *Op : N.getElements()->operands()) {
1238      AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1239    }
1240  }
1241}
1242
1243void Verifier::visitDIModule(const DIModule &N) {
1244  AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1245  AssertDI(!N.getName().empty(), "anonymous module", &N);
1246}
1247
1248void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1249  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1250}
1251
1252void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1253  visitDITemplateParameter(N);
1254
1255  AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1256           &N);
1257}
1258
1259void Verifier::visitDITemplateValueParameter(
1260    const DITemplateValueParameter &N) {
1261  visitDITemplateParameter(N);
1262
1263  AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1264               N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1265               N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1266           "invalid tag", &N);
1267}
1268
1269void Verifier::visitDIVariable(const DIVariable &N) {
1270  if (auto *S = N.getRawScope())
1271    AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1272  if (auto *F = N.getRawFile())
1273    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1274}
1275
1276void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1277  // Checks common to all variables.
1278  visitDIVariable(N);
1279
1280  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1281  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1282  // Assert only if the global variable is not an extern
1283  if (N.isDefinition())
1284    AssertDI(N.getType(), "missing global variable type", &N);
1285  if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1286    AssertDI(isa<DIDerivedType>(Member),
1287             "invalid static data member declaration", &N, Member);
1288  }
1289}
1290
1291void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1292  // Checks common to all variables.
1293  visitDIVariable(N);
1294
1295  AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1296  AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1297  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1298           "local variable requires a valid scope", &N, N.getRawScope());
1299  if (auto Ty = N.getType())
1300    AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1301}
1302
1303void Verifier::visitDILabel(const DILabel &N) {
1304  if (auto *S = N.getRawScope())
1305    AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1306  if (auto *F = N.getRawFile())
1307    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1308
1309  AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1310  AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1311           "label requires a valid scope", &N, N.getRawScope());
1312}
1313
1314void Verifier::visitDIExpression(const DIExpression &N) {
1315  AssertDI(N.isValid(), "invalid expression", &N);
1316}
1317
1318void Verifier::visitDIGlobalVariableExpression(
1319    const DIGlobalVariableExpression &GVE) {
1320  AssertDI(GVE.getVariable(), "missing variable");
1321  if (auto *Var = GVE.getVariable())
1322    visitDIGlobalVariable(*Var);
1323  if (auto *Expr = GVE.getExpression()) {
1324    visitDIExpression(*Expr);
1325    if (auto Fragment = Expr->getFragmentInfo())
1326      verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1327  }
1328}
1329
1330void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1331  AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1332  if (auto *T = N.getRawType())
1333    AssertDI(isType(T), "invalid type ref", &N, T);
1334  if (auto *F = N.getRawFile())
1335    AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1336}
1337
1338void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1339  AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1340               N.getTag() == dwarf::DW_TAG_imported_declaration,
1341           "invalid tag", &N);
1342  if (auto *S = N.getRawScope())
1343    AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1344  AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1345           N.getRawEntity());
1346}
1347
1348void Verifier::visitComdat(const Comdat &C) {
1349  // In COFF the Module is invalid if the GlobalValue has private linkage.
1350  // Entities with private linkage don't have entries in the symbol table.
1351  if (TT.isOSBinFormatCOFF())
1352    if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1353      Assert(!GV->hasPrivateLinkage(),
1354             "comdat global value has private linkage", GV);
1355}
1356
1357void Verifier::visitModuleIdents(const Module &M) {
1358  const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1359  if (!Idents)
1360    return;
1361
1362  // llvm.ident takes a list of metadata entry. Each entry has only one string.
1363  // Scan each llvm.ident entry and make sure that this requirement is met.
1364  for (const MDNode *N : Idents->operands()) {
1365    Assert(N->getNumOperands() == 1,
1366           "incorrect number of operands in llvm.ident metadata", N);
1367    Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1368           ("invalid value for llvm.ident metadata entry operand"
1369            "(the operand should be a string)"),
1370           N->getOperand(0));
1371  }
1372}
1373
1374void Verifier::visitModuleCommandLines(const Module &M) {
1375  const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1376  if (!CommandLines)
1377    return;
1378
1379  // llvm.commandline takes a list of metadata entry. Each entry has only one
1380  // string. Scan each llvm.commandline entry and make sure that this
1381  // requirement is met.
1382  for (const MDNode *N : CommandLines->operands()) {
1383    Assert(N->getNumOperands() == 1,
1384           "incorrect number of operands in llvm.commandline metadata", N);
1385    Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1386           ("invalid value for llvm.commandline metadata entry operand"
1387            "(the operand should be a string)"),
1388           N->getOperand(0));
1389  }
1390}
1391
1392void Verifier::visitModuleFlags(const Module &M) {
1393  const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1394  if (!Flags) return;
1395
1396  // Scan each flag, and track the flags and requirements.
1397  DenseMap<const MDString*, const MDNode*> SeenIDs;
1398  SmallVector<const MDNode*, 16> Requirements;
1399  for (const MDNode *MDN : Flags->operands())
1400    visitModuleFlag(MDN, SeenIDs, Requirements);
1401
1402  // Validate that the requirements in the module are valid.
1403  for (const MDNode *Requirement : Requirements) {
1404    const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1405    const Metadata *ReqValue = Requirement->getOperand(1);
1406
1407    const MDNode *Op = SeenIDs.lookup(Flag);
1408    if (!Op) {
1409      CheckFailed("invalid requirement on flag, flag is not present in module",
1410                  Flag);
1411      continue;
1412    }
1413
1414    if (Op->getOperand(2) != ReqValue) {
1415      CheckFailed(("invalid requirement on flag, "
1416                   "flag does not have the required value"),
1417                  Flag);
1418      continue;
1419    }
1420  }
1421}
1422
1423void
1424Verifier::visitModuleFlag(const MDNode *Op,
1425                          DenseMap<const MDString *, const MDNode *> &SeenIDs,
1426                          SmallVectorImpl<const MDNode *> &Requirements) {
1427  // Each module flag should have three arguments, the merge behavior (a
1428  // constant int), the flag ID (an MDString), and the value.
1429  Assert(Op->getNumOperands() == 3,
1430         "incorrect number of operands in module flag", Op);
1431  Module::ModFlagBehavior MFB;
1432  if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1433    Assert(
1434        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1435        "invalid behavior operand in module flag (expected constant integer)",
1436        Op->getOperand(0));
1437    Assert(false,
1438           "invalid behavior operand in module flag (unexpected constant)",
1439           Op->getOperand(0));
1440  }
1441  MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1442  Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1443         Op->getOperand(1));
1444
1445  // Sanity check the values for behaviors with additional requirements.
1446  switch (MFB) {
1447  case Module::Error:
1448  case Module::Warning:
1449  case Module::Override:
1450    // These behavior types accept any value.
1451    break;
1452
1453  case Module::Max: {
1454    Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1455           "invalid value for 'max' module flag (expected constant integer)",
1456           Op->getOperand(2));
1457    break;
1458  }
1459
1460  case Module::Require: {
1461    // The value should itself be an MDNode with two operands, a flag ID (an
1462    // MDString), and a value.
1463    MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1464    Assert(Value && Value->getNumOperands() == 2,
1465           "invalid value for 'require' module flag (expected metadata pair)",
1466           Op->getOperand(2));
1467    Assert(isa<MDString>(Value->getOperand(0)),
1468           ("invalid value for 'require' module flag "
1469            "(first value operand should be a string)"),
1470           Value->getOperand(0));
1471
1472    // Append it to the list of requirements, to check once all module flags are
1473    // scanned.
1474    Requirements.push_back(Value);
1475    break;
1476  }
1477
1478  case Module::Append:
1479  case Module::AppendUnique: {
1480    // These behavior types require the operand be an MDNode.
1481    Assert(isa<MDNode>(Op->getOperand(2)),
1482           "invalid value for 'append'-type module flag "
1483           "(expected a metadata node)",
1484           Op->getOperand(2));
1485    break;
1486  }
1487  }
1488
1489  // Unless this is a "requires" flag, check the ID is unique.
1490  if (MFB != Module::Require) {
1491    bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1492    Assert(Inserted,
1493           "module flag identifiers must be unique (or of 'require' type)", ID);
1494  }
1495
1496  if (ID->getString() == "wchar_size") {
1497    ConstantInt *Value
1498      = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1499    Assert(Value, "wchar_size metadata requires constant integer argument");
1500  }
1501
1502  if (ID->getString() == "Linker Options") {
1503    // If the llvm.linker.options named metadata exists, we assume that the
1504    // bitcode reader has upgraded the module flag. Otherwise the flag might
1505    // have been created by a client directly.
1506    Assert(M.getNamedMetadata("llvm.linker.options"),
1507           "'Linker Options' named metadata no longer supported");
1508  }
1509
1510  if (ID->getString() == "SemanticInterposition") {
1511    ConstantInt *Value =
1512        mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1513    Assert(Value,
1514           "SemanticInterposition metadata requires constant integer argument");
1515  }
1516
1517  if (ID->getString() == "CG Profile") {
1518    for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1519      visitModuleFlagCGProfileEntry(MDO);
1520  }
1521}
1522
1523void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1524  auto CheckFunction = [&](const MDOperand &FuncMDO) {
1525    if (!FuncMDO)
1526      return;
1527    auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1528    Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1529           FuncMDO);
1530  };
1531  auto Node = dyn_cast_or_null<MDNode>(MDO);
1532  Assert(Node && Node->getNumOperands() == 3, "expected a MDNode triple", MDO);
1533  CheckFunction(Node->getOperand(0));
1534  CheckFunction(Node->getOperand(1));
1535  auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1536  Assert(Count && Count->getType()->isIntegerTy(),
1537         "expected an integer constant", Node->getOperand(2));
1538}
1539
1540/// Return true if this attribute kind only applies to functions.
1541static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1542  switch (Kind) {
1543  case Attribute::NoMerge:
1544  case Attribute::NoReturn:
1545  case Attribute::NoSync:
1546  case Attribute::WillReturn:
1547  case Attribute::NoCfCheck:
1548  case Attribute::NoUnwind:
1549  case Attribute::NoInline:
1550  case Attribute::AlwaysInline:
1551  case Attribute::OptimizeForSize:
1552  case Attribute::StackProtect:
1553  case Attribute::StackProtectReq:
1554  case Attribute::StackProtectStrong:
1555  case Attribute::SafeStack:
1556  case Attribute::ShadowCallStack:
1557  case Attribute::NoRedZone:
1558  case Attribute::NoImplicitFloat:
1559  case Attribute::Naked:
1560  case Attribute::InlineHint:
1561  case Attribute::StackAlignment:
1562  case Attribute::UWTable:
1563  case Attribute::NonLazyBind:
1564  case Attribute::ReturnsTwice:
1565  case Attribute::SanitizeAddress:
1566  case Attribute::SanitizeHWAddress:
1567  case Attribute::SanitizeMemTag:
1568  case Attribute::SanitizeThread:
1569  case Attribute::SanitizeMemory:
1570  case Attribute::MinSize:
1571  case Attribute::NoDuplicate:
1572  case Attribute::Builtin:
1573  case Attribute::NoBuiltin:
1574  case Attribute::Cold:
1575  case Attribute::OptForFuzzing:
1576  case Attribute::OptimizeNone:
1577  case Attribute::JumpTable:
1578  case Attribute::Convergent:
1579  case Attribute::ArgMemOnly:
1580  case Attribute::NoRecurse:
1581  case Attribute::InaccessibleMemOnly:
1582  case Attribute::InaccessibleMemOrArgMemOnly:
1583  case Attribute::AllocSize:
1584  case Attribute::SpeculativeLoadHardening:
1585  case Attribute::Speculatable:
1586  case Attribute::StrictFP:
1587  case Attribute::NullPointerIsValid:
1588    return true;
1589  default:
1590    break;
1591  }
1592  return false;
1593}
1594
1595/// Return true if this is a function attribute that can also appear on
1596/// arguments.
1597static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1598  return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1599         Kind == Attribute::ReadNone || Kind == Attribute::NoFree ||
1600         Kind == Attribute::Preallocated;
1601}
1602
1603void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1604                                    const Value *V) {
1605  for (Attribute A : Attrs) {
1606    if (A.isStringAttribute())
1607      continue;
1608
1609    if (A.isIntAttribute() !=
1610        Attribute::doesAttrKindHaveArgument(A.getKindAsEnum())) {
1611      CheckFailed("Attribute '" + A.getAsString() + "' should have an Argument",
1612                  V);
1613      return;
1614    }
1615
1616    if (isFuncOnlyAttr(A.getKindAsEnum())) {
1617      if (!IsFunction) {
1618        CheckFailed("Attribute '" + A.getAsString() +
1619                        "' only applies to functions!",
1620                    V);
1621        return;
1622      }
1623    } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1624      CheckFailed("Attribute '" + A.getAsString() +
1625                      "' does not apply to functions!",
1626                  V);
1627      return;
1628    }
1629  }
1630}
1631
1632// VerifyParameterAttrs - Check the given attributes for an argument or return
1633// value of the specified type.  The value V is printed in error messages.
1634void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1635                                    const Value *V) {
1636  if (!Attrs.hasAttributes())
1637    return;
1638
1639  verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1640
1641  if (Attrs.hasAttribute(Attribute::ImmArg)) {
1642    Assert(Attrs.getNumAttributes() == 1,
1643           "Attribute 'immarg' is incompatible with other attributes", V);
1644  }
1645
1646  // Check for mutually incompatible attributes.  Only inreg is compatible with
1647  // sret.
1648  unsigned AttrCount = 0;
1649  AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1650  AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1651  AttrCount += Attrs.hasAttribute(Attribute::Preallocated);
1652  AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1653               Attrs.hasAttribute(Attribute::InReg);
1654  AttrCount += Attrs.hasAttribute(Attribute::Nest);
1655  Assert(AttrCount <= 1,
1656         "Attributes 'byval', 'inalloca', 'preallocated', 'inreg', 'nest', "
1657         "and 'sret' are incompatible!",
1658         V);
1659
1660  Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1661           Attrs.hasAttribute(Attribute::ReadOnly)),
1662         "Attributes "
1663         "'inalloca and readonly' are incompatible!",
1664         V);
1665
1666  Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1667           Attrs.hasAttribute(Attribute::Returned)),
1668         "Attributes "
1669         "'sret and returned' are incompatible!",
1670         V);
1671
1672  Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1673           Attrs.hasAttribute(Attribute::SExt)),
1674         "Attributes "
1675         "'zeroext and signext' are incompatible!",
1676         V);
1677
1678  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1679           Attrs.hasAttribute(Attribute::ReadOnly)),
1680         "Attributes "
1681         "'readnone and readonly' are incompatible!",
1682         V);
1683
1684  Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1685           Attrs.hasAttribute(Attribute::WriteOnly)),
1686         "Attributes "
1687         "'readnone and writeonly' are incompatible!",
1688         V);
1689
1690  Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1691           Attrs.hasAttribute(Attribute::WriteOnly)),
1692         "Attributes "
1693         "'readonly and writeonly' are incompatible!",
1694         V);
1695
1696  Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1697           Attrs.hasAttribute(Attribute::AlwaysInline)),
1698         "Attributes "
1699         "'noinline and alwaysinline' are incompatible!",
1700         V);
1701
1702  if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
1703    Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(),
1704           "Attribute 'byval' type does not match parameter!", V);
1705  }
1706
1707  if (Attrs.hasAttribute(Attribute::Preallocated)) {
1708    Assert(Attrs.getPreallocatedType() ==
1709               cast<PointerType>(Ty)->getElementType(),
1710           "Attribute 'preallocated' type does not match parameter!", V);
1711  }
1712
1713  AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1714  Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1715         "Wrong types for attribute: " +
1716             AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1717         V);
1718
1719  if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1720    SmallPtrSet<Type*, 4> Visited;
1721    if (!PTy->getElementType()->isSized(&Visited)) {
1722      Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1723                 !Attrs.hasAttribute(Attribute::InAlloca) &&
1724                 !Attrs.hasAttribute(Attribute::Preallocated),
1725             "Attributes 'byval', 'inalloca', and 'preallocated' do not "
1726             "support unsized types!",
1727             V);
1728    }
1729    if (!isa<PointerType>(PTy->getElementType()))
1730      Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1731             "Attribute 'swifterror' only applies to parameters "
1732             "with pointer to pointer type!",
1733             V);
1734  } else {
1735    Assert(!Attrs.hasAttribute(Attribute::ByVal),
1736           "Attribute 'byval' only applies to parameters with pointer type!",
1737           V);
1738    Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1739           "Attribute 'swifterror' only applies to parameters "
1740           "with pointer type!",
1741           V);
1742  }
1743}
1744
1745// Check parameter attributes against a function type.
1746// The value V is printed in error messages.
1747void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1748                                   const Value *V, bool IsIntrinsic) {
1749  if (Attrs.isEmpty())
1750    return;
1751
1752  bool SawNest = false;
1753  bool SawReturned = false;
1754  bool SawSRet = false;
1755  bool SawSwiftSelf = false;
1756  bool SawSwiftError = false;
1757
1758  // Verify return value attributes.
1759  AttributeSet RetAttrs = Attrs.getRetAttributes();
1760  Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1761          !RetAttrs.hasAttribute(Attribute::Nest) &&
1762          !RetAttrs.hasAttribute(Attribute::StructRet) &&
1763          !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1764          !RetAttrs.hasAttribute(Attribute::NoFree) &&
1765          !RetAttrs.hasAttribute(Attribute::Returned) &&
1766          !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1767          !RetAttrs.hasAttribute(Attribute::Preallocated) &&
1768          !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1769          !RetAttrs.hasAttribute(Attribute::SwiftError)),
1770         "Attributes 'byval', 'inalloca', 'preallocated', 'nest', 'sret', "
1771         "'nocapture', 'nofree', "
1772         "'returned', 'swiftself', and 'swifterror' do not apply to return "
1773         "values!",
1774         V);
1775  Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1776          !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1777          !RetAttrs.hasAttribute(Attribute::ReadNone)),
1778         "Attribute '" + RetAttrs.getAsString() +
1779             "' does not apply to function returns",
1780         V);
1781  verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1782
1783  // Verify parameter attributes.
1784  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1785    Type *Ty = FT->getParamType(i);
1786    AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1787
1788    if (!IsIntrinsic) {
1789      Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
1790             "immarg attribute only applies to intrinsics",V);
1791    }
1792
1793    verifyParameterAttrs(ArgAttrs, Ty, V);
1794
1795    if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1796      Assert(!SawNest, "More than one parameter has attribute nest!", V);
1797      SawNest = true;
1798    }
1799
1800    if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1801      Assert(!SawReturned, "More than one parameter has attribute returned!",
1802             V);
1803      Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1804             "Incompatible argument and return types for 'returned' attribute",
1805             V);
1806      SawReturned = true;
1807    }
1808
1809    if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1810      Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1811      Assert(i == 0 || i == 1,
1812             "Attribute 'sret' is not on first or second parameter!", V);
1813      SawSRet = true;
1814    }
1815
1816    if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1817      Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1818      SawSwiftSelf = true;
1819    }
1820
1821    if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1822      Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1823             V);
1824      SawSwiftError = true;
1825    }
1826
1827    if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1828      Assert(i == FT->getNumParams() - 1,
1829             "inalloca isn't on the last parameter!", V);
1830    }
1831  }
1832
1833  if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1834    return;
1835
1836  verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1837
1838  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1839           Attrs.hasFnAttribute(Attribute::ReadOnly)),
1840         "Attributes 'readnone and readonly' are incompatible!", V);
1841
1842  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1843           Attrs.hasFnAttribute(Attribute::WriteOnly)),
1844         "Attributes 'readnone and writeonly' are incompatible!", V);
1845
1846  Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1847           Attrs.hasFnAttribute(Attribute::WriteOnly)),
1848         "Attributes 'readonly and writeonly' are incompatible!", V);
1849
1850  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1851           Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1852         "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1853         "incompatible!",
1854         V);
1855
1856  Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1857           Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1858         "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1859
1860  Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1861           Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1862         "Attributes 'noinline and alwaysinline' are incompatible!", V);
1863
1864  if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1865    Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1866           "Attribute 'optnone' requires 'noinline'!", V);
1867
1868    Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1869           "Attributes 'optsize and optnone' are incompatible!", V);
1870
1871    Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1872           "Attributes 'minsize and optnone' are incompatible!", V);
1873  }
1874
1875  if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1876    const GlobalValue *GV = cast<GlobalValue>(V);
1877    Assert(GV->hasGlobalUnnamedAddr(),
1878           "Attribute 'jumptable' requires 'unnamed_addr'", V);
1879  }
1880
1881  if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1882    std::pair<unsigned, Optional<unsigned>> Args =
1883        Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1884
1885    auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1886      if (ParamNo >= FT->getNumParams()) {
1887        CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1888        return false;
1889      }
1890
1891      if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1892        CheckFailed("'allocsize' " + Name +
1893                        " argument must refer to an integer parameter",
1894                    V);
1895        return false;
1896      }
1897
1898      return true;
1899    };
1900
1901    if (!CheckParam("element size", Args.first))
1902      return;
1903
1904    if (Args.second && !CheckParam("number of elements", *Args.second))
1905      return;
1906  }
1907
1908  if (Attrs.hasFnAttribute("frame-pointer")) {
1909    StringRef FP = Attrs.getAttribute(AttributeList::FunctionIndex,
1910                                      "frame-pointer").getValueAsString();
1911    if (FP != "all" && FP != "non-leaf" && FP != "none")
1912      CheckFailed("invalid value for 'frame-pointer' attribute: " + FP, V);
1913  }
1914
1915  if (Attrs.hasFnAttribute("patchable-function-prefix")) {
1916    StringRef S = Attrs
1917                      .getAttribute(AttributeList::FunctionIndex,
1918                                    "patchable-function-prefix")
1919                      .getValueAsString();
1920    unsigned N;
1921    if (S.getAsInteger(10, N))
1922      CheckFailed(
1923          "\"patchable-function-prefix\" takes an unsigned integer: " + S, V);
1924  }
1925  if (Attrs.hasFnAttribute("patchable-function-entry")) {
1926    StringRef S = Attrs
1927                      .getAttribute(AttributeList::FunctionIndex,
1928                                    "patchable-function-entry")
1929                      .getValueAsString();
1930    unsigned N;
1931    if (S.getAsInteger(10, N))
1932      CheckFailed(
1933          "\"patchable-function-entry\" takes an unsigned integer: " + S, V);
1934  }
1935}
1936
1937void Verifier::verifyFunctionMetadata(
1938    ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1939  for (const auto &Pair : MDs) {
1940    if (Pair.first == LLVMContext::MD_prof) {
1941      MDNode *MD = Pair.second;
1942      Assert(MD->getNumOperands() >= 2,
1943             "!prof annotations should have no less than 2 operands", MD);
1944
1945      // Check first operand.
1946      Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1947             MD);
1948      Assert(isa<MDString>(MD->getOperand(0)),
1949             "expected string with name of the !prof annotation", MD);
1950      MDString *MDS = cast<MDString>(MD->getOperand(0));
1951      StringRef ProfName = MDS->getString();
1952      Assert(ProfName.equals("function_entry_count") ||
1953                 ProfName.equals("synthetic_function_entry_count"),
1954             "first operand should be 'function_entry_count'"
1955             " or 'synthetic_function_entry_count'",
1956             MD);
1957
1958      // Check second operand.
1959      Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1960             MD);
1961      Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1962             "expected integer argument to function_entry_count", MD);
1963    }
1964  }
1965}
1966
1967void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1968  if (!ConstantExprVisited.insert(EntryC).second)
1969    return;
1970
1971  SmallVector<const Constant *, 16> Stack;
1972  Stack.push_back(EntryC);
1973
1974  while (!Stack.empty()) {
1975    const Constant *C = Stack.pop_back_val();
1976
1977    // Check this constant expression.
1978    if (const auto *CE = dyn_cast<ConstantExpr>(C))
1979      visitConstantExpr(CE);
1980
1981    if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1982      // Global Values get visited separately, but we do need to make sure
1983      // that the global value is in the correct module
1984      Assert(GV->getParent() == &M, "Referencing global in another module!",
1985             EntryC, &M, GV, GV->getParent());
1986      continue;
1987    }
1988
1989    // Visit all sub-expressions.
1990    for (const Use &U : C->operands()) {
1991      const auto *OpC = dyn_cast<Constant>(U);
1992      if (!OpC)
1993        continue;
1994      if (!ConstantExprVisited.insert(OpC).second)
1995        continue;
1996      Stack.push_back(OpC);
1997    }
1998  }
1999}
2000
2001void Verifier::visitConstantExpr(const ConstantExpr *CE) {
2002  if (CE->getOpcode() == Instruction::BitCast)
2003    Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
2004                                 CE->getType()),
2005           "Invalid bitcast", CE);
2006
2007  if (CE->getOpcode() == Instruction::IntToPtr ||
2008      CE->getOpcode() == Instruction::PtrToInt) {
2009    auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
2010                      ? CE->getType()
2011                      : CE->getOperand(0)->getType();
2012    StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
2013                        ? "inttoptr not supported for non-integral pointers"
2014                        : "ptrtoint not supported for non-integral pointers";
2015    Assert(
2016        !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
2017        Msg);
2018  }
2019}
2020
2021bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
2022  // There shouldn't be more attribute sets than there are parameters plus the
2023  // function and return value.
2024  return Attrs.getNumAttrSets() <= Params + 2;
2025}
2026
2027/// Verify that statepoint intrinsic is well formed.
2028void Verifier::verifyStatepoint(const CallBase &Call) {
2029  assert(Call.getCalledFunction() &&
2030         Call.getCalledFunction()->getIntrinsicID() ==
2031             Intrinsic::experimental_gc_statepoint);
2032
2033  Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
2034             !Call.onlyAccessesArgMemory(),
2035         "gc.statepoint must read and write all memory to preserve "
2036         "reordering restrictions required by safepoint semantics",
2037         Call);
2038
2039  const int64_t NumPatchBytes =
2040      cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
2041  assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
2042  Assert(NumPatchBytes >= 0,
2043         "gc.statepoint number of patchable bytes must be "
2044         "positive",
2045         Call);
2046
2047  const Value *Target = Call.getArgOperand(2);
2048  auto *PT = dyn_cast<PointerType>(Target->getType());
2049  Assert(PT && PT->getElementType()->isFunctionTy(),
2050         "gc.statepoint callee must be of function pointer type", Call, Target);
2051  FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
2052
2053  const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
2054  Assert(NumCallArgs >= 0,
2055         "gc.statepoint number of arguments to underlying call "
2056         "must be positive",
2057         Call);
2058  const int NumParams = (int)TargetFuncType->getNumParams();
2059  if (TargetFuncType->isVarArg()) {
2060    Assert(NumCallArgs >= NumParams,
2061           "gc.statepoint mismatch in number of vararg call args", Call);
2062
2063    // TODO: Remove this limitation
2064    Assert(TargetFuncType->getReturnType()->isVoidTy(),
2065           "gc.statepoint doesn't support wrapping non-void "
2066           "vararg functions yet",
2067           Call);
2068  } else
2069    Assert(NumCallArgs == NumParams,
2070           "gc.statepoint mismatch in number of call args", Call);
2071
2072  const uint64_t Flags
2073    = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
2074  Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
2075         "unknown flag used in gc.statepoint flags argument", Call);
2076
2077  // Verify that the types of the call parameter arguments match
2078  // the type of the wrapped callee.
2079  AttributeList Attrs = Call.getAttributes();
2080  for (int i = 0; i < NumParams; i++) {
2081    Type *ParamType = TargetFuncType->getParamType(i);
2082    Type *ArgType = Call.getArgOperand(5 + i)->getType();
2083    Assert(ArgType == ParamType,
2084           "gc.statepoint call argument does not match wrapped "
2085           "function type",
2086           Call);
2087
2088    if (TargetFuncType->isVarArg()) {
2089      AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
2090      Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2091             "Attribute 'sret' cannot be used for vararg call arguments!",
2092             Call);
2093    }
2094  }
2095
2096  const int EndCallArgsInx = 4 + NumCallArgs;
2097
2098  const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2099  Assert(isa<ConstantInt>(NumTransitionArgsV),
2100         "gc.statepoint number of transition arguments "
2101         "must be constant integer",
2102         Call);
2103  const int NumTransitionArgs =
2104      cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2105  Assert(NumTransitionArgs >= 0,
2106         "gc.statepoint number of transition arguments must be positive", Call);
2107  const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2108
2109  // We're migrating away from inline operands to operand bundles, enforce
2110  // the either/or property during transition.
2111  if (Call.getOperandBundle(LLVMContext::OB_gc_transition)) {
2112    Assert(NumTransitionArgs == 0,
2113           "can't use both deopt operands and deopt bundle on a statepoint");
2114  }
2115
2116  const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2117  Assert(isa<ConstantInt>(NumDeoptArgsV),
2118         "gc.statepoint number of deoptimization arguments "
2119         "must be constant integer",
2120         Call);
2121  const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2122  Assert(NumDeoptArgs >= 0,
2123         "gc.statepoint number of deoptimization arguments "
2124         "must be positive",
2125         Call);
2126
2127  // We're migrating away from inline operands to operand bundles, enforce
2128  // the either/or property during transition.
2129  if (Call.getOperandBundle(LLVMContext::OB_deopt)) {
2130    Assert(NumDeoptArgs == 0,
2131           "can't use both deopt operands and deopt bundle on a statepoint");
2132  }
2133
2134  const int ExpectedNumArgs =
2135      7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2136  Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2137         "gc.statepoint too few arguments according to length fields", Call);
2138
2139  // Check that the only uses of this gc.statepoint are gc.result or
2140  // gc.relocate calls which are tied to this statepoint and thus part
2141  // of the same statepoint sequence
2142  for (const User *U : Call.users()) {
2143    const CallInst *UserCall = dyn_cast<const CallInst>(U);
2144    Assert(UserCall, "illegal use of statepoint token", Call, U);
2145    if (!UserCall)
2146      continue;
2147    Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2148           "gc.result or gc.relocate are the only value uses "
2149           "of a gc.statepoint",
2150           Call, U);
2151    if (isa<GCResultInst>(UserCall)) {
2152      Assert(UserCall->getArgOperand(0) == &Call,
2153             "gc.result connected to wrong gc.statepoint", Call, UserCall);
2154    } else if (isa<GCRelocateInst>(Call)) {
2155      Assert(UserCall->getArgOperand(0) == &Call,
2156             "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2157    }
2158  }
2159
2160  // Note: It is legal for a single derived pointer to be listed multiple
2161  // times.  It's non-optimal, but it is legal.  It can also happen after
2162  // insertion if we strip a bitcast away.
2163  // Note: It is really tempting to check that each base is relocated and
2164  // that a derived pointer is never reused as a base pointer.  This turns
2165  // out to be problematic since optimizations run after safepoint insertion
2166  // can recognize equality properties that the insertion logic doesn't know
2167  // about.  See example statepoint.ll in the verifier subdirectory
2168}
2169
2170void Verifier::verifyFrameRecoverIndices() {
2171  for (auto &Counts : FrameEscapeInfo) {
2172    Function *F = Counts.first;
2173    unsigned EscapedObjectCount = Counts.second.first;
2174    unsigned MaxRecoveredIndex = Counts.second.second;
2175    Assert(MaxRecoveredIndex <= EscapedObjectCount,
2176           "all indices passed to llvm.localrecover must be less than the "
2177           "number of arguments passed to llvm.localescape in the parent "
2178           "function",
2179           F);
2180  }
2181}
2182
2183static Instruction *getSuccPad(Instruction *Terminator) {
2184  BasicBlock *UnwindDest;
2185  if (auto *II = dyn_cast<InvokeInst>(Terminator))
2186    UnwindDest = II->getUnwindDest();
2187  else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2188    UnwindDest = CSI->getUnwindDest();
2189  else
2190    UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2191  return UnwindDest->getFirstNonPHI();
2192}
2193
2194void Verifier::verifySiblingFuncletUnwinds() {
2195  SmallPtrSet<Instruction *, 8> Visited;
2196  SmallPtrSet<Instruction *, 8> Active;
2197  for (const auto &Pair : SiblingFuncletInfo) {
2198    Instruction *PredPad = Pair.first;
2199    if (Visited.count(PredPad))
2200      continue;
2201    Active.insert(PredPad);
2202    Instruction *Terminator = Pair.second;
2203    do {
2204      Instruction *SuccPad = getSuccPad(Terminator);
2205      if (Active.count(SuccPad)) {
2206        // Found a cycle; report error
2207        Instruction *CyclePad = SuccPad;
2208        SmallVector<Instruction *, 8> CycleNodes;
2209        do {
2210          CycleNodes.push_back(CyclePad);
2211          Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2212          if (CycleTerminator != CyclePad)
2213            CycleNodes.push_back(CycleTerminator);
2214          CyclePad = getSuccPad(CycleTerminator);
2215        } while (CyclePad != SuccPad);
2216        Assert(false, "EH pads can't handle each other's exceptions",
2217               ArrayRef<Instruction *>(CycleNodes));
2218      }
2219      // Don't re-walk a node we've already checked
2220      if (!Visited.insert(SuccPad).second)
2221        break;
2222      // Walk to this successor if it has a map entry.
2223      PredPad = SuccPad;
2224      auto TermI = SiblingFuncletInfo.find(PredPad);
2225      if (TermI == SiblingFuncletInfo.end())
2226        break;
2227      Terminator = TermI->second;
2228      Active.insert(PredPad);
2229    } while (true);
2230    // Each node only has one successor, so we've walked all the active
2231    // nodes' successors.
2232    Active.clear();
2233  }
2234}
2235
2236// visitFunction - Verify that a function is ok.
2237//
2238void Verifier::visitFunction(const Function &F) {
2239  visitGlobalValue(F);
2240
2241  // Check function arguments.
2242  FunctionType *FT = F.getFunctionType();
2243  unsigned NumArgs = F.arg_size();
2244
2245  Assert(&Context == &F.getContext(),
2246         "Function context does not match Module context!", &F);
2247
2248  Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2249  Assert(FT->getNumParams() == NumArgs,
2250         "# formal arguments must match # of arguments for function type!", &F,
2251         FT);
2252  Assert(F.getReturnType()->isFirstClassType() ||
2253             F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2254         "Functions cannot return aggregate values!", &F);
2255
2256  Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2257         "Invalid struct return type!", &F);
2258
2259  AttributeList Attrs = F.getAttributes();
2260
2261  Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2262         "Attribute after last parameter!", &F);
2263
2264  bool isLLVMdotName = F.getName().size() >= 5 &&
2265                       F.getName().substr(0, 5) == "llvm.";
2266
2267  // Check function attributes.
2268  verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName);
2269
2270  // On function declarations/definitions, we do not support the builtin
2271  // attribute. We do not check this in VerifyFunctionAttrs since that is
2272  // checking for Attributes that can/can not ever be on functions.
2273  Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2274         "Attribute 'builtin' can only be applied to a callsite.", &F);
2275
2276  // Check that this function meets the restrictions on this calling convention.
2277  // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2278  // restrictions can be lifted.
2279  switch (F.getCallingConv()) {
2280  default:
2281  case CallingConv::C:
2282    break;
2283  case CallingConv::AMDGPU_KERNEL:
2284  case CallingConv::SPIR_KERNEL:
2285    Assert(F.getReturnType()->isVoidTy(),
2286           "Calling convention requires void return type", &F);
2287    LLVM_FALLTHROUGH;
2288  case CallingConv::AMDGPU_VS:
2289  case CallingConv::AMDGPU_HS:
2290  case CallingConv::AMDGPU_GS:
2291  case CallingConv::AMDGPU_PS:
2292  case CallingConv::AMDGPU_CS:
2293    Assert(!F.hasStructRetAttr(),
2294           "Calling convention does not allow sret", &F);
2295    LLVM_FALLTHROUGH;
2296  case CallingConv::Fast:
2297  case CallingConv::Cold:
2298  case CallingConv::Intel_OCL_BI:
2299  case CallingConv::PTX_Kernel:
2300  case CallingConv::PTX_Device:
2301    Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2302                          "perfect forwarding!",
2303           &F);
2304    break;
2305  }
2306
2307  // Check that the argument values match the function type for this function...
2308  unsigned i = 0;
2309  for (const Argument &Arg : F.args()) {
2310    Assert(Arg.getType() == FT->getParamType(i),
2311           "Argument value does not match function argument type!", &Arg,
2312           FT->getParamType(i));
2313    Assert(Arg.getType()->isFirstClassType(),
2314           "Function arguments must have first-class types!", &Arg);
2315    if (!isLLVMdotName) {
2316      Assert(!Arg.getType()->isMetadataTy(),
2317             "Function takes metadata but isn't an intrinsic", &Arg, &F);
2318      Assert(!Arg.getType()->isTokenTy(),
2319             "Function takes token but isn't an intrinsic", &Arg, &F);
2320    }
2321
2322    // Check that swifterror argument is only used by loads and stores.
2323    if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2324      verifySwiftErrorValue(&Arg);
2325    }
2326    ++i;
2327  }
2328
2329  if (!isLLVMdotName)
2330    Assert(!F.getReturnType()->isTokenTy(),
2331           "Functions returns a token but isn't an intrinsic", &F);
2332
2333  // Get the function metadata attachments.
2334  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2335  F.getAllMetadata(MDs);
2336  assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2337  verifyFunctionMetadata(MDs);
2338
2339  // Check validity of the personality function
2340  if (F.hasPersonalityFn()) {
2341    auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2342    if (Per)
2343      Assert(Per->getParent() == F.getParent(),
2344             "Referencing personality function in another module!",
2345             &F, F.getParent(), Per, Per->getParent());
2346  }
2347
2348  if (F.isMaterializable()) {
2349    // Function has a body somewhere we can't see.
2350    Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2351           MDs.empty() ? nullptr : MDs.front().second);
2352  } else if (F.isDeclaration()) {
2353    for (const auto &I : MDs) {
2354      // This is used for call site debug information.
2355      AssertDI(I.first != LLVMContext::MD_dbg ||
2356                   !cast<DISubprogram>(I.second)->isDistinct(),
2357               "function declaration may only have a unique !dbg attachment",
2358               &F);
2359      Assert(I.first != LLVMContext::MD_prof,
2360             "function declaration may not have a !prof attachment", &F);
2361
2362      // Verify the metadata itself.
2363      visitMDNode(*I.second, AreDebugLocsAllowed::Yes);
2364    }
2365    Assert(!F.hasPersonalityFn(),
2366           "Function declaration shouldn't have a personality routine", &F);
2367  } else {
2368    // Verify that this function (which has a body) is not named "llvm.*".  It
2369    // is not legal to define intrinsics.
2370    Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2371
2372    // Check the entry node
2373    const BasicBlock *Entry = &F.getEntryBlock();
2374    Assert(pred_empty(Entry),
2375           "Entry block to function must not have predecessors!", Entry);
2376
2377    // The address of the entry block cannot be taken, unless it is dead.
2378    if (Entry->hasAddressTaken()) {
2379      Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2380             "blockaddress may not be used with the entry block!", Entry);
2381    }
2382
2383    unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2384    // Visit metadata attachments.
2385    for (const auto &I : MDs) {
2386      // Verify that the attachment is legal.
2387      auto AllowLocs = AreDebugLocsAllowed::No;
2388      switch (I.first) {
2389      default:
2390        break;
2391      case LLVMContext::MD_dbg: {
2392        ++NumDebugAttachments;
2393        AssertDI(NumDebugAttachments == 1,
2394                 "function must have a single !dbg attachment", &F, I.second);
2395        AssertDI(isa<DISubprogram>(I.second),
2396                 "function !dbg attachment must be a subprogram", &F, I.second);
2397        auto *SP = cast<DISubprogram>(I.second);
2398        const Function *&AttachedTo = DISubprogramAttachments[SP];
2399        AssertDI(!AttachedTo || AttachedTo == &F,
2400                 "DISubprogram attached to more than one function", SP, &F);
2401        AttachedTo = &F;
2402        AllowLocs = AreDebugLocsAllowed::Yes;
2403        break;
2404      }
2405      case LLVMContext::MD_prof:
2406        ++NumProfAttachments;
2407        Assert(NumProfAttachments == 1,
2408               "function must have a single !prof attachment", &F, I.second);
2409        break;
2410      }
2411
2412      // Verify the metadata itself.
2413      visitMDNode(*I.second, AllowLocs);
2414    }
2415  }
2416
2417  // If this function is actually an intrinsic, verify that it is only used in
2418  // direct call/invokes, never having its "address taken".
2419  // Only do this if the module is materialized, otherwise we don't have all the
2420  // uses.
2421  if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2422    const User *U;
2423    if (F.hasAddressTaken(&U))
2424      Assert(false, "Invalid user of intrinsic instruction!", U);
2425  }
2426
2427  auto *N = F.getSubprogram();
2428  HasDebugInfo = (N != nullptr);
2429  if (!HasDebugInfo)
2430    return;
2431
2432  // Check that all !dbg attachments lead to back to N.
2433  //
2434  // FIXME: Check this incrementally while visiting !dbg attachments.
2435  // FIXME: Only check when N is the canonical subprogram for F.
2436  SmallPtrSet<const MDNode *, 32> Seen;
2437  auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2438    // Be careful about using DILocation here since we might be dealing with
2439    // broken code (this is the Verifier after all).
2440    const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2441    if (!DL)
2442      return;
2443    if (!Seen.insert(DL).second)
2444      return;
2445
2446    Metadata *Parent = DL->getRawScope();
2447    AssertDI(Parent && isa<DILocalScope>(Parent),
2448             "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2449             Parent);
2450
2451    DILocalScope *Scope = DL->getInlinedAtScope();
2452    Assert(Scope, "Failed to find DILocalScope", DL);
2453
2454    if (!Seen.insert(Scope).second)
2455      return;
2456
2457    DISubprogram *SP = Scope->getSubprogram();
2458
2459    // Scope and SP could be the same MDNode and we don't want to skip
2460    // validation in that case
2461    if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2462      return;
2463
2464    AssertDI(SP->describes(&F),
2465             "!dbg attachment points at wrong subprogram for function", N, &F,
2466             &I, DL, Scope, SP);
2467  };
2468  for (auto &BB : F)
2469    for (auto &I : BB) {
2470      VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
2471      // The llvm.loop annotations also contain two DILocations.
2472      if (auto MD = I.getMetadata(LLVMContext::MD_loop))
2473        for (unsigned i = 1; i < MD->getNumOperands(); ++i)
2474          VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
2475      if (BrokenDebugInfo)
2476        return;
2477    }
2478}
2479
2480// verifyBasicBlock - Verify that a basic block is well formed...
2481//
2482void Verifier::visitBasicBlock(BasicBlock &BB) {
2483  InstsInThisBlock.clear();
2484
2485  // Ensure that basic blocks have terminators!
2486  Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2487
2488  // Check constraints that this basic block imposes on all of the PHI nodes in
2489  // it.
2490  if (isa<PHINode>(BB.front())) {
2491    SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2492    SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2493    llvm::sort(Preds);
2494    for (const PHINode &PN : BB.phis()) {
2495      // Ensure that PHI nodes have at least one entry!
2496      Assert(PN.getNumIncomingValues() != 0,
2497             "PHI nodes must have at least one entry.  If the block is dead, "
2498             "the PHI should be removed!",
2499             &PN);
2500      Assert(PN.getNumIncomingValues() == Preds.size(),
2501             "PHINode should have one entry for each predecessor of its "
2502             "parent basic block!",
2503             &PN);
2504
2505      // Get and sort all incoming values in the PHI node...
2506      Values.clear();
2507      Values.reserve(PN.getNumIncomingValues());
2508      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2509        Values.push_back(
2510            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2511      llvm::sort(Values);
2512
2513      for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2514        // Check to make sure that if there is more than one entry for a
2515        // particular basic block in this PHI node, that the incoming values are
2516        // all identical.
2517        //
2518        Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2519                   Values[i].second == Values[i - 1].second,
2520               "PHI node has multiple entries for the same basic block with "
2521               "different incoming values!",
2522               &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2523
2524        // Check to make sure that the predecessors and PHI node entries are
2525        // matched up.
2526        Assert(Values[i].first == Preds[i],
2527               "PHI node entries do not match predecessors!", &PN,
2528               Values[i].first, Preds[i]);
2529      }
2530    }
2531  }
2532
2533  // Check that all instructions have their parent pointers set up correctly.
2534  for (auto &I : BB)
2535  {
2536    Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2537  }
2538}
2539
2540void Verifier::visitTerminator(Instruction &I) {
2541  // Ensure that terminators only exist at the end of the basic block.
2542  Assert(&I == I.getParent()->getTerminator(),
2543         "Terminator found in the middle of a basic block!", I.getParent());
2544  visitInstruction(I);
2545}
2546
2547void Verifier::visitBranchInst(BranchInst &BI) {
2548  if (BI.isConditional()) {
2549    Assert(BI.getCondition()->getType()->isIntegerTy(1),
2550           "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2551  }
2552  visitTerminator(BI);
2553}
2554
2555void Verifier::visitReturnInst(ReturnInst &RI) {
2556  Function *F = RI.getParent()->getParent();
2557  unsigned N = RI.getNumOperands();
2558  if (F->getReturnType()->isVoidTy())
2559    Assert(N == 0,
2560           "Found return instr that returns non-void in Function of void "
2561           "return type!",
2562           &RI, F->getReturnType());
2563  else
2564    Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2565           "Function return type does not match operand "
2566           "type of return inst!",
2567           &RI, F->getReturnType());
2568
2569  // Check to make sure that the return value has necessary properties for
2570  // terminators...
2571  visitTerminator(RI);
2572}
2573
2574void Verifier::visitSwitchInst(SwitchInst &SI) {
2575  // Check to make sure that all of the constants in the switch instruction
2576  // have the same type as the switched-on value.
2577  Type *SwitchTy = SI.getCondition()->getType();
2578  SmallPtrSet<ConstantInt*, 32> Constants;
2579  for (auto &Case : SI.cases()) {
2580    Assert(Case.getCaseValue()->getType() == SwitchTy,
2581           "Switch constants must all be same type as switch value!", &SI);
2582    Assert(Constants.insert(Case.getCaseValue()).second,
2583           "Duplicate integer as switch case", &SI, Case.getCaseValue());
2584  }
2585
2586  visitTerminator(SI);
2587}
2588
2589void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2590  Assert(BI.getAddress()->getType()->isPointerTy(),
2591         "Indirectbr operand must have pointer type!", &BI);
2592  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2593    Assert(BI.getDestination(i)->getType()->isLabelTy(),
2594           "Indirectbr destinations must all have pointer type!", &BI);
2595
2596  visitTerminator(BI);
2597}
2598
2599void Verifier::visitCallBrInst(CallBrInst &CBI) {
2600  Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
2601         &CBI);
2602  for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
2603    Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
2604           "Callbr successors must all have pointer type!", &CBI);
2605  for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
2606    Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
2607           "Using an unescaped label as a callbr argument!", &CBI);
2608    if (isa<BasicBlock>(CBI.getOperand(i)))
2609      for (unsigned j = i + 1; j != e; ++j)
2610        Assert(CBI.getOperand(i) != CBI.getOperand(j),
2611               "Duplicate callbr destination!", &CBI);
2612  }
2613  {
2614    SmallPtrSet<BasicBlock *, 4> ArgBBs;
2615    for (Value *V : CBI.args())
2616      if (auto *BA = dyn_cast<BlockAddress>(V))
2617        ArgBBs.insert(BA->getBasicBlock());
2618    for (BasicBlock *BB : CBI.getIndirectDests())
2619      Assert(ArgBBs.count(BB), "Indirect label missing from arglist.", &CBI);
2620  }
2621
2622  visitTerminator(CBI);
2623}
2624
2625void Verifier::visitSelectInst(SelectInst &SI) {
2626  Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2627                                         SI.getOperand(2)),
2628         "Invalid operands for select instruction!", &SI);
2629
2630  Assert(SI.getTrueValue()->getType() == SI.getType(),
2631         "Select values must have same type as select instruction!", &SI);
2632  visitInstruction(SI);
2633}
2634
2635/// visitUserOp1 - User defined operators shouldn't live beyond the lifetime of
2636/// a pass, if any exist, it's an error.
2637///
2638void Verifier::visitUserOp1(Instruction &I) {
2639  Assert(false, "User-defined operators should not live outside of a pass!", &I);
2640}
2641
2642void Verifier::visitTruncInst(TruncInst &I) {
2643  // Get the source and destination types
2644  Type *SrcTy = I.getOperand(0)->getType();
2645  Type *DestTy = I.getType();
2646
2647  // Get the size of the types in bits, we'll need this later
2648  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2649  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2650
2651  Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2652  Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2653  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2654         "trunc source and destination must both be a vector or neither", &I);
2655  Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2656
2657  visitInstruction(I);
2658}
2659
2660void Verifier::visitZExtInst(ZExtInst &I) {
2661  // Get the source and destination types
2662  Type *SrcTy = I.getOperand(0)->getType();
2663  Type *DestTy = I.getType();
2664
2665  // Get the size of the types in bits, we'll need this later
2666  Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2667  Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2668  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2669         "zext source and destination must both be a vector or neither", &I);
2670  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2671  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2672
2673  Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2674
2675  visitInstruction(I);
2676}
2677
2678void Verifier::visitSExtInst(SExtInst &I) {
2679  // Get the source and destination types
2680  Type *SrcTy = I.getOperand(0)->getType();
2681  Type *DestTy = I.getType();
2682
2683  // Get the size of the types in bits, we'll need this later
2684  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2685  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2686
2687  Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2688  Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2689  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2690         "sext source and destination must both be a vector or neither", &I);
2691  Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2692
2693  visitInstruction(I);
2694}
2695
2696void Verifier::visitFPTruncInst(FPTruncInst &I) {
2697  // Get the source and destination types
2698  Type *SrcTy = I.getOperand(0)->getType();
2699  Type *DestTy = I.getType();
2700  // Get the size of the types in bits, we'll need this later
2701  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2702  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2703
2704  Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2705  Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2706  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2707         "fptrunc source and destination must both be a vector or neither", &I);
2708  Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2709
2710  visitInstruction(I);
2711}
2712
2713void Verifier::visitFPExtInst(FPExtInst &I) {
2714  // Get the source and destination types
2715  Type *SrcTy = I.getOperand(0)->getType();
2716  Type *DestTy = I.getType();
2717
2718  // Get the size of the types in bits, we'll need this later
2719  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2720  unsigned DestBitSize = DestTy->getScalarSizeInBits();
2721
2722  Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2723  Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2724  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2725         "fpext source and destination must both be a vector or neither", &I);
2726  Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2727
2728  visitInstruction(I);
2729}
2730
2731void Verifier::visitUIToFPInst(UIToFPInst &I) {
2732  // Get the source and destination types
2733  Type *SrcTy = I.getOperand(0)->getType();
2734  Type *DestTy = I.getType();
2735
2736  bool SrcVec = SrcTy->isVectorTy();
2737  bool DstVec = DestTy->isVectorTy();
2738
2739  Assert(SrcVec == DstVec,
2740         "UIToFP source and dest must both be vector or scalar", &I);
2741  Assert(SrcTy->isIntOrIntVectorTy(),
2742         "UIToFP source must be integer or integer vector", &I);
2743  Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2744         &I);
2745
2746  if (SrcVec && DstVec)
2747    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
2748               cast<VectorType>(DestTy)->getElementCount(),
2749           "UIToFP source and dest vector length mismatch", &I);
2750
2751  visitInstruction(I);
2752}
2753
2754void Verifier::visitSIToFPInst(SIToFPInst &I) {
2755  // Get the source and destination types
2756  Type *SrcTy = I.getOperand(0)->getType();
2757  Type *DestTy = I.getType();
2758
2759  bool SrcVec = SrcTy->isVectorTy();
2760  bool DstVec = DestTy->isVectorTy();
2761
2762  Assert(SrcVec == DstVec,
2763         "SIToFP source and dest must both be vector or scalar", &I);
2764  Assert(SrcTy->isIntOrIntVectorTy(),
2765         "SIToFP source must be integer or integer vector", &I);
2766  Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2767         &I);
2768
2769  if (SrcVec && DstVec)
2770    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
2771               cast<VectorType>(DestTy)->getElementCount(),
2772           "SIToFP source and dest vector length mismatch", &I);
2773
2774  visitInstruction(I);
2775}
2776
2777void Verifier::visitFPToUIInst(FPToUIInst &I) {
2778  // Get the source and destination types
2779  Type *SrcTy = I.getOperand(0)->getType();
2780  Type *DestTy = I.getType();
2781
2782  bool SrcVec = SrcTy->isVectorTy();
2783  bool DstVec = DestTy->isVectorTy();
2784
2785  Assert(SrcVec == DstVec,
2786         "FPToUI source and dest must both be vector or scalar", &I);
2787  Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2788         &I);
2789  Assert(DestTy->isIntOrIntVectorTy(),
2790         "FPToUI result must be integer or integer vector", &I);
2791
2792  if (SrcVec && DstVec)
2793    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
2794               cast<VectorType>(DestTy)->getElementCount(),
2795           "FPToUI source and dest vector length mismatch", &I);
2796
2797  visitInstruction(I);
2798}
2799
2800void Verifier::visitFPToSIInst(FPToSIInst &I) {
2801  // Get the source and destination types
2802  Type *SrcTy = I.getOperand(0)->getType();
2803  Type *DestTy = I.getType();
2804
2805  bool SrcVec = SrcTy->isVectorTy();
2806  bool DstVec = DestTy->isVectorTy();
2807
2808  Assert(SrcVec == DstVec,
2809         "FPToSI source and dest must both be vector or scalar", &I);
2810  Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2811         &I);
2812  Assert(DestTy->isIntOrIntVectorTy(),
2813         "FPToSI result must be integer or integer vector", &I);
2814
2815  if (SrcVec && DstVec)
2816    Assert(cast<VectorType>(SrcTy)->getElementCount() ==
2817               cast<VectorType>(DestTy)->getElementCount(),
2818           "FPToSI source and dest vector length mismatch", &I);
2819
2820  visitInstruction(I);
2821}
2822
2823void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2824  // Get the source and destination types
2825  Type *SrcTy = I.getOperand(0)->getType();
2826  Type *DestTy = I.getType();
2827
2828  Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2829
2830  if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2831    Assert(!DL.isNonIntegralPointerType(PTy),
2832           "ptrtoint not supported for non-integral pointers");
2833
2834  Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2835  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2836         &I);
2837
2838  if (SrcTy->isVectorTy()) {
2839    auto *VSrc = cast<VectorType>(SrcTy);
2840    auto *VDest = cast<VectorType>(DestTy);
2841    Assert(VSrc->getElementCount() == VDest->getElementCount(),
2842           "PtrToInt Vector width mismatch", &I);
2843  }
2844
2845  visitInstruction(I);
2846}
2847
2848void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2849  // Get the source and destination types
2850  Type *SrcTy = I.getOperand(0)->getType();
2851  Type *DestTy = I.getType();
2852
2853  Assert(SrcTy->isIntOrIntVectorTy(),
2854         "IntToPtr source must be an integral", &I);
2855  Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2856
2857  if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2858    Assert(!DL.isNonIntegralPointerType(PTy),
2859           "inttoptr not supported for non-integral pointers");
2860
2861  Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2862         &I);
2863  if (SrcTy->isVectorTy()) {
2864    auto *VSrc = cast<VectorType>(SrcTy);
2865    auto *VDest = cast<VectorType>(DestTy);
2866    Assert(VSrc->getElementCount() == VDest->getElementCount(),
2867           "IntToPtr Vector width mismatch", &I);
2868  }
2869  visitInstruction(I);
2870}
2871
2872void Verifier::visitBitCastInst(BitCastInst &I) {
2873  Assert(
2874      CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2875      "Invalid bitcast", &I);
2876  visitInstruction(I);
2877}
2878
2879void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2880  Type *SrcTy = I.getOperand(0)->getType();
2881  Type *DestTy = I.getType();
2882
2883  Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2884         &I);
2885  Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2886         &I);
2887  Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2888         "AddrSpaceCast must be between different address spaces", &I);
2889  if (auto *SrcVTy = dyn_cast<VectorType>(SrcTy))
2890    Assert(SrcVTy->getNumElements() ==
2891               cast<VectorType>(DestTy)->getNumElements(),
2892           "AddrSpaceCast vector pointer number of elements mismatch", &I);
2893  visitInstruction(I);
2894}
2895
2896/// visitPHINode - Ensure that a PHI node is well formed.
2897///
2898void Verifier::visitPHINode(PHINode &PN) {
2899  // Ensure that the PHI nodes are all grouped together at the top of the block.
2900  // This can be tested by checking whether the instruction before this is
2901  // either nonexistent (because this is begin()) or is a PHI node.  If not,
2902  // then there is some other instruction before a PHI.
2903  Assert(&PN == &PN.getParent()->front() ||
2904             isa<PHINode>(--BasicBlock::iterator(&PN)),
2905         "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2906
2907  // Check that a PHI doesn't yield a Token.
2908  Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2909
2910  // Check that all of the values of the PHI node have the same type as the
2911  // result, and that the incoming blocks are really basic blocks.
2912  for (Value *IncValue : PN.incoming_values()) {
2913    Assert(PN.getType() == IncValue->getType(),
2914           "PHI node operands are not the same type as the result!", &PN);
2915  }
2916
2917  // All other PHI node constraints are checked in the visitBasicBlock method.
2918
2919  visitInstruction(PN);
2920}
2921
2922void Verifier::visitCallBase(CallBase &Call) {
2923  Assert(Call.getCalledOperand()->getType()->isPointerTy(),
2924         "Called function must be a pointer!", Call);
2925  PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
2926
2927  Assert(FPTy->getElementType()->isFunctionTy(),
2928         "Called function is not pointer to function type!", Call);
2929
2930  Assert(FPTy->getElementType() == Call.getFunctionType(),
2931         "Called function is not the same type as the call!", Call);
2932
2933  FunctionType *FTy = Call.getFunctionType();
2934
2935  // Verify that the correct number of arguments are being passed
2936  if (FTy->isVarArg())
2937    Assert(Call.arg_size() >= FTy->getNumParams(),
2938           "Called function requires more parameters than were provided!",
2939           Call);
2940  else
2941    Assert(Call.arg_size() == FTy->getNumParams(),
2942           "Incorrect number of arguments passed to called function!", Call);
2943
2944  // Verify that all arguments to the call match the function type.
2945  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2946    Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2947           "Call parameter type does not match function signature!",
2948           Call.getArgOperand(i), FTy->getParamType(i), Call);
2949
2950  AttributeList Attrs = Call.getAttributes();
2951
2952  Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2953         "Attribute after last parameter!", Call);
2954
2955  bool IsIntrinsic = Call.getCalledFunction() &&
2956                     Call.getCalledFunction()->getName().startswith("llvm.");
2957
2958  Function *Callee =
2959      dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
2960
2961  if (Attrs.hasFnAttribute(Attribute::Speculatable)) {
2962    // Don't allow speculatable on call sites, unless the underlying function
2963    // declaration is also speculatable.
2964    Assert(Callee && Callee->isSpeculatable(),
2965           "speculatable attribute may not apply to call sites", Call);
2966  }
2967
2968  if (Attrs.hasFnAttribute(Attribute::Preallocated)) {
2969    Assert(Call.getCalledFunction()->getIntrinsicID() ==
2970               Intrinsic::call_preallocated_arg,
2971           "preallocated as a call site attribute can only be on "
2972           "llvm.call.preallocated.arg");
2973  }
2974
2975  // Verify call attributes.
2976  verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
2977
2978  // Conservatively check the inalloca argument.
2979  // We have a bug if we can find that there is an underlying alloca without
2980  // inalloca.
2981  if (Call.hasInAllocaArgument()) {
2982    Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2983    if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2984      Assert(AI->isUsedWithInAlloca(),
2985             "inalloca argument for call has mismatched alloca", AI, Call);
2986  }
2987
2988  // For each argument of the callsite, if it has the swifterror argument,
2989  // make sure the underlying alloca/parameter it comes from has a swifterror as
2990  // well.
2991  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
2992    if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2993      Value *SwiftErrorArg = Call.getArgOperand(i);
2994      if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2995        Assert(AI->isSwiftError(),
2996               "swifterror argument for call has mismatched alloca", AI, Call);
2997        continue;
2998      }
2999      auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
3000      Assert(ArgI,
3001             "swifterror argument should come from an alloca or parameter",
3002             SwiftErrorArg, Call);
3003      Assert(ArgI->hasSwiftErrorAttr(),
3004             "swifterror argument for call has mismatched parameter", ArgI,
3005             Call);
3006    }
3007
3008    if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) {
3009      // Don't allow immarg on call sites, unless the underlying declaration
3010      // also has the matching immarg.
3011      Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
3012             "immarg may not apply only to call sites",
3013             Call.getArgOperand(i), Call);
3014    }
3015
3016    if (Call.paramHasAttr(i, Attribute::ImmArg)) {
3017      Value *ArgVal = Call.getArgOperand(i);
3018      Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
3019             "immarg operand has non-immediate parameter", ArgVal, Call);
3020    }
3021
3022    if (Call.paramHasAttr(i, Attribute::Preallocated)) {
3023      Value *ArgVal = Call.getArgOperand(i);
3024      bool hasOB =
3025          Call.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0;
3026      bool isMustTail = Call.isMustTailCall();
3027      Assert(hasOB != isMustTail,
3028             "preallocated operand either requires a preallocated bundle or "
3029             "the call to be musttail (but not both)",
3030             ArgVal, Call);
3031    }
3032  }
3033
3034  if (FTy->isVarArg()) {
3035    // FIXME? is 'nest' even legal here?
3036    bool SawNest = false;
3037    bool SawReturned = false;
3038
3039    for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
3040      if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
3041        SawNest = true;
3042      if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
3043        SawReturned = true;
3044    }
3045
3046    // Check attributes on the varargs part.
3047    for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
3048      Type *Ty = Call.getArgOperand(Idx)->getType();
3049      AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
3050      verifyParameterAttrs(ArgAttrs, Ty, &Call);
3051
3052      if (ArgAttrs.hasAttribute(Attribute::Nest)) {
3053        Assert(!SawNest, "More than one parameter has attribute nest!", Call);
3054        SawNest = true;
3055      }
3056
3057      if (ArgAttrs.hasAttribute(Attribute::Returned)) {
3058        Assert(!SawReturned, "More than one parameter has attribute returned!",
3059               Call);
3060        Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
3061               "Incompatible argument and return types for 'returned' "
3062               "attribute",
3063               Call);
3064        SawReturned = true;
3065      }
3066
3067      // Statepoint intrinsic is vararg but the wrapped function may be not.
3068      // Allow sret here and check the wrapped function in verifyStatepoint.
3069      if (!Call.getCalledFunction() ||
3070          Call.getCalledFunction()->getIntrinsicID() !=
3071              Intrinsic::experimental_gc_statepoint)
3072        Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
3073               "Attribute 'sret' cannot be used for vararg call arguments!",
3074               Call);
3075
3076      if (ArgAttrs.hasAttribute(Attribute::InAlloca))
3077        Assert(Idx == Call.arg_size() - 1,
3078               "inalloca isn't on the last argument!", Call);
3079    }
3080  }
3081
3082  // Verify that there's no metadata unless it's a direct call to an intrinsic.
3083  if (!IsIntrinsic) {
3084    for (Type *ParamTy : FTy->params()) {
3085      Assert(!ParamTy->isMetadataTy(),
3086             "Function has metadata parameter but isn't an intrinsic", Call);
3087      Assert(!ParamTy->isTokenTy(),
3088             "Function has token parameter but isn't an intrinsic", Call);
3089    }
3090  }
3091
3092  // Verify that indirect calls don't return tokens.
3093  if (!Call.getCalledFunction())
3094    Assert(!FTy->getReturnType()->isTokenTy(),
3095           "Return type cannot be token for indirect call!");
3096
3097  if (Function *F = Call.getCalledFunction())
3098    if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
3099      visitIntrinsicCall(ID, Call);
3100
3101  // Verify that a callsite has at most one "deopt", at most one "funclet", at
3102  // most one "gc-transition", at most one "cfguardtarget",
3103  // and at most one "preallocated" operand bundle.
3104  bool FoundDeoptBundle = false, FoundFuncletBundle = false,
3105       FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
3106       FoundPreallocatedBundle = false, FoundGCLiveBundle = false;;
3107  for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
3108    OperandBundleUse BU = Call.getOperandBundleAt(i);
3109    uint32_t Tag = BU.getTagID();
3110    if (Tag == LLVMContext::OB_deopt) {
3111      Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
3112      FoundDeoptBundle = true;
3113    } else if (Tag == LLVMContext::OB_gc_transition) {
3114      Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
3115             Call);
3116      FoundGCTransitionBundle = true;
3117    } else if (Tag == LLVMContext::OB_funclet) {
3118      Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
3119      FoundFuncletBundle = true;
3120      Assert(BU.Inputs.size() == 1,
3121             "Expected exactly one funclet bundle operand", Call);
3122      Assert(isa<FuncletPadInst>(BU.Inputs.front()),
3123             "Funclet bundle operands should correspond to a FuncletPadInst",
3124             Call);
3125    } else if (Tag == LLVMContext::OB_cfguardtarget) {
3126      Assert(!FoundCFGuardTargetBundle,
3127             "Multiple CFGuardTarget operand bundles", Call);
3128      FoundCFGuardTargetBundle = true;
3129      Assert(BU.Inputs.size() == 1,
3130             "Expected exactly one cfguardtarget bundle operand", Call);
3131    } else if (Tag == LLVMContext::OB_preallocated) {
3132      Assert(!FoundPreallocatedBundle, "Multiple preallocated operand bundles",
3133             Call);
3134      FoundPreallocatedBundle = true;
3135      Assert(BU.Inputs.size() == 1,
3136             "Expected exactly one preallocated bundle operand", Call);
3137      auto Input = dyn_cast<IntrinsicInst>(BU.Inputs.front());
3138      Assert(Input &&
3139                 Input->getIntrinsicID() == Intrinsic::call_preallocated_setup,
3140             "\"preallocated\" argument must be a token from "
3141             "llvm.call.preallocated.setup",
3142             Call);
3143    } else if (Tag == LLVMContext::OB_gc_live) {
3144      Assert(!FoundGCLiveBundle, "Multiple gc-live operand bundles",
3145             Call);
3146      FoundGCLiveBundle = true;
3147    }
3148  }
3149
3150  // Verify that each inlinable callsite of a debug-info-bearing function in a
3151  // debug-info-bearing function has a debug location attached to it. Failure to
3152  // do so causes assertion failures when the inliner sets up inline scope info.
3153  if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
3154      Call.getCalledFunction()->getSubprogram())
3155    AssertDI(Call.getDebugLoc(),
3156             "inlinable function call in a function with "
3157             "debug info must have a !dbg location",
3158             Call);
3159
3160  visitInstruction(Call);
3161}
3162
3163/// Two types are "congruent" if they are identical, or if they are both pointer
3164/// types with different pointee types and the same address space.
3165static bool isTypeCongruent(Type *L, Type *R) {
3166  if (L == R)
3167    return true;
3168  PointerType *PL = dyn_cast<PointerType>(L);
3169  PointerType *PR = dyn_cast<PointerType>(R);
3170  if (!PL || !PR)
3171    return false;
3172  return PL->getAddressSpace() == PR->getAddressSpace();
3173}
3174
3175static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
3176  static const Attribute::AttrKind ABIAttrs[] = {
3177      Attribute::StructRet,   Attribute::ByVal,     Attribute::InAlloca,
3178      Attribute::InReg,       Attribute::SwiftSelf, Attribute::SwiftError,
3179      Attribute::Preallocated};
3180  AttrBuilder Copy;
3181  for (auto AK : ABIAttrs) {
3182    if (Attrs.hasParamAttribute(I, AK))
3183      Copy.addAttribute(AK);
3184  }
3185  // `align` is ABI-affecting only in combination with `byval`.
3186  if (Attrs.hasParamAttribute(I, Attribute::Alignment) &&
3187      Attrs.hasParamAttribute(I, Attribute::ByVal))
3188    Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3189  return Copy;
3190}
3191
3192void Verifier::verifyMustTailCall(CallInst &CI) {
3193  Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3194
3195  // - The caller and callee prototypes must match.  Pointer types of
3196  //   parameters or return types may differ in pointee type, but not
3197  //   address space.
3198  Function *F = CI.getParent()->getParent();
3199  FunctionType *CallerTy = F->getFunctionType();
3200  FunctionType *CalleeTy = CI.getFunctionType();
3201  if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3202    Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3203           "cannot guarantee tail call due to mismatched parameter counts",
3204           &CI);
3205    for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3206      Assert(
3207          isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3208          "cannot guarantee tail call due to mismatched parameter types", &CI);
3209    }
3210  }
3211  Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3212         "cannot guarantee tail call due to mismatched varargs", &CI);
3213  Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3214         "cannot guarantee tail call due to mismatched return types", &CI);
3215
3216  // - The calling conventions of the caller and callee must match.
3217  Assert(F->getCallingConv() == CI.getCallingConv(),
3218         "cannot guarantee tail call due to mismatched calling conv", &CI);
3219
3220  // - All ABI-impacting function attributes, such as sret, byval, inreg,
3221  //   returned, preallocated, and inalloca, must match.
3222  AttributeList CallerAttrs = F->getAttributes();
3223  AttributeList CalleeAttrs = CI.getAttributes();
3224  for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3225    AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
3226    AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
3227    Assert(CallerABIAttrs == CalleeABIAttrs,
3228           "cannot guarantee tail call due to mismatched ABI impacting "
3229           "function attributes",
3230           &CI, CI.getOperand(I));
3231  }
3232
3233  // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3234  //   or a pointer bitcast followed by a ret instruction.
3235  // - The ret instruction must return the (possibly bitcasted) value
3236  //   produced by the call or void.
3237  Value *RetVal = &CI;
3238  Instruction *Next = CI.getNextNode();
3239
3240  // Handle the optional bitcast.
3241  if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3242    Assert(BI->getOperand(0) == RetVal,
3243           "bitcast following musttail call must use the call", BI);
3244    RetVal = BI;
3245    Next = BI->getNextNode();
3246  }
3247
3248  // Check the return.
3249  ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3250  Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3251         &CI);
3252  Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3253         "musttail call result must be returned", Ret);
3254}
3255
3256void Verifier::visitCallInst(CallInst &CI) {
3257  visitCallBase(CI);
3258
3259  if (CI.isMustTailCall())
3260    verifyMustTailCall(CI);
3261}
3262
3263void Verifier::visitInvokeInst(InvokeInst &II) {
3264  visitCallBase(II);
3265
3266  // Verify that the first non-PHI instruction of the unwind destination is an
3267  // exception handling instruction.
3268  Assert(
3269      II.getUnwindDest()->isEHPad(),
3270      "The unwind destination does not have an exception handling instruction!",
3271      &II);
3272
3273  visitTerminator(II);
3274}
3275
3276/// visitUnaryOperator - Check the argument to the unary operator.
3277///
3278void Verifier::visitUnaryOperator(UnaryOperator &U) {
3279  Assert(U.getType() == U.getOperand(0)->getType(),
3280         "Unary operators must have same type for"
3281         "operands and result!",
3282         &U);
3283
3284  switch (U.getOpcode()) {
3285  // Check that floating-point arithmetic operators are only used with
3286  // floating-point operands.
3287  case Instruction::FNeg:
3288    Assert(U.getType()->isFPOrFPVectorTy(),
3289           "FNeg operator only works with float types!", &U);
3290    break;
3291  default:
3292    llvm_unreachable("Unknown UnaryOperator opcode!");
3293  }
3294
3295  visitInstruction(U);
3296}
3297
3298/// visitBinaryOperator - Check that both arguments to the binary operator are
3299/// of the same type!
3300///
3301void Verifier::visitBinaryOperator(BinaryOperator &B) {
3302  Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3303         "Both operands to a binary operator are not of the same type!", &B);
3304
3305  switch (B.getOpcode()) {
3306  // Check that integer arithmetic operators are only used with
3307  // integral operands.
3308  case Instruction::Add:
3309  case Instruction::Sub:
3310  case Instruction::Mul:
3311  case Instruction::SDiv:
3312  case Instruction::UDiv:
3313  case Instruction::SRem:
3314  case Instruction::URem:
3315    Assert(B.getType()->isIntOrIntVectorTy(),
3316           "Integer arithmetic operators only work with integral types!", &B);
3317    Assert(B.getType() == B.getOperand(0)->getType(),
3318           "Integer arithmetic operators must have same type "
3319           "for operands and result!",
3320           &B);
3321    break;
3322  // Check that floating-point arithmetic operators are only used with
3323  // floating-point operands.
3324  case Instruction::FAdd:
3325  case Instruction::FSub:
3326  case Instruction::FMul:
3327  case Instruction::FDiv:
3328  case Instruction::FRem:
3329    Assert(B.getType()->isFPOrFPVectorTy(),
3330           "Floating-point arithmetic operators only work with "
3331           "floating-point types!",
3332           &B);
3333    Assert(B.getType() == B.getOperand(0)->getType(),
3334           "Floating-point arithmetic operators must have same type "
3335           "for operands and result!",
3336           &B);
3337    break;
3338  // Check that logical operators are only used with integral operands.
3339  case Instruction::And:
3340  case Instruction::Or:
3341  case Instruction::Xor:
3342    Assert(B.getType()->isIntOrIntVectorTy(),
3343           "Logical operators only work with integral types!", &B);
3344    Assert(B.getType() == B.getOperand(0)->getType(),
3345           "Logical operators must have same type for operands and result!",
3346           &B);
3347    break;
3348  case Instruction::Shl:
3349  case Instruction::LShr:
3350  case Instruction::AShr:
3351    Assert(B.getType()->isIntOrIntVectorTy(),
3352           "Shifts only work with integral types!", &B);
3353    Assert(B.getType() == B.getOperand(0)->getType(),
3354           "Shift return type must be same as operands!", &B);
3355    break;
3356  default:
3357    llvm_unreachable("Unknown BinaryOperator opcode!");
3358  }
3359
3360  visitInstruction(B);
3361}
3362
3363void Verifier::visitICmpInst(ICmpInst &IC) {
3364  // Check that the operands are the same type
3365  Type *Op0Ty = IC.getOperand(0)->getType();
3366  Type *Op1Ty = IC.getOperand(1)->getType();
3367  Assert(Op0Ty == Op1Ty,
3368         "Both operands to ICmp instruction are not of the same type!", &IC);
3369  // Check that the operands are the right type
3370  Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3371         "Invalid operand types for ICmp instruction", &IC);
3372  // Check that the predicate is valid.
3373  Assert(IC.isIntPredicate(),
3374         "Invalid predicate in ICmp instruction!", &IC);
3375
3376  visitInstruction(IC);
3377}
3378
3379void Verifier::visitFCmpInst(FCmpInst &FC) {
3380  // Check that the operands are the same type
3381  Type *Op0Ty = FC.getOperand(0)->getType();
3382  Type *Op1Ty = FC.getOperand(1)->getType();
3383  Assert(Op0Ty == Op1Ty,
3384         "Both operands to FCmp instruction are not of the same type!", &FC);
3385  // Check that the operands are the right type
3386  Assert(Op0Ty->isFPOrFPVectorTy(),
3387         "Invalid operand types for FCmp instruction", &FC);
3388  // Check that the predicate is valid.
3389  Assert(FC.isFPPredicate(),
3390         "Invalid predicate in FCmp instruction!", &FC);
3391
3392  visitInstruction(FC);
3393}
3394
3395void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3396  Assert(
3397      ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3398      "Invalid extractelement operands!", &EI);
3399  visitInstruction(EI);
3400}
3401
3402void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3403  Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3404                                            IE.getOperand(2)),
3405         "Invalid insertelement operands!", &IE);
3406  visitInstruction(IE);
3407}
3408
3409void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3410  Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3411                                            SV.getShuffleMask()),
3412         "Invalid shufflevector operands!", &SV);
3413  visitInstruction(SV);
3414}
3415
3416void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3417  Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3418
3419  Assert(isa<PointerType>(TargetTy),
3420         "GEP base pointer is not a vector or a vector of pointers", &GEP);
3421  Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3422
3423  SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3424  Assert(all_of(
3425      Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3426      "GEP indexes must be integers", &GEP);
3427  Type *ElTy =
3428      GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3429  Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3430
3431  Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3432             GEP.getResultElementType() == ElTy,
3433         "GEP is not of right type for indices!", &GEP, ElTy);
3434
3435  if (auto *GEPVTy = dyn_cast<VectorType>(GEP.getType())) {
3436    // Additional checks for vector GEPs.
3437    ElementCount GEPWidth = GEPVTy->getElementCount();
3438    if (GEP.getPointerOperandType()->isVectorTy())
3439      Assert(
3440          GEPWidth ==
3441              cast<VectorType>(GEP.getPointerOperandType())->getElementCount(),
3442          "Vector GEP result width doesn't match operand's", &GEP);
3443    for (Value *Idx : Idxs) {
3444      Type *IndexTy = Idx->getType();
3445      if (auto *IndexVTy = dyn_cast<VectorType>(IndexTy)) {
3446        ElementCount IndexWidth = IndexVTy->getElementCount();
3447        Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3448      }
3449      Assert(IndexTy->isIntOrIntVectorTy(),
3450             "All GEP indices should be of integer type");
3451    }
3452  }
3453
3454  if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3455    Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3456           "GEP address space doesn't match type", &GEP);
3457  }
3458
3459  visitInstruction(GEP);
3460}
3461
3462static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3463  return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3464}
3465
3466void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3467  assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3468         "precondition violation");
3469
3470  unsigned NumOperands = Range->getNumOperands();
3471  Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3472  unsigned NumRanges = NumOperands / 2;
3473  Assert(NumRanges >= 1, "It should have at least one range!", Range);
3474
3475  ConstantRange LastRange(1, true); // Dummy initial value
3476  for (unsigned i = 0; i < NumRanges; ++i) {
3477    ConstantInt *Low =
3478        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3479    Assert(Low, "The lower limit must be an integer!", Low);
3480    ConstantInt *High =
3481        mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3482    Assert(High, "The upper limit must be an integer!", High);
3483    Assert(High->getType() == Low->getType() && High->getType() == Ty,
3484           "Range types must match instruction type!", &I);
3485
3486    APInt HighV = High->getValue();
3487    APInt LowV = Low->getValue();
3488    ConstantRange CurRange(LowV, HighV);
3489    Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3490           "Range must not be empty!", Range);
3491    if (i != 0) {
3492      Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3493             "Intervals are overlapping", Range);
3494      Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3495             Range);
3496      Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3497             Range);
3498    }
3499    LastRange = ConstantRange(LowV, HighV);
3500  }
3501  if (NumRanges > 2) {
3502    APInt FirstLow =
3503        mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3504    APInt FirstHigh =
3505        mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3506    ConstantRange FirstRange(FirstLow, FirstHigh);
3507    Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3508           "Intervals are overlapping", Range);
3509    Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3510           Range);
3511  }
3512}
3513
3514void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3515  unsigned Size = DL.getTypeSizeInBits(Ty);
3516  Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3517  Assert(!(Size & (Size - 1)),
3518         "atomic memory access' operand must have a power-of-two size", Ty, I);
3519}
3520
3521void Verifier::visitLoadInst(LoadInst &LI) {
3522  PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3523  Assert(PTy, "Load operand must be a pointer.", &LI);
3524  Type *ElTy = LI.getType();
3525  Assert(LI.getAlignment() <= Value::MaximumAlignment,
3526         "huge alignment values are unsupported", &LI);
3527  Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3528  if (LI.isAtomic()) {
3529    Assert(LI.getOrdering() != AtomicOrdering::Release &&
3530               LI.getOrdering() != AtomicOrdering::AcquireRelease,
3531           "Load cannot have Release ordering", &LI);
3532    Assert(LI.getAlignment() != 0,
3533           "Atomic load must specify explicit alignment", &LI);
3534    Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3535           "atomic load operand must have integer, pointer, or floating point "
3536           "type!",
3537           ElTy, &LI);
3538    checkAtomicMemAccessSize(ElTy, &LI);
3539  } else {
3540    Assert(LI.getSyncScopeID() == SyncScope::System,
3541           "Non-atomic load cannot have SynchronizationScope specified", &LI);
3542  }
3543
3544  visitInstruction(LI);
3545}
3546
3547void Verifier::visitStoreInst(StoreInst &SI) {
3548  PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3549  Assert(PTy, "Store operand must be a pointer.", &SI);
3550  Type *ElTy = PTy->getElementType();
3551  Assert(ElTy == SI.getOperand(0)->getType(),
3552         "Stored value type does not match pointer operand type!", &SI, ElTy);
3553  Assert(SI.getAlignment() <= Value::MaximumAlignment,
3554         "huge alignment values are unsupported", &SI);
3555  Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3556  if (SI.isAtomic()) {
3557    Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3558               SI.getOrdering() != AtomicOrdering::AcquireRelease,
3559           "Store cannot have Acquire ordering", &SI);
3560    Assert(SI.getAlignment() != 0,
3561           "Atomic store must specify explicit alignment", &SI);
3562    Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3563           "atomic store operand must have integer, pointer, or floating point "
3564           "type!",
3565           ElTy, &SI);
3566    checkAtomicMemAccessSize(ElTy, &SI);
3567  } else {
3568    Assert(SI.getSyncScopeID() == SyncScope::System,
3569           "Non-atomic store cannot have SynchronizationScope specified", &SI);
3570  }
3571  visitInstruction(SI);
3572}
3573
3574/// Check that SwiftErrorVal is used as a swifterror argument in CS.
3575void Verifier::verifySwiftErrorCall(CallBase &Call,
3576                                    const Value *SwiftErrorVal) {
3577  unsigned Idx = 0;
3578  for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3579    if (*I == SwiftErrorVal) {
3580      Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3581             "swifterror value when used in a callsite should be marked "
3582             "with swifterror attribute",
3583             SwiftErrorVal, Call);
3584    }
3585  }
3586}
3587
3588void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3589  // Check that swifterror value is only used by loads, stores, or as
3590  // a swifterror argument.
3591  for (const User *U : SwiftErrorVal->users()) {
3592    Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3593           isa<InvokeInst>(U),
3594           "swifterror value can only be loaded and stored from, or "
3595           "as a swifterror argument!",
3596           SwiftErrorVal, U);
3597    // If it is used by a store, check it is the second operand.
3598    if (auto StoreI = dyn_cast<StoreInst>(U))
3599      Assert(StoreI->getOperand(1) == SwiftErrorVal,
3600             "swifterror value should be the second operand when used "
3601             "by stores", SwiftErrorVal, U);
3602    if (auto *Call = dyn_cast<CallBase>(U))
3603      verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3604  }
3605}
3606
3607void Verifier::visitAllocaInst(AllocaInst &AI) {
3608  SmallPtrSet<Type*, 4> Visited;
3609  PointerType *PTy = AI.getType();
3610  // TODO: Relax this restriction?
3611  Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3612         "Allocation instruction pointer not in the stack address space!",
3613         &AI);
3614  Assert(AI.getAllocatedType()->isSized(&Visited),
3615         "Cannot allocate unsized type", &AI);
3616  Assert(AI.getArraySize()->getType()->isIntegerTy(),
3617         "Alloca array size must have integer type", &AI);
3618  Assert(AI.getAlignment() <= Value::MaximumAlignment,
3619         "huge alignment values are unsupported", &AI);
3620
3621  if (AI.isSwiftError()) {
3622    verifySwiftErrorValue(&AI);
3623  }
3624
3625  visitInstruction(AI);
3626}
3627
3628void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3629
3630  // FIXME: more conditions???
3631  Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3632         "cmpxchg instructions must be atomic.", &CXI);
3633  Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3634         "cmpxchg instructions must be atomic.", &CXI);
3635  Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3636         "cmpxchg instructions cannot be unordered.", &CXI);
3637  Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3638         "cmpxchg instructions cannot be unordered.", &CXI);
3639  Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3640         "cmpxchg instructions failure argument shall be no stronger than the "
3641         "success argument",
3642         &CXI);
3643  Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3644             CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3645         "cmpxchg failure ordering cannot include release semantics", &CXI);
3646
3647  PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3648  Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3649  Type *ElTy = PTy->getElementType();
3650  Assert(ElTy->isIntOrPtrTy(),
3651         "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3652  checkAtomicMemAccessSize(ElTy, &CXI);
3653  Assert(ElTy == CXI.getOperand(1)->getType(),
3654         "Expected value type does not match pointer operand type!", &CXI,
3655         ElTy);
3656  Assert(ElTy == CXI.getOperand(2)->getType(),
3657         "Stored value type does not match pointer operand type!", &CXI, ElTy);
3658  visitInstruction(CXI);
3659}
3660
3661void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3662  Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3663         "atomicrmw instructions must be atomic.", &RMWI);
3664  Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3665         "atomicrmw instructions cannot be unordered.", &RMWI);
3666  auto Op = RMWI.getOperation();
3667  PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3668  Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3669  Type *ElTy = PTy->getElementType();
3670  if (Op == AtomicRMWInst::Xchg) {
3671    Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3672           AtomicRMWInst::getOperationName(Op) +
3673           " operand must have integer or floating point type!",
3674           &RMWI, ElTy);
3675  } else if (AtomicRMWInst::isFPOperation(Op)) {
3676    Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3677           AtomicRMWInst::getOperationName(Op) +
3678           " operand must have floating point type!",
3679           &RMWI, ElTy);
3680  } else {
3681    Assert(ElTy->isIntegerTy(), "atomicrmw " +
3682           AtomicRMWInst::getOperationName(Op) +
3683           " operand must have integer type!",
3684           &RMWI, ElTy);
3685  }
3686  checkAtomicMemAccessSize(ElTy, &RMWI);
3687  Assert(ElTy == RMWI.getOperand(1)->getType(),
3688         "Argument value type does not match pointer operand type!", &RMWI,
3689         ElTy);
3690  Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
3691         "Invalid binary operation!", &RMWI);
3692  visitInstruction(RMWI);
3693}
3694
3695void Verifier::visitFenceInst(FenceInst &FI) {
3696  const AtomicOrdering Ordering = FI.getOrdering();
3697  Assert(Ordering == AtomicOrdering::Acquire ||
3698             Ordering == AtomicOrdering::Release ||
3699             Ordering == AtomicOrdering::AcquireRelease ||
3700             Ordering == AtomicOrdering::SequentiallyConsistent,
3701         "fence instructions may only have acquire, release, acq_rel, or "
3702         "seq_cst ordering.",
3703         &FI);
3704  visitInstruction(FI);
3705}
3706
3707void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3708  Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3709                                          EVI.getIndices()) == EVI.getType(),
3710         "Invalid ExtractValueInst operands!", &EVI);
3711
3712  visitInstruction(EVI);
3713}
3714
3715void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3716  Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3717                                          IVI.getIndices()) ==
3718             IVI.getOperand(1)->getType(),
3719         "Invalid InsertValueInst operands!", &IVI);
3720
3721  visitInstruction(IVI);
3722}
3723
3724static Value *getParentPad(Value *EHPad) {
3725  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3726    return FPI->getParentPad();
3727
3728  return cast<CatchSwitchInst>(EHPad)->getParentPad();
3729}
3730
3731void Verifier::visitEHPadPredecessors(Instruction &I) {
3732  assert(I.isEHPad());
3733
3734  BasicBlock *BB = I.getParent();
3735  Function *F = BB->getParent();
3736
3737  Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3738
3739  if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3740    // The landingpad instruction defines its parent as a landing pad block. The
3741    // landing pad block may be branched to only by the unwind edge of an
3742    // invoke.
3743    for (BasicBlock *PredBB : predecessors(BB)) {
3744      const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3745      Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3746             "Block containing LandingPadInst must be jumped to "
3747             "only by the unwind edge of an invoke.",
3748             LPI);
3749    }
3750    return;
3751  }
3752  if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3753    if (!pred_empty(BB))
3754      Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3755             "Block containg CatchPadInst must be jumped to "
3756             "only by its catchswitch.",
3757             CPI);
3758    Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3759           "Catchswitch cannot unwind to one of its catchpads",
3760           CPI->getCatchSwitch(), CPI);
3761    return;
3762  }
3763
3764  // Verify that each pred has a legal terminator with a legal to/from EH
3765  // pad relationship.
3766  Instruction *ToPad = &I;
3767  Value *ToPadParent = getParentPad(ToPad);
3768  for (BasicBlock *PredBB : predecessors(BB)) {
3769    Instruction *TI = PredBB->getTerminator();
3770    Value *FromPad;
3771    if (auto *II = dyn_cast<InvokeInst>(TI)) {
3772      Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3773             "EH pad must be jumped to via an unwind edge", ToPad, II);
3774      if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3775        FromPad = Bundle->Inputs[0];
3776      else
3777        FromPad = ConstantTokenNone::get(II->getContext());
3778    } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3779      FromPad = CRI->getOperand(0);
3780      Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3781    } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3782      FromPad = CSI;
3783    } else {
3784      Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3785    }
3786
3787    // The edge may exit from zero or more nested pads.
3788    SmallSet<Value *, 8> Seen;
3789    for (;; FromPad = getParentPad(FromPad)) {
3790      Assert(FromPad != ToPad,
3791             "EH pad cannot handle exceptions raised within it", FromPad, TI);
3792      if (FromPad == ToPadParent) {
3793        // This is a legal unwind edge.
3794        break;
3795      }
3796      Assert(!isa<ConstantTokenNone>(FromPad),
3797             "A single unwind edge may only enter one EH pad", TI);
3798      Assert(Seen.insert(FromPad).second,
3799             "EH pad jumps through a cycle of pads", FromPad);
3800    }
3801  }
3802}
3803
3804void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3805  // The landingpad instruction is ill-formed if it doesn't have any clauses and
3806  // isn't a cleanup.
3807  Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3808         "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3809
3810  visitEHPadPredecessors(LPI);
3811
3812  if (!LandingPadResultTy)
3813    LandingPadResultTy = LPI.getType();
3814  else
3815    Assert(LandingPadResultTy == LPI.getType(),
3816           "The landingpad instruction should have a consistent result type "
3817           "inside a function.",
3818           &LPI);
3819
3820  Function *F = LPI.getParent()->getParent();
3821  Assert(F->hasPersonalityFn(),
3822         "LandingPadInst needs to be in a function with a personality.", &LPI);
3823
3824  // The landingpad instruction must be the first non-PHI instruction in the
3825  // block.
3826  Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3827         "LandingPadInst not the first non-PHI instruction in the block.",
3828         &LPI);
3829
3830  for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3831    Constant *Clause = LPI.getClause(i);
3832    if (LPI.isCatch(i)) {
3833      Assert(isa<PointerType>(Clause->getType()),
3834             "Catch operand does not have pointer type!", &LPI);
3835    } else {
3836      Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3837      Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3838             "Filter operand is not an array of constants!", &LPI);
3839    }
3840  }
3841
3842  visitInstruction(LPI);
3843}
3844
3845void Verifier::visitResumeInst(ResumeInst &RI) {
3846  Assert(RI.getFunction()->hasPersonalityFn(),
3847         "ResumeInst needs to be in a function with a personality.", &RI);
3848
3849  if (!LandingPadResultTy)
3850    LandingPadResultTy = RI.getValue()->getType();
3851  else
3852    Assert(LandingPadResultTy == RI.getValue()->getType(),
3853           "The resume instruction should have a consistent result type "
3854           "inside a function.",
3855           &RI);
3856
3857  visitTerminator(RI);
3858}
3859
3860void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3861  BasicBlock *BB = CPI.getParent();
3862
3863  Function *F = BB->getParent();
3864  Assert(F->hasPersonalityFn(),
3865         "CatchPadInst needs to be in a function with a personality.", &CPI);
3866
3867  Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3868         "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3869         CPI.getParentPad());
3870
3871  // The catchpad instruction must be the first non-PHI instruction in the
3872  // block.
3873  Assert(BB->getFirstNonPHI() == &CPI,
3874         "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3875
3876  visitEHPadPredecessors(CPI);
3877  visitFuncletPadInst(CPI);
3878}
3879
3880void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3881  Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3882         "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3883         CatchReturn.getOperand(0));
3884
3885  visitTerminator(CatchReturn);
3886}
3887
3888void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3889  BasicBlock *BB = CPI.getParent();
3890
3891  Function *F = BB->getParent();
3892  Assert(F->hasPersonalityFn(),
3893         "CleanupPadInst needs to be in a function with a personality.", &CPI);
3894
3895  // The cleanuppad instruction must be the first non-PHI instruction in the
3896  // block.
3897  Assert(BB->getFirstNonPHI() == &CPI,
3898         "CleanupPadInst not the first non-PHI instruction in the block.",
3899         &CPI);
3900
3901  auto *ParentPad = CPI.getParentPad();
3902  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3903         "CleanupPadInst has an invalid parent.", &CPI);
3904
3905  visitEHPadPredecessors(CPI);
3906  visitFuncletPadInst(CPI);
3907}
3908
3909void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3910  User *FirstUser = nullptr;
3911  Value *FirstUnwindPad = nullptr;
3912  SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3913  SmallSet<FuncletPadInst *, 8> Seen;
3914
3915  while (!Worklist.empty()) {
3916    FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3917    Assert(Seen.insert(CurrentPad).second,
3918           "FuncletPadInst must not be nested within itself", CurrentPad);
3919    Value *UnresolvedAncestorPad = nullptr;
3920    for (User *U : CurrentPad->users()) {
3921      BasicBlock *UnwindDest;
3922      if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3923        UnwindDest = CRI->getUnwindDest();
3924      } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3925        // We allow catchswitch unwind to caller to nest
3926        // within an outer pad that unwinds somewhere else,
3927        // because catchswitch doesn't have a nounwind variant.
3928        // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3929        if (CSI->unwindsToCaller())
3930          continue;
3931        UnwindDest = CSI->getUnwindDest();
3932      } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3933        UnwindDest = II->getUnwindDest();
3934      } else if (isa<CallInst>(U)) {
3935        // Calls which don't unwind may be found inside funclet
3936        // pads that unwind somewhere else.  We don't *require*
3937        // such calls to be annotated nounwind.
3938        continue;
3939      } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3940        // The unwind dest for a cleanup can only be found by
3941        // recursive search.  Add it to the worklist, and we'll
3942        // search for its first use that determines where it unwinds.
3943        Worklist.push_back(CPI);
3944        continue;
3945      } else {
3946        Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3947        continue;
3948      }
3949
3950      Value *UnwindPad;
3951      bool ExitsFPI;
3952      if (UnwindDest) {
3953        UnwindPad = UnwindDest->getFirstNonPHI();
3954        if (!cast<Instruction>(UnwindPad)->isEHPad())
3955          continue;
3956        Value *UnwindParent = getParentPad(UnwindPad);
3957        // Ignore unwind edges that don't exit CurrentPad.
3958        if (UnwindParent == CurrentPad)
3959          continue;
3960        // Determine whether the original funclet pad is exited,
3961        // and if we are scanning nested pads determine how many
3962        // of them are exited so we can stop searching their
3963        // children.
3964        Value *ExitedPad = CurrentPad;
3965        ExitsFPI = false;
3966        do {
3967          if (ExitedPad == &FPI) {
3968            ExitsFPI = true;
3969            // Now we can resolve any ancestors of CurrentPad up to
3970            // FPI, but not including FPI since we need to make sure
3971            // to check all direct users of FPI for consistency.
3972            UnresolvedAncestorPad = &FPI;
3973            break;
3974          }
3975          Value *ExitedParent = getParentPad(ExitedPad);
3976          if (ExitedParent == UnwindParent) {
3977            // ExitedPad is the ancestor-most pad which this unwind
3978            // edge exits, so we can resolve up to it, meaning that
3979            // ExitedParent is the first ancestor still unresolved.
3980            UnresolvedAncestorPad = ExitedParent;
3981            break;
3982          }
3983          ExitedPad = ExitedParent;
3984        } while (!isa<ConstantTokenNone>(ExitedPad));
3985      } else {
3986        // Unwinding to caller exits all pads.
3987        UnwindPad = ConstantTokenNone::get(FPI.getContext());
3988        ExitsFPI = true;
3989        UnresolvedAncestorPad = &FPI;
3990      }
3991
3992      if (ExitsFPI) {
3993        // This unwind edge exits FPI.  Make sure it agrees with other
3994        // such edges.
3995        if (FirstUser) {
3996          Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3997                                              "pad must have the same unwind "
3998                                              "dest",
3999                 &FPI, U, FirstUser);
4000        } else {
4001          FirstUser = U;
4002          FirstUnwindPad = UnwindPad;
4003          // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
4004          if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
4005              getParentPad(UnwindPad) == getParentPad(&FPI))
4006            SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
4007        }
4008      }
4009      // Make sure we visit all uses of FPI, but for nested pads stop as
4010      // soon as we know where they unwind to.
4011      if (CurrentPad != &FPI)
4012        break;
4013    }
4014    if (UnresolvedAncestorPad) {
4015      if (CurrentPad == UnresolvedAncestorPad) {
4016        // When CurrentPad is FPI itself, we don't mark it as resolved even if
4017        // we've found an unwind edge that exits it, because we need to verify
4018        // all direct uses of FPI.
4019        assert(CurrentPad == &FPI);
4020        continue;
4021      }
4022      // Pop off the worklist any nested pads that we've found an unwind
4023      // destination for.  The pads on the worklist are the uncles,
4024      // great-uncles, etc. of CurrentPad.  We've found an unwind destination
4025      // for all ancestors of CurrentPad up to but not including
4026      // UnresolvedAncestorPad.
4027      Value *ResolvedPad = CurrentPad;
4028      while (!Worklist.empty()) {
4029        Value *UnclePad = Worklist.back();
4030        Value *AncestorPad = getParentPad(UnclePad);
4031        // Walk ResolvedPad up the ancestor list until we either find the
4032        // uncle's parent or the last resolved ancestor.
4033        while (ResolvedPad != AncestorPad) {
4034          Value *ResolvedParent = getParentPad(ResolvedPad);
4035          if (ResolvedParent == UnresolvedAncestorPad) {
4036            break;
4037          }
4038          ResolvedPad = ResolvedParent;
4039        }
4040        // If the resolved ancestor search didn't find the uncle's parent,
4041        // then the uncle is not yet resolved.
4042        if (ResolvedPad != AncestorPad)
4043          break;
4044        // This uncle is resolved, so pop it from the worklist.
4045        Worklist.pop_back();
4046      }
4047    }
4048  }
4049
4050  if (FirstUnwindPad) {
4051    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
4052      BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
4053      Value *SwitchUnwindPad;
4054      if (SwitchUnwindDest)
4055        SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
4056      else
4057        SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
4058      Assert(SwitchUnwindPad == FirstUnwindPad,
4059             "Unwind edges out of a catch must have the same unwind dest as "
4060             "the parent catchswitch",
4061             &FPI, FirstUser, CatchSwitch);
4062    }
4063  }
4064
4065  visitInstruction(FPI);
4066}
4067
4068void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
4069  BasicBlock *BB = CatchSwitch.getParent();
4070
4071  Function *F = BB->getParent();
4072  Assert(F->hasPersonalityFn(),
4073         "CatchSwitchInst needs to be in a function with a personality.",
4074         &CatchSwitch);
4075
4076  // The catchswitch instruction must be the first non-PHI instruction in the
4077  // block.
4078  Assert(BB->getFirstNonPHI() == &CatchSwitch,
4079         "CatchSwitchInst not the first non-PHI instruction in the block.",
4080         &CatchSwitch);
4081
4082  auto *ParentPad = CatchSwitch.getParentPad();
4083  Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
4084         "CatchSwitchInst has an invalid parent.", ParentPad);
4085
4086  if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
4087    Instruction *I = UnwindDest->getFirstNonPHI();
4088    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
4089           "CatchSwitchInst must unwind to an EH block which is not a "
4090           "landingpad.",
4091           &CatchSwitch);
4092
4093    // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
4094    if (getParentPad(I) == ParentPad)
4095      SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
4096  }
4097
4098  Assert(CatchSwitch.getNumHandlers() != 0,
4099         "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
4100
4101  for (BasicBlock *Handler : CatchSwitch.handlers()) {
4102    Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
4103           "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
4104  }
4105
4106  visitEHPadPredecessors(CatchSwitch);
4107  visitTerminator(CatchSwitch);
4108}
4109
4110void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
4111  Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
4112         "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
4113         CRI.getOperand(0));
4114
4115  if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
4116    Instruction *I = UnwindDest->getFirstNonPHI();
4117    Assert(I->isEHPad() && !isa<LandingPadInst>(I),
4118           "CleanupReturnInst must unwind to an EH block which is not a "
4119           "landingpad.",
4120           &CRI);
4121  }
4122
4123  visitTerminator(CRI);
4124}
4125
4126void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
4127  Instruction *Op = cast<Instruction>(I.getOperand(i));
4128  // If the we have an invalid invoke, don't try to compute the dominance.
4129  // We already reject it in the invoke specific checks and the dominance
4130  // computation doesn't handle multiple edges.
4131  if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
4132    if (II->getNormalDest() == II->getUnwindDest())
4133      return;
4134  }
4135
4136  // Quick check whether the def has already been encountered in the same block.
4137  // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
4138  // uses are defined to happen on the incoming edge, not at the instruction.
4139  //
4140  // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
4141  // wrapping an SSA value, assert that we've already encountered it.  See
4142  // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
4143  if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
4144    return;
4145
4146  const Use &U = I.getOperandUse(i);
4147  Assert(DT.dominates(Op, U),
4148         "Instruction does not dominate all uses!", Op, &I);
4149}
4150
4151void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
4152  Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
4153         "apply only to pointer types", &I);
4154  Assert((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
4155         "dereferenceable, dereferenceable_or_null apply only to load"
4156         " and inttoptr instructions, use attributes for calls or invokes", &I);
4157  Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
4158         "take one operand!", &I);
4159  ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
4160  Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
4161         "dereferenceable_or_null metadata value must be an i64!", &I);
4162}
4163
4164void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4165  Assert(MD->getNumOperands() >= 2,
4166         "!prof annotations should have no less than 2 operands", MD);
4167
4168  // Check first operand.
4169  Assert(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4170  Assert(isa<MDString>(MD->getOperand(0)),
4171         "expected string with name of the !prof annotation", MD);
4172  MDString *MDS = cast<MDString>(MD->getOperand(0));
4173  StringRef ProfName = MDS->getString();
4174
4175  // Check consistency of !prof branch_weights metadata.
4176  if (ProfName.equals("branch_weights")) {
4177    if (isa<InvokeInst>(&I)) {
4178      Assert(MD->getNumOperands() == 2 || MD->getNumOperands() == 3,
4179             "Wrong number of InvokeInst branch_weights operands", MD);
4180    } else {
4181      unsigned ExpectedNumOperands = 0;
4182      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4183        ExpectedNumOperands = BI->getNumSuccessors();
4184      else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4185        ExpectedNumOperands = SI->getNumSuccessors();
4186      else if (isa<CallInst>(&I))
4187        ExpectedNumOperands = 1;
4188      else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4189        ExpectedNumOperands = IBI->getNumDestinations();
4190      else if (isa<SelectInst>(&I))
4191        ExpectedNumOperands = 2;
4192      else
4193        CheckFailed("!prof branch_weights are not allowed for this instruction",
4194                    MD);
4195
4196      Assert(MD->getNumOperands() == 1 + ExpectedNumOperands,
4197             "Wrong number of operands", MD);
4198    }
4199    for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4200      auto &MDO = MD->getOperand(i);
4201      Assert(MDO, "second operand should not be null", MD);
4202      Assert(mdconst::dyn_extract<ConstantInt>(MDO),
4203             "!prof brunch_weights operand is not a const int");
4204    }
4205  }
4206}
4207
4208/// verifyInstruction - Verify that an instruction is well formed.
4209///
4210void Verifier::visitInstruction(Instruction &I) {
4211  BasicBlock *BB = I.getParent();
4212  Assert(BB, "Instruction not embedded in basic block!", &I);
4213
4214  if (!isa<PHINode>(I)) {   // Check that non-phi nodes are not self referential
4215    for (User *U : I.users()) {
4216      Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
4217             "Only PHI nodes may reference their own value!", &I);
4218    }
4219  }
4220
4221  // Check that void typed values don't have names
4222  Assert(!I.getType()->isVoidTy() || !I.hasName(),
4223         "Instruction has a name, but provides a void value!", &I);
4224
4225  // Check that the return value of the instruction is either void or a legal
4226  // value type.
4227  Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4228         "Instruction returns a non-scalar type!", &I);
4229
4230  // Check that the instruction doesn't produce metadata. Calls are already
4231  // checked against the callee type.
4232  Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4233         "Invalid use of metadata!", &I);
4234
4235  // Check that all uses of the instruction, if they are instructions
4236  // themselves, actually have parent basic blocks.  If the use is not an
4237  // instruction, it is an error!
4238  for (Use &U : I.uses()) {
4239    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4240      Assert(Used->getParent() != nullptr,
4241             "Instruction referencing"
4242             " instruction not embedded in a basic block!",
4243             &I, Used);
4244    else {
4245      CheckFailed("Use of instruction is not an instruction!", U);
4246      return;
4247    }
4248  }
4249
4250  // Get a pointer to the call base of the instruction if it is some form of
4251  // call.
4252  const CallBase *CBI = dyn_cast<CallBase>(&I);
4253
4254  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4255    Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4256
4257    // Check to make sure that only first-class-values are operands to
4258    // instructions.
4259    if (!I.getOperand(i)->getType()->isFirstClassType()) {
4260      Assert(false, "Instruction operands must be first-class values!", &I);
4261    }
4262
4263    if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4264      // Check to make sure that the "address of" an intrinsic function is never
4265      // taken.
4266      Assert(!F->isIntrinsic() ||
4267                 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
4268             "Cannot take the address of an intrinsic!", &I);
4269      Assert(
4270          !F->isIntrinsic() || isa<CallInst>(I) ||
4271              F->getIntrinsicID() == Intrinsic::donothing ||
4272              F->getIntrinsicID() == Intrinsic::coro_resume ||
4273              F->getIntrinsicID() == Intrinsic::coro_destroy ||
4274              F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
4275              F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4276              F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4277              F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch,
4278          "Cannot invoke an intrinsic other than donothing, patchpoint, "
4279          "statepoint, coro_resume or coro_destroy",
4280          &I);
4281      Assert(F->getParent() == &M, "Referencing function in another module!",
4282             &I, &M, F, F->getParent());
4283    } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4284      Assert(OpBB->getParent() == BB->getParent(),
4285             "Referring to a basic block in another function!", &I);
4286    } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4287      Assert(OpArg->getParent() == BB->getParent(),
4288             "Referring to an argument in another function!", &I);
4289    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4290      Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4291             &M, GV, GV->getParent());
4292    } else if (isa<Instruction>(I.getOperand(i))) {
4293      verifyDominatesUse(I, i);
4294    } else if (isa<InlineAsm>(I.getOperand(i))) {
4295      Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4296             "Cannot take the address of an inline asm!", &I);
4297    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4298      if (CE->getType()->isPtrOrPtrVectorTy() ||
4299          !DL.getNonIntegralAddressSpaces().empty()) {
4300        // If we have a ConstantExpr pointer, we need to see if it came from an
4301        // illegal bitcast.  If the datalayout string specifies non-integral
4302        // address spaces then we also need to check for illegal ptrtoint and
4303        // inttoptr expressions.
4304        visitConstantExprsRecursively(CE);
4305      }
4306    }
4307  }
4308
4309  if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
4310    Assert(I.getType()->isFPOrFPVectorTy(),
4311           "fpmath requires a floating point result!", &I);
4312    Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
4313    if (ConstantFP *CFP0 =
4314            mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4315      const APFloat &Accuracy = CFP0->getValueAPF();
4316      Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4317             "fpmath accuracy must have float type", &I);
4318      Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4319             "fpmath accuracy not a positive number!", &I);
4320    } else {
4321      Assert(false, "invalid fpmath accuracy!", &I);
4322    }
4323  }
4324
4325  if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4326    Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4327           "Ranges are only for loads, calls and invokes!", &I);
4328    visitRangeMetadata(I, Range, I.getType());
4329  }
4330
4331  if (I.getMetadata(LLVMContext::MD_nonnull)) {
4332    Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4333           &I);
4334    Assert(isa<LoadInst>(I),
4335           "nonnull applies only to load instructions, use attributes"
4336           " for calls or invokes",
4337           &I);
4338  }
4339
4340  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4341    visitDereferenceableMetadata(I, MD);
4342
4343  if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4344    visitDereferenceableMetadata(I, MD);
4345
4346  if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4347    TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4348
4349  if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4350    Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4351           &I);
4352    Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4353           "use attributes for calls or invokes", &I);
4354    Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4355    ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4356    Assert(CI && CI->getType()->isIntegerTy(64),
4357           "align metadata value must be an i64!", &I);
4358    uint64_t Align = CI->getZExtValue();
4359    Assert(isPowerOf2_64(Align),
4360           "align metadata value must be a power of 2!", &I);
4361    Assert(Align <= Value::MaximumAlignment,
4362           "alignment is larger that implementation defined limit", &I);
4363  }
4364
4365  if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
4366    visitProfMetadata(I, MD);
4367
4368  if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4369    AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4370    visitMDNode(*N, AreDebugLocsAllowed::Yes);
4371  }
4372
4373  if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
4374    verifyFragmentExpression(*DII);
4375    verifyNotEntryValue(*DII);
4376  }
4377
4378  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
4379  I.getAllMetadata(MDs);
4380  for (auto Attachment : MDs) {
4381    unsigned Kind = Attachment.first;
4382    auto AllowLocs =
4383        (Kind == LLVMContext::MD_dbg || Kind == LLVMContext::MD_loop)
4384            ? AreDebugLocsAllowed::Yes
4385            : AreDebugLocsAllowed::No;
4386    visitMDNode(*Attachment.second, AllowLocs);
4387  }
4388
4389  InstsInThisBlock.insert(&I);
4390}
4391
4392/// Allow intrinsics to be verified in different ways.
4393void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4394  Function *IF = Call.getCalledFunction();
4395  Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4396         IF);
4397
4398  // Verify that the intrinsic prototype lines up with what the .td files
4399  // describe.
4400  FunctionType *IFTy = IF->getFunctionType();
4401  bool IsVarArg = IFTy->isVarArg();
4402
4403  SmallVector<Intrinsic::IITDescriptor, 8> Table;
4404  getIntrinsicInfoTableEntries(ID, Table);
4405  ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4406
4407  // Walk the descriptors to extract overloaded types.
4408  SmallVector<Type *, 4> ArgTys;
4409  Intrinsic::MatchIntrinsicTypesResult Res =
4410      Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
4411  Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
4412         "Intrinsic has incorrect return type!", IF);
4413  Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
4414         "Intrinsic has incorrect argument type!", IF);
4415
4416  // Verify if the intrinsic call matches the vararg property.
4417  if (IsVarArg)
4418    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4419           "Intrinsic was not defined with variable arguments!", IF);
4420  else
4421    Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4422           "Callsite was not defined with variable arguments!", IF);
4423
4424  // All descriptors should be absorbed by now.
4425  Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4426
4427  // Now that we have the intrinsic ID and the actual argument types (and we
4428  // know they are legal for the intrinsic!) get the intrinsic name through the
4429  // usual means.  This allows us to verify the mangling of argument types into
4430  // the name.
4431  const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4432  Assert(ExpectedName == IF->getName(),
4433         "Intrinsic name not mangled correctly for type arguments! "
4434         "Should be: " +
4435             ExpectedName,
4436         IF);
4437
4438  // If the intrinsic takes MDNode arguments, verify that they are either global
4439  // or are local to *this* function.
4440  for (Value *V : Call.args())
4441    if (auto *MD = dyn_cast<MetadataAsValue>(V))
4442      visitMetadataAsValue(*MD, Call.getCaller());
4443
4444  switch (ID) {
4445  default:
4446    break;
4447  case Intrinsic::assume: {
4448    for (auto &Elem : Call.bundle_op_infos()) {
4449      Assert(Elem.Tag->getKey() == "ignore" ||
4450                 Attribute::isExistingAttribute(Elem.Tag->getKey()),
4451             "tags must be valid attribute names");
4452      Assert(Elem.End - Elem.Begin <= 2, "to many arguments");
4453      Attribute::AttrKind Kind =
4454          Attribute::getAttrKindFromName(Elem.Tag->getKey());
4455      if (Kind == Attribute::None)
4456        break;
4457      if (Attribute::doesAttrKindHaveArgument(Kind)) {
4458        Assert(Elem.End - Elem.Begin == 2,
4459               "this attribute should have 2 arguments");
4460        Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),
4461               "the second argument should be a constant integral value");
4462      } else if (isFuncOnlyAttr(Kind)) {
4463        Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");
4464      } else if (!isFuncOrArgAttr(Kind)) {
4465        Assert((Elem.End - Elem.Begin) == 1,
4466               "this attribute should have one argument");
4467      }
4468    }
4469    break;
4470  }
4471  case Intrinsic::coro_id: {
4472    auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4473    if (isa<ConstantPointerNull>(InfoArg))
4474      break;
4475    auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4476    Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4477      "info argument of llvm.coro.begin must refer to an initialized "
4478      "constant");
4479    Constant *Init = GV->getInitializer();
4480    Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4481      "info argument of llvm.coro.begin must refer to either a struct or "
4482      "an array");
4483    break;
4484  }
4485#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC)                        \
4486  case Intrinsic::INTRINSIC:
4487#include "llvm/IR/ConstrainedOps.def"
4488    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4489    break;
4490  case Intrinsic::dbg_declare: // llvm.dbg.declare
4491    Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4492           "invalid llvm.dbg.declare intrinsic call 1", Call);
4493    visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4494    break;
4495  case Intrinsic::dbg_addr: // llvm.dbg.addr
4496    visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4497    break;
4498  case Intrinsic::dbg_value: // llvm.dbg.value
4499    visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4500    break;
4501  case Intrinsic::dbg_label: // llvm.dbg.label
4502    visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4503    break;
4504  case Intrinsic::memcpy:
4505  case Intrinsic::memcpy_inline:
4506  case Intrinsic::memmove:
4507  case Intrinsic::memset: {
4508    const auto *MI = cast<MemIntrinsic>(&Call);
4509    auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4510      return Alignment == 0 || isPowerOf2_32(Alignment);
4511    };
4512    Assert(IsValidAlignment(MI->getDestAlignment()),
4513           "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4514           Call);
4515    if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4516      Assert(IsValidAlignment(MTI->getSourceAlignment()),
4517             "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4518             Call);
4519    }
4520
4521    break;
4522  }
4523  case Intrinsic::memcpy_element_unordered_atomic:
4524  case Intrinsic::memmove_element_unordered_atomic:
4525  case Intrinsic::memset_element_unordered_atomic: {
4526    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4527
4528    ConstantInt *ElementSizeCI =
4529        cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4530    const APInt &ElementSizeVal = ElementSizeCI->getValue();
4531    Assert(ElementSizeVal.isPowerOf2(),
4532           "element size of the element-wise atomic memory intrinsic "
4533           "must be a power of 2",
4534           Call);
4535
4536    auto IsValidAlignment = [&](uint64_t Alignment) {
4537      return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4538    };
4539    uint64_t DstAlignment = AMI->getDestAlignment();
4540    Assert(IsValidAlignment(DstAlignment),
4541           "incorrect alignment of the destination argument", Call);
4542    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4543      uint64_t SrcAlignment = AMT->getSourceAlignment();
4544      Assert(IsValidAlignment(SrcAlignment),
4545             "incorrect alignment of the source argument", Call);
4546    }
4547    break;
4548  }
4549  case Intrinsic::call_preallocated_setup: {
4550    auto *NumArgs = dyn_cast<ConstantInt>(Call.getArgOperand(0));
4551    Assert(NumArgs != nullptr,
4552           "llvm.call.preallocated.setup argument must be a constant");
4553    bool FoundCall = false;
4554    for (User *U : Call.users()) {
4555      auto *UseCall = dyn_cast<CallBase>(U);
4556      Assert(UseCall != nullptr,
4557             "Uses of llvm.call.preallocated.setup must be calls");
4558      const Function *Fn = UseCall->getCalledFunction();
4559      if (Fn && Fn->getIntrinsicID() == Intrinsic::call_preallocated_arg) {
4560        auto *AllocArgIndex = dyn_cast<ConstantInt>(UseCall->getArgOperand(1));
4561        Assert(AllocArgIndex != nullptr,
4562               "llvm.call.preallocated.alloc arg index must be a constant");
4563        auto AllocArgIndexInt = AllocArgIndex->getValue();
4564        Assert(AllocArgIndexInt.sge(0) &&
4565                   AllocArgIndexInt.slt(NumArgs->getValue()),
4566               "llvm.call.preallocated.alloc arg index must be between 0 and "
4567               "corresponding "
4568               "llvm.call.preallocated.setup's argument count");
4569      } else if (Fn && Fn->getIntrinsicID() ==
4570                           Intrinsic::call_preallocated_teardown) {
4571        // nothing to do
4572      } else {
4573        Assert(!FoundCall, "Can have at most one call corresponding to a "
4574                           "llvm.call.preallocated.setup");
4575        FoundCall = true;
4576        size_t NumPreallocatedArgs = 0;
4577        for (unsigned i = 0; i < UseCall->getNumArgOperands(); i++) {
4578          if (UseCall->paramHasAttr(i, Attribute::Preallocated)) {
4579            ++NumPreallocatedArgs;
4580          }
4581        }
4582        Assert(NumPreallocatedArgs != 0,
4583               "cannot use preallocated intrinsics on a call without "
4584               "preallocated arguments");
4585        Assert(NumArgs->equalsInt(NumPreallocatedArgs),
4586               "llvm.call.preallocated.setup arg size must be equal to number "
4587               "of preallocated arguments "
4588               "at call site",
4589               Call, *UseCall);
4590        // getOperandBundle() cannot be called if more than one of the operand
4591        // bundle exists. There is already a check elsewhere for this, so skip
4592        // here if we see more than one.
4593        if (UseCall->countOperandBundlesOfType(LLVMContext::OB_preallocated) >
4594            1) {
4595          return;
4596        }
4597        auto PreallocatedBundle =
4598            UseCall->getOperandBundle(LLVMContext::OB_preallocated);
4599        Assert(PreallocatedBundle,
4600               "Use of llvm.call.preallocated.setup outside intrinsics "
4601               "must be in \"preallocated\" operand bundle");
4602        Assert(PreallocatedBundle->Inputs.front().get() == &Call,
4603               "preallocated bundle must have token from corresponding "
4604               "llvm.call.preallocated.setup");
4605      }
4606    }
4607    break;
4608  }
4609  case Intrinsic::call_preallocated_arg: {
4610    auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
4611    Assert(Token && Token->getCalledFunction()->getIntrinsicID() ==
4612                        Intrinsic::call_preallocated_setup,
4613           "llvm.call.preallocated.arg token argument must be a "
4614           "llvm.call.preallocated.setup");
4615    Assert(Call.hasFnAttr(Attribute::Preallocated),
4616           "llvm.call.preallocated.arg must be called with a \"preallocated\" "
4617           "call site attribute");
4618    break;
4619  }
4620  case Intrinsic::call_preallocated_teardown: {
4621    auto *Token = dyn_cast<CallBase>(Call.getArgOperand(0));
4622    Assert(Token && Token->getCalledFunction()->getIntrinsicID() ==
4623                        Intrinsic::call_preallocated_setup,
4624           "llvm.call.preallocated.teardown token argument must be a "
4625           "llvm.call.preallocated.setup");
4626    break;
4627  }
4628  case Intrinsic::gcroot:
4629  case Intrinsic::gcwrite:
4630  case Intrinsic::gcread:
4631    if (ID == Intrinsic::gcroot) {
4632      AllocaInst *AI =
4633          dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
4634      Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4635      Assert(isa<Constant>(Call.getArgOperand(1)),
4636             "llvm.gcroot parameter #2 must be a constant.", Call);
4637      if (!AI->getAllocatedType()->isPointerTy()) {
4638        Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4639               "llvm.gcroot parameter #1 must either be a pointer alloca, "
4640               "or argument #2 must be a non-null constant.",
4641               Call);
4642      }
4643    }
4644
4645    Assert(Call.getParent()->getParent()->hasGC(),
4646           "Enclosing function does not use GC.", Call);
4647    break;
4648  case Intrinsic::init_trampoline:
4649    Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4650           "llvm.init_trampoline parameter #2 must resolve to a function.",
4651           Call);
4652    break;
4653  case Intrinsic::prefetch:
4654    Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4655           cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4656           "invalid arguments to llvm.prefetch", Call);
4657    break;
4658  case Intrinsic::stackprotector:
4659    Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4660           "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4661    break;
4662  case Intrinsic::localescape: {
4663    BasicBlock *BB = Call.getParent();
4664    Assert(BB == &BB->getParent()->front(),
4665           "llvm.localescape used outside of entry block", Call);
4666    Assert(!SawFrameEscape,
4667           "multiple calls to llvm.localescape in one function", Call);
4668    for (Value *Arg : Call.args()) {
4669      if (isa<ConstantPointerNull>(Arg))
4670        continue; // Null values are allowed as placeholders.
4671      auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4672      Assert(AI && AI->isStaticAlloca(),
4673             "llvm.localescape only accepts static allocas", Call);
4674    }
4675    FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4676    SawFrameEscape = true;
4677    break;
4678  }
4679  case Intrinsic::localrecover: {
4680    Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4681    Function *Fn = dyn_cast<Function>(FnArg);
4682    Assert(Fn && !Fn->isDeclaration(),
4683           "llvm.localrecover first "
4684           "argument must be function defined in this module",
4685           Call);
4686    auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
4687    auto &Entry = FrameEscapeInfo[Fn];
4688    Entry.second = unsigned(
4689        std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4690    break;
4691  }
4692
4693  case Intrinsic::experimental_gc_statepoint:
4694    if (auto *CI = dyn_cast<CallInst>(&Call))
4695      Assert(!CI->isInlineAsm(),
4696             "gc.statepoint support for inline assembly unimplemented", CI);
4697    Assert(Call.getParent()->getParent()->hasGC(),
4698           "Enclosing function does not use GC.", Call);
4699
4700    verifyStatepoint(Call);
4701    break;
4702  case Intrinsic::experimental_gc_result: {
4703    Assert(Call.getParent()->getParent()->hasGC(),
4704           "Enclosing function does not use GC.", Call);
4705    // Are we tied to a statepoint properly?
4706    const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4707    const Function *StatepointFn =
4708        StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4709    Assert(StatepointFn && StatepointFn->isDeclaration() &&
4710               StatepointFn->getIntrinsicID() ==
4711                   Intrinsic::experimental_gc_statepoint,
4712           "gc.result operand #1 must be from a statepoint", Call,
4713           Call.getArgOperand(0));
4714
4715    // Assert that result type matches wrapped callee.
4716    const Value *Target = StatepointCall->getArgOperand(2);
4717    auto *PT = cast<PointerType>(Target->getType());
4718    auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4719    Assert(Call.getType() == TargetFuncType->getReturnType(),
4720           "gc.result result type does not match wrapped callee", Call);
4721    break;
4722  }
4723  case Intrinsic::experimental_gc_relocate: {
4724    Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4725
4726    Assert(isa<PointerType>(Call.getType()->getScalarType()),
4727           "gc.relocate must return a pointer or a vector of pointers", Call);
4728
4729    // Check that this relocate is correctly tied to the statepoint
4730
4731    // This is case for relocate on the unwinding path of an invoke statepoint
4732    if (LandingPadInst *LandingPad =
4733            dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4734
4735      const BasicBlock *InvokeBB =
4736          LandingPad->getParent()->getUniquePredecessor();
4737
4738      // Landingpad relocates should have only one predecessor with invoke
4739      // statepoint terminator
4740      Assert(InvokeBB, "safepoints should have unique landingpads",
4741             LandingPad->getParent());
4742      Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4743             InvokeBB);
4744      Assert(isa<GCStatepointInst>(InvokeBB->getTerminator()),
4745             "gc relocate should be linked to a statepoint", InvokeBB);
4746    } else {
4747      // In all other cases relocate should be tied to the statepoint directly.
4748      // This covers relocates on a normal return path of invoke statepoint and
4749      // relocates of a call statepoint.
4750      auto Token = Call.getArgOperand(0);
4751      Assert(isa<GCStatepointInst>(Token),
4752             "gc relocate is incorrectly tied to the statepoint", Call, Token);
4753    }
4754
4755    // Verify rest of the relocate arguments.
4756    const CallBase &StatepointCall =
4757      *cast<GCRelocateInst>(Call).getStatepoint();
4758
4759    // Both the base and derived must be piped through the safepoint.
4760    Value *Base = Call.getArgOperand(1);
4761    Assert(isa<ConstantInt>(Base),
4762           "gc.relocate operand #2 must be integer offset", Call);
4763
4764    Value *Derived = Call.getArgOperand(2);
4765    Assert(isa<ConstantInt>(Derived),
4766           "gc.relocate operand #3 must be integer offset", Call);
4767
4768    const uint64_t BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4769    const uint64_t DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4770
4771    // Check the bounds
4772    if (auto Opt = StatepointCall.getOperandBundle(LLVMContext::OB_gc_live)) {
4773      Assert(BaseIndex < Opt->Inputs.size(),
4774             "gc.relocate: statepoint base index out of bounds", Call);
4775      Assert(DerivedIndex < Opt->Inputs.size(),
4776             "gc.relocate: statepoint derived index out of bounds", Call);
4777    } else {
4778      Assert(BaseIndex < StatepointCall.arg_size(),
4779             "gc.relocate: statepoint base index out of bounds", Call);
4780      Assert(DerivedIndex < StatepointCall.arg_size(),
4781             "gc.relocate: statepoint derived index out of bounds", Call);
4782
4783      // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4784      // section of the statepoint's argument.
4785      Assert(StatepointCall.arg_size() > 0,
4786             "gc.statepoint: insufficient arguments");
4787      Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4788             "gc.statement: number of call arguments must be constant integer");
4789      const uint64_t NumCallArgs =
4790        cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4791      Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4792             "gc.statepoint: mismatch in number of call arguments");
4793      Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4794             "gc.statepoint: number of transition arguments must be "
4795             "a constant integer");
4796      const uint64_t NumTransitionArgs =
4797          cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4798              ->getZExtValue();
4799      const uint64_t DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4800      Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4801             "gc.statepoint: number of deoptimization arguments must be "
4802             "a constant integer");
4803      const uint64_t NumDeoptArgs =
4804          cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4805              ->getZExtValue();
4806      const uint64_t GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4807      const uint64_t GCParamArgsEnd = StatepointCall.arg_size();
4808      Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4809             "gc.relocate: statepoint base index doesn't fall within the "
4810             "'gc parameters' section of the statepoint call",
4811             Call);
4812      Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4813             "gc.relocate: statepoint derived index doesn't fall within the "
4814             "'gc parameters' section of the statepoint call",
4815             Call);
4816    }
4817
4818    // Relocated value must be either a pointer type or vector-of-pointer type,
4819    // but gc_relocate does not need to return the same pointer type as the
4820    // relocated pointer. It can be casted to the correct type later if it's
4821    // desired. However, they must have the same address space and 'vectorness'
4822    GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4823    Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4824           "gc.relocate: relocated value must be a gc pointer", Call);
4825
4826    auto ResultType = Call.getType();
4827    auto DerivedType = Relocate.getDerivedPtr()->getType();
4828    Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4829           "gc.relocate: vector relocates to vector and pointer to pointer",
4830           Call);
4831    Assert(
4832        ResultType->getPointerAddressSpace() ==
4833            DerivedType->getPointerAddressSpace(),
4834        "gc.relocate: relocating a pointer shouldn't change its address space",
4835        Call);
4836    break;
4837  }
4838  case Intrinsic::eh_exceptioncode:
4839  case Intrinsic::eh_exceptionpointer: {
4840    Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4841           "eh.exceptionpointer argument must be a catchpad", Call);
4842    break;
4843  }
4844  case Intrinsic::get_active_lane_mask: {
4845    Assert(Call.getType()->isVectorTy(), "get_active_lane_mask: must return a "
4846           "vector", Call);
4847    auto *ElemTy = Call.getType()->getScalarType();
4848    Assert(ElemTy->isIntegerTy(1), "get_active_lane_mask: element type is not "
4849           "i1", Call);
4850    break;
4851  }
4852  case Intrinsic::masked_load: {
4853    Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4854           Call);
4855
4856    Value *Ptr = Call.getArgOperand(0);
4857    ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
4858    Value *Mask = Call.getArgOperand(2);
4859    Value *PassThru = Call.getArgOperand(3);
4860    Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4861           Call);
4862    Assert(Alignment->getValue().isPowerOf2(),
4863           "masked_load: alignment must be a power of 2", Call);
4864
4865    // DataTy is the overloaded type
4866    Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4867    Assert(DataTy == Call.getType(),
4868           "masked_load: return must match pointer type", Call);
4869    Assert(PassThru->getType() == DataTy,
4870           "masked_load: pass through and data type must match", Call);
4871    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
4872               cast<VectorType>(DataTy)->getElementCount(),
4873           "masked_load: vector mask must be same length as data", Call);
4874    break;
4875  }
4876  case Intrinsic::masked_store: {
4877    Value *Val = Call.getArgOperand(0);
4878    Value *Ptr = Call.getArgOperand(1);
4879    ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
4880    Value *Mask = Call.getArgOperand(3);
4881    Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4882           Call);
4883    Assert(Alignment->getValue().isPowerOf2(),
4884           "masked_store: alignment must be a power of 2", Call);
4885
4886    // DataTy is the overloaded type
4887    Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4888    Assert(DataTy == Val->getType(),
4889           "masked_store: storee must match pointer type", Call);
4890    Assert(cast<VectorType>(Mask->getType())->getElementCount() ==
4891               cast<VectorType>(DataTy)->getElementCount(),
4892           "masked_store: vector mask must be same length as data", Call);
4893    break;
4894  }
4895
4896  case Intrinsic::masked_gather: {
4897    const APInt &Alignment =
4898        cast<ConstantInt>(Call.getArgOperand(1))->getValue();
4899    Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
4900           "masked_gather: alignment must be 0 or a power of 2", Call);
4901    break;
4902  }
4903  case Intrinsic::masked_scatter: {
4904    const APInt &Alignment =
4905        cast<ConstantInt>(Call.getArgOperand(2))->getValue();
4906    Assert(Alignment.isNullValue() || Alignment.isPowerOf2(),
4907           "masked_scatter: alignment must be 0 or a power of 2", Call);
4908    break;
4909  }
4910
4911  case Intrinsic::experimental_guard: {
4912    Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4913    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4914           "experimental_guard must have exactly one "
4915           "\"deopt\" operand bundle");
4916    break;
4917  }
4918
4919  case Intrinsic::experimental_deoptimize: {
4920    Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4921           Call);
4922    Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4923           "experimental_deoptimize must have exactly one "
4924           "\"deopt\" operand bundle");
4925    Assert(Call.getType() == Call.getFunction()->getReturnType(),
4926           "experimental_deoptimize return type must match caller return type");
4927
4928    if (isa<CallInst>(Call)) {
4929      auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4930      Assert(RI,
4931             "calls to experimental_deoptimize must be followed by a return");
4932
4933      if (!Call.getType()->isVoidTy() && RI)
4934        Assert(RI->getReturnValue() == &Call,
4935               "calls to experimental_deoptimize must be followed by a return "
4936               "of the value computed by experimental_deoptimize");
4937    }
4938
4939    break;
4940  }
4941  case Intrinsic::sadd_sat:
4942  case Intrinsic::uadd_sat:
4943  case Intrinsic::ssub_sat:
4944  case Intrinsic::usub_sat: {
4945    Value *Op1 = Call.getArgOperand(0);
4946    Value *Op2 = Call.getArgOperand(1);
4947    Assert(Op1->getType()->isIntOrIntVectorTy(),
4948           "first operand of [us][add|sub]_sat must be an int type or vector "
4949           "of ints");
4950    Assert(Op2->getType()->isIntOrIntVectorTy(),
4951           "second operand of [us][add|sub]_sat must be an int type or vector "
4952           "of ints");
4953    break;
4954  }
4955  case Intrinsic::smul_fix:
4956  case Intrinsic::smul_fix_sat:
4957  case Intrinsic::umul_fix:
4958  case Intrinsic::umul_fix_sat:
4959  case Intrinsic::sdiv_fix:
4960  case Intrinsic::sdiv_fix_sat:
4961  case Intrinsic::udiv_fix:
4962  case Intrinsic::udiv_fix_sat: {
4963    Value *Op1 = Call.getArgOperand(0);
4964    Value *Op2 = Call.getArgOperand(1);
4965    Assert(Op1->getType()->isIntOrIntVectorTy(),
4966           "first operand of [us][mul|div]_fix[_sat] must be an int type or "
4967           "vector of ints");
4968    Assert(Op2->getType()->isIntOrIntVectorTy(),
4969           "second operand of [us][mul|div]_fix[_sat] must be an int type or "
4970           "vector of ints");
4971
4972    auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
4973    Assert(Op3->getType()->getBitWidth() <= 32,
4974           "third argument of [us][mul|div]_fix[_sat] must fit within 32 bits");
4975
4976    if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat ||
4977        ID == Intrinsic::sdiv_fix || ID == Intrinsic::sdiv_fix_sat) {
4978      Assert(
4979          Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4980          "the scale of s[mul|div]_fix[_sat] must be less than the width of "
4981          "the operands");
4982    } else {
4983      Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
4984             "the scale of u[mul|div]_fix[_sat] must be less than or equal "
4985             "to the width of the operands");
4986    }
4987    break;
4988  }
4989  case Intrinsic::lround:
4990  case Intrinsic::llround:
4991  case Intrinsic::lrint:
4992  case Intrinsic::llrint: {
4993    Type *ValTy = Call.getArgOperand(0)->getType();
4994    Type *ResultTy = Call.getType();
4995    Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4996           "Intrinsic does not support vectors", &Call);
4997    break;
4998  }
4999  case Intrinsic::bswap: {
5000    Type *Ty = Call.getType();
5001    unsigned Size = Ty->getScalarSizeInBits();
5002    Assert(Size % 16 == 0, "bswap must be an even number of bytes", &Call);
5003    break;
5004  }
5005  case Intrinsic::matrix_multiply:
5006  case Intrinsic::matrix_transpose:
5007  case Intrinsic::matrix_column_major_load:
5008  case Intrinsic::matrix_column_major_store: {
5009    Function *IF = Call.getCalledFunction();
5010    ConstantInt *Stride = nullptr;
5011    ConstantInt *NumRows;
5012    ConstantInt *NumColumns;
5013    VectorType *ResultTy;
5014    Type *Op0ElemTy = nullptr;
5015    Type *Op1ElemTy = nullptr;
5016    switch (ID) {
5017    case Intrinsic::matrix_multiply:
5018      NumRows = cast<ConstantInt>(Call.getArgOperand(2));
5019      NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5020      ResultTy = cast<VectorType>(Call.getType());
5021      Op0ElemTy =
5022          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5023      Op1ElemTy =
5024          cast<VectorType>(Call.getArgOperand(1)->getType())->getElementType();
5025      break;
5026    case Intrinsic::matrix_transpose:
5027      NumRows = cast<ConstantInt>(Call.getArgOperand(1));
5028      NumColumns = cast<ConstantInt>(Call.getArgOperand(2));
5029      ResultTy = cast<VectorType>(Call.getType());
5030      Op0ElemTy =
5031          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5032      break;
5033    case Intrinsic::matrix_column_major_load:
5034      Stride = dyn_cast<ConstantInt>(Call.getArgOperand(1));
5035      NumRows = cast<ConstantInt>(Call.getArgOperand(3));
5036      NumColumns = cast<ConstantInt>(Call.getArgOperand(4));
5037      ResultTy = cast<VectorType>(Call.getType());
5038      Op0ElemTy =
5039          cast<PointerType>(Call.getArgOperand(0)->getType())->getElementType();
5040      break;
5041    case Intrinsic::matrix_column_major_store:
5042      Stride = dyn_cast<ConstantInt>(Call.getArgOperand(2));
5043      NumRows = cast<ConstantInt>(Call.getArgOperand(4));
5044      NumColumns = cast<ConstantInt>(Call.getArgOperand(5));
5045      ResultTy = cast<VectorType>(Call.getArgOperand(0)->getType());
5046      Op0ElemTy =
5047          cast<VectorType>(Call.getArgOperand(0)->getType())->getElementType();
5048      Op1ElemTy =
5049          cast<PointerType>(Call.getArgOperand(1)->getType())->getElementType();
5050      break;
5051    default:
5052      llvm_unreachable("unexpected intrinsic");
5053    }
5054
5055    Assert(ResultTy->getElementType()->isIntegerTy() ||
5056           ResultTy->getElementType()->isFloatingPointTy(),
5057           "Result type must be an integer or floating-point type!", IF);
5058
5059    Assert(ResultTy->getElementType() == Op0ElemTy,
5060           "Vector element type mismatch of the result and first operand "
5061           "vector!", IF);
5062
5063    if (Op1ElemTy)
5064      Assert(ResultTy->getElementType() == Op1ElemTy,
5065             "Vector element type mismatch of the result and second operand "
5066             "vector!", IF);
5067
5068    Assert(ResultTy->getNumElements() ==
5069               NumRows->getZExtValue() * NumColumns->getZExtValue(),
5070           "Result of a matrix operation does not fit in the returned vector!");
5071
5072    if (Stride)
5073      Assert(Stride->getZExtValue() >= NumRows->getZExtValue(),
5074             "Stride must be greater or equal than the number of rows!", IF);
5075
5076    break;
5077  }
5078  };
5079}
5080
5081/// Carefully grab the subprogram from a local scope.
5082///
5083/// This carefully grabs the subprogram from a local scope, avoiding the
5084/// built-in assertions that would typically fire.
5085static DISubprogram *getSubprogram(Metadata *LocalScope) {
5086  if (!LocalScope)
5087    return nullptr;
5088
5089  if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
5090    return SP;
5091
5092  if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
5093    return getSubprogram(LB->getRawScope());
5094
5095  // Just return null; broken scope chains are checked elsewhere.
5096  assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
5097  return nullptr;
5098}
5099
5100void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
5101  unsigned NumOperands;
5102  bool HasRoundingMD;
5103  switch (FPI.getIntrinsicID()) {
5104#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
5105  case Intrinsic::INTRINSIC:                                                   \
5106    NumOperands = NARG;                                                        \
5107    HasRoundingMD = ROUND_MODE;                                                \
5108    break;
5109#include "llvm/IR/ConstrainedOps.def"
5110  default:
5111    llvm_unreachable("Invalid constrained FP intrinsic!");
5112  }
5113  NumOperands += (1 + HasRoundingMD);
5114  // Compare intrinsics carry an extra predicate metadata operand.
5115  if (isa<ConstrainedFPCmpIntrinsic>(FPI))
5116    NumOperands += 1;
5117  Assert((FPI.getNumArgOperands() == NumOperands),
5118         "invalid arguments for constrained FP intrinsic", &FPI);
5119
5120  switch (FPI.getIntrinsicID()) {
5121  case Intrinsic::experimental_constrained_lrint:
5122  case Intrinsic::experimental_constrained_llrint: {
5123    Type *ValTy = FPI.getArgOperand(0)->getType();
5124    Type *ResultTy = FPI.getType();
5125    Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5126           "Intrinsic does not support vectors", &FPI);
5127  }
5128    break;
5129
5130  case Intrinsic::experimental_constrained_lround:
5131  case Intrinsic::experimental_constrained_llround: {
5132    Type *ValTy = FPI.getArgOperand(0)->getType();
5133    Type *ResultTy = FPI.getType();
5134    Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
5135           "Intrinsic does not support vectors", &FPI);
5136    break;
5137  }
5138
5139  case Intrinsic::experimental_constrained_fcmp:
5140  case Intrinsic::experimental_constrained_fcmps: {
5141    auto Pred = cast<ConstrainedFPCmpIntrinsic>(&FPI)->getPredicate();
5142    Assert(CmpInst::isFPPredicate(Pred),
5143           "invalid predicate for constrained FP comparison intrinsic", &FPI);
5144    break;
5145  }
5146
5147  case Intrinsic::experimental_constrained_fptosi:
5148  case Intrinsic::experimental_constrained_fptoui: {
5149    Value *Operand = FPI.getArgOperand(0);
5150    uint64_t NumSrcElem = 0;
5151    Assert(Operand->getType()->isFPOrFPVectorTy(),
5152           "Intrinsic first argument must be floating point", &FPI);
5153    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5154      NumSrcElem = OperandT->getNumElements();
5155    }
5156
5157    Operand = &FPI;
5158    Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
5159           "Intrinsic first argument and result disagree on vector use", &FPI);
5160    Assert(Operand->getType()->isIntOrIntVectorTy(),
5161           "Intrinsic result must be an integer", &FPI);
5162    if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
5163      Assert(NumSrcElem == OperandT->getNumElements(),
5164             "Intrinsic first argument and result vector lengths must be equal",
5165             &FPI);
5166    }
5167  }
5168    break;
5169
5170  case Intrinsic::experimental_constrained_sitofp:
5171  case Intrinsic::experimental_constrained_uitofp: {
5172    Value *Operand = FPI.getArgOperand(0);
5173    uint64_t NumSrcElem = 0;
5174    Assert(Operand->getType()->isIntOrIntVectorTy(),
5175           "Intrinsic first argument must be integer", &FPI);
5176    if (auto *OperandT = dyn_cast<VectorType>(