author     Dimitry Andric <dim@FreeBSD.org>  2011-07-17 15:40:56 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2011-07-17 15:40:56 +0000
commit     180abc3db9ae3b4fc63cd65b15697e6ffcc8a657 (patch)
tree       2097d084eb235c0b12c0bff3445f4ec7bbaa8a12 /lib
parent     29cafa66ad3878dbb9f82615f19fa0bded2e443c (diff)
Vendor import of clang trunk r135360 (vendor/clang/clang-r135360)
Notes:
    svn path=/vendor/clang/dist/; revision=224135
    svn path=/vendor/clang/clang-r135360/; revision=224136; tag=vendor/clang/clang-r135360
Diffstat (limited to 'lib')
-rw-r--r--  lib/ARCMigrate/ARCMT.cpp  541
-rw-r--r--  lib/ARCMigrate/ARCMTActions.cpp  53
-rw-r--r--  lib/ARCMigrate/CMakeLists.txt  24
-rw-r--r--  lib/ARCMigrate/FileRemapper.cpp  291
-rw-r--r--  lib/ARCMigrate/Internals.h  148
-rw-r--r--  lib/ARCMigrate/Makefile  18
-rw-r--r--  lib/ARCMigrate/TransARCAssign.cpp  75
-rw-r--r--  lib/ARCMigrate/TransAutoreleasePool.cpp  436
-rw-r--r--  lib/ARCMigrate/TransBlockObjCVariable.cpp  143
-rw-r--r--  lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp  211
-rw-r--r--  lib/ARCMigrate/TransProperties.cpp  364
-rw-r--r--  lib/ARCMigrate/TransRetainReleaseDealloc.cpp  219
-rw-r--r--  lib/ARCMigrate/TransUnbridgedCasts.cpp  203
-rw-r--r--  lib/ARCMigrate/TransUnusedInitDelegate.cpp  74
-rw-r--r--  lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp  198
-rw-r--r--  lib/ARCMigrate/TransformActions.cpp  699
-rw-r--r--  lib/ARCMigrate/Transforms.cpp  296
-rw-r--r--  lib/ARCMigrate/Transforms.h  92
-rw-r--r--  lib/AST/ASTContext.cpp  293
-rw-r--r--  lib/AST/ASTDiagnostic.cpp  74
-rw-r--r--  lib/AST/ASTImporter.cpp  20
-rw-r--r--  lib/AST/Decl.cpp  110
-rw-r--r--  lib/AST/DeclBase.cpp  35
-rw-r--r--  lib/AST/DeclCXX.cpp  54
-rw-r--r--  lib/AST/DeclObjC.cpp  58
-rw-r--r--  lib/AST/DeclPrinter.cpp  5
-rw-r--r--  lib/AST/DeclarationName.cpp  22
-rw-r--r--  lib/AST/Expr.cpp  256
-rw-r--r--  lib/AST/ExprCXX.cpp  60
-rw-r--r--  lib/AST/ExprClassification.cpp  15
-rw-r--r--  lib/AST/ExprConstant.cpp  68
-rw-r--r--  lib/AST/ExternalASTSource.cpp  4
-rw-r--r--  lib/AST/ItaniumMangle.cpp  447
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp  22
-rw-r--r--  lib/AST/ParentMap.cpp  9
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp  9
-rw-r--r--  lib/AST/Stmt.cpp  33
-rw-r--r--  lib/AST/StmtPrinter.cpp  26
-rw-r--r--  lib/AST/StmtProfile.cpp  308
-rw-r--r--  lib/AST/TemplateBase.cpp  43
-rw-r--r--  lib/AST/TemplateName.cpp  40
-rw-r--r--  lib/AST/Type.cpp  547
-rw-r--r--  lib/AST/TypePrinter.cpp  117
-rw-r--r--  lib/Analysis/CFG.cpp  6
-rw-r--r--  lib/Analysis/CocoaConventions.cpp  65
-rw-r--r--  lib/Analysis/FormatString.cpp  3
-rw-r--r--  lib/Analysis/ReachableCode.cpp  6
-rw-r--r--  lib/Analysis/UninitializedValues.cpp  46
-rw-r--r--  lib/Basic/Builtins.cpp  22
-rw-r--r--  lib/Basic/Diagnostic.cpp  16
-rw-r--r--  lib/Basic/DiagnosticIDs.cpp  81
-rw-r--r--  lib/Basic/IdentifierTable.cpp  9
-rw-r--r--  lib/Basic/SourceManager.cpp  121
-rw-r--r--  lib/Basic/TargetInfo.cpp  39
-rw-r--r--  lib/Basic/Targets.cpp  348
-rw-r--r--  lib/CMakeLists.txt  1
-rw-r--r--  lib/CodeGen/ABIInfo.h  12
-rw-r--r--  lib/CodeGen/BackendUtil.cpp  46
-rw-r--r--  lib/CodeGen/CGBlocks.cpp  316
-rw-r--r--  lib/CodeGen/CGBlocks.h  3
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp  348
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp  2
-rw-r--r--  lib/CodeGen/CGCXXABI.h  2
-rw-r--r--  lib/CodeGen/CGCall.cpp  555
-rw-r--r--  lib/CodeGen/CGCall.h  47
-rw-r--r--  lib/CodeGen/CGClass.cpp  390
-rw-r--r--  lib/CodeGen/CGCleanup.cpp  35
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp  51
-rw-r--r--  lib/CodeGen/CGDebugInfo.h  8
-rw-r--r--  lib/CodeGen/CGDecl.cpp  770
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp  162
-rw-r--r--  lib/CodeGen/CGException.cpp  212
-rw-r--r--  lib/CodeGen/CGExpr.cpp  450
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp  319
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp  147
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp  93
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp  42
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp  494
-rw-r--r--  lib/CodeGen/CGObjC.cpp  1255
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp  224
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp  791
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp  28
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h  8
-rw-r--r--  lib/CodeGen/CGRTTI.cpp  4
-rw-r--r--  lib/CodeGen/CGRecordLayout.h  16
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp  69
-rw-r--r--  lib/CodeGen/CGStmt.cpp  32
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp  9
-rw-r--r--  lib/CodeGen/CGVTT.cpp  3
-rw-r--r--  lib/CodeGen/CGVTables.cpp  5
-rw-r--r--  lib/CodeGen/CGVTables.h  2
-rw-r--r--  lib/CodeGen/CGValue.h  81
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp  12
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp  262
-rw-r--r--  lib/CodeGen/CodeGenFunction.h  414
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp  58
-rw-r--r--  lib/CodeGen/CodeGenModule.h  125
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp  617
-rw-r--r--  lib/CodeGen/CodeGenTypes.h  130
-rw-r--r--  lib/CodeGen/GlobalDecl.h  127
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp  123
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  274
-rw-r--r--  lib/CodeGen/TargetInfo.h  20
-rw-r--r--  lib/Driver/Arg.cpp  2
-rw-r--r--  lib/Driver/ArgList.cpp  4
-rw-r--r--  lib/Driver/Driver.cpp  41
-rw-r--r--  lib/Driver/Option.cpp  16
-rw-r--r--  lib/Driver/ToolChain.cpp  34
-rw-r--r--  lib/Driver/ToolChains.cpp  161
-rw-r--r--  lib/Driver/ToolChains.h  26
-rw-r--r--  lib/Driver/Tools.cpp  267
-rw-r--r--  lib/Driver/Tools.h  2
-rw-r--r--  lib/Frontend/ASTUnit.cpp  85
-rw-r--r--  lib/Frontend/BoostConAction.cpp  39
-rw-r--r--  lib/Frontend/CMakeLists.txt  1
-rw-r--r--  lib/Frontend/CacheTokens.cpp  7
-rw-r--r--  lib/Frontend/CompilerInstance.cpp  3
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp  123
-rw-r--r--  lib/Frontend/DependencyFile.cpp  55
-rw-r--r--  lib/Frontend/FrontendAction.cpp  46
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp  217
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp  170
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp  48
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp  115
-rw-r--r--  lib/FrontendTool/CMakeLists.txt  3
-rw-r--r--  lib/FrontendTool/ExecuteCompilerInvocation.cpp  17
-rw-r--r--  lib/Headers/float.h  42
-rw-r--r--  lib/Headers/stdarg.h  2
-rw-r--r--  lib/Lex/HeaderSearch.cpp  6
-rw-r--r--  lib/Lex/Lexer.cpp  84
-rw-r--r--  lib/Lex/MacroArgs.cpp  11
-rw-r--r--  lib/Lex/MacroArgs.h  7
-rw-r--r--  lib/Lex/MacroInfo.cpp  32
-rw-r--r--  lib/Lex/PPDirectives.cpp  22
-rw-r--r--  lib/Lex/PPLexerChange.cpp  4
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp  72
-rw-r--r--  lib/Lex/Pragma.cpp  30
-rw-r--r--  lib/Lex/PreprocessingRecord.cpp  11
-rw-r--r--  lib/Lex/Preprocessor.cpp  18
-rw-r--r--  lib/Lex/TokenLexer.cpp  182
-rwxr-xr-x  lib/Makefile  4
-rw-r--r--  lib/Parse/ParseAST.cpp  9
-rw-r--r--  lib/Parse/ParseDecl.cpp  498
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp  37
-rw-r--r--  lib/Parse/ParseExpr.cpp  156
-rw-r--r--  lib/Parse/ParseExprCXX.cpp  49
-rw-r--r--  lib/Parse/ParseObjc.cpp  46
-rw-r--r--  lib/Parse/ParseStmt.cpp  23
-rw-r--r--  lib/Parse/ParseTemplate.cpp  4
-rw-r--r--  lib/Parse/ParseTentative.cpp  3
-rw-r--r--  lib/Parse/Parser.cpp  26
-rw-r--r--  lib/Rewrite/FixItRewriter.cpp  3
-rw-r--r--  lib/Rewrite/HTMLRewrite.cpp  2
-rw-r--r--  lib/Rewrite/RewriteObjC.cpp  20
-rw-r--r--  lib/Rewrite/Rewriter.cpp  73
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp  84
-rw-r--r--  lib/Sema/AttributeList.cpp  6
-rw-r--r--  lib/Sema/CMakeLists.txt  1
-rw-r--r--  lib/Sema/CodeCompleteConsumer.cpp  8
-rw-r--r--  lib/Sema/DeclSpec.cpp  80
-rw-r--r--  lib/Sema/DelayedDiagnostic.cpp  3
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp  224
-rw-r--r--  lib/Sema/Sema.cpp  59
-rw-r--r--  lib/Sema/SemaCXXCast.cpp  162
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp  58
-rw-r--r--  lib/Sema/SemaChecking.cpp  541
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp  154
-rw-r--r--  lib/Sema/SemaDecl.cpp  444
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp  1115
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp  575
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp  540
-rw-r--r--  lib/Sema/SemaExpr.cpp  2769
-rw-r--r--  lib/Sema/SemaExprCXX.cpp  439
-rw-r--r--  lib/Sema/SemaExprMember.cpp  1594
-rw-r--r--  lib/Sema/SemaExprObjC.cpp  627
-rw-r--r--  lib/Sema/SemaInit.cpp  410
-rw-r--r--  lib/Sema/SemaLookup.cpp  990
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp  432
-rw-r--r--  lib/Sema/SemaOverload.cpp  367
-rw-r--r--  lib/Sema/SemaStmt.cpp  77
-rw-r--r--  lib/Sema/SemaTemplate.cpp  134
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp  328
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp  151
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp  42
-rw-r--r--  lib/Sema/SemaTemplateVariadic.cpp  16
-rw-r--r--  lib/Sema/SemaType.cpp  848
-rw-r--r--  lib/Sema/TreeTransform.h  175
-rw-r--r--  lib/Sema/TypeLocBuilder.h  27
-rw-r--r--  lib/Serialization/ASTReader.cpp  77
-rw-r--r--  lib/Serialization/ASTReaderDecl.cpp  1
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp  263
-rw-r--r--  lib/Serialization/ASTWriter.cpp  63
-rw-r--r--  lib/Serialization/ASTWriterDecl.cpp  22
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp  228
-rw-r--r--  lib/Serialization/ChainedIncludesSource.cpp  3
-rw-r--r--  lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp  2
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringChecker.cpp  756
-rw-r--r--  lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp  2
-rw-r--r--  lib/StaticAnalyzer/Checkers/Checkers.td  8
-rw-r--r--  lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp  6
-rw-r--r--  lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp  27
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp  79
-rw-r--r--  lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp  20
-rw-r--r--  lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp  110
-rw-r--r--  lib/StaticAnalyzer/Core/CFRefCount.cpp  142
-rw-r--r--  lib/StaticAnalyzer/Core/Environment.cpp  3
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp  78
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp  7
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp  36
209 files changed, 25449 insertions, 9638 deletions
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
new file mode 100644
index 000000000000..f1d947da677e
--- /dev/null
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -0,0 +1,541 @@
+//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/ADT/Triple.h"
+
+using namespace clang;
+using namespace arcmt;
+using llvm::StringRef;
+
+bool CapturedDiagList::clearDiagnostic(llvm::ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ if (range.isInvalid())
+ return false;
+
+ bool cleared = false;
+ ListTy::iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means clear all diagnostics in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ cleared = true;
+ ListTy::iterator eraseS = I++;
+ while (I != List.end() && I->getLevel() == Diagnostic::Note)
+ ++I;
+ // Clear the diagnostic and any notes following it.
+ List.erase(eraseS, I);
+ continue;
+ }
+
+ ++I;
+ }
+
+ return cleared;
+}
+
+bool CapturedDiagList::hasDiagnostic(llvm::ArrayRef<unsigned> IDs,
+ SourceRange range) const {
+ if (range.isInvalid())
+ return false;
+
+ ListTy::const_iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means any diagnostic in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ return true;
+ }
+
+ ++I;
+ }
+
+ return false;
+}
+
+void CapturedDiagList::reportDiagnostics(Diagnostic &Diags) const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ Diags.Report(*I);
+}
+
+bool CapturedDiagList::hasErrors() const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ if (I->getLevel() >= Diagnostic::Error)
+ return true;
+
+ return false;
+}
+
+namespace {
+
+class CaptureDiagnosticClient : public DiagnosticClient {
+ Diagnostic &Diags;
+ CapturedDiagList &CapturedDiags;
+public:
+ CaptureDiagnosticClient(Diagnostic &diags,
+ CapturedDiagList &capturedDiags)
+ : Diags(diags), CapturedDiags(capturedDiags) { }
+
+ virtual void HandleDiagnostic(Diagnostic::Level level,
+ const DiagnosticInfo &Info) {
+ if (arcmt::isARCDiagnostic(Info.getID(), Diags) ||
+ level >= Diagnostic::Error || level == Diagnostic::Note) {
+ CapturedDiags.push_back(StoredDiagnostic(level, Info));
+ return;
+ }
+
+ // Non-ARC warnings are ignored.
+ Diags.setLastDiagnosticIgnored();
+ }
+};
+
+} // end anonymous namespace
+
+static inline llvm::StringRef SimulatorVersionDefineName() {
+ return "__IPHONE_OS_VERSION_MIN_REQUIRED=";
+}
+
+/// \brief Parse the simulator version define:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
+/// and return the grouped values as integers, e.g.:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=40201
+/// will return Major=4, Minor=2, Micro=1.
+static bool GetVersionFromSimulatorDefine(llvm::StringRef define,
+ unsigned &Major, unsigned &Minor,
+ unsigned &Micro) {
+ assert(define.startswith(SimulatorVersionDefineName()));
+ llvm::StringRef name, version;
+ llvm::tie(name, version) = define.split('=');
+ if (version.empty())
+ return false;
+ std::string verstr = version.str();
+ char *end;
+ unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
+ if (*end != '\0')
+ return false;
+ Major = num / 10000;
+ num = num % 10000;
+ Minor = num / 100;
+ Micro = num % 100;
+ return true;
+}
+
+static bool HasARCRuntime(CompilerInvocation &origCI) {
+ // This duplicates some functionality from Darwin::AddDeploymentTarget
+ // but this function is well defined, so keep it decoupled from the driver
+ // and avoid unrelated complications.
+
+ for (unsigned i = 0, e = origCI.getPreprocessorOpts().Macros.size();
+ i != e; ++i) {
+ StringRef define = origCI.getPreprocessorOpts().Macros[i].first;
+ bool isUndef = origCI.getPreprocessorOpts().Macros[i].second;
+ if (isUndef)
+ continue;
+ if (!define.startswith(SimulatorVersionDefineName()))
+ continue;
+ unsigned Major, Minor, Micro;
+ if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
+ Major < 10 && Minor < 100 && Micro < 100)
+ return Major >= 5;
+ }
+
+ llvm::Triple triple(origCI.getTargetOpts().Triple);
+
+ if (triple.getOS() == llvm::Triple::IOS)
+ return triple.getOSMajorVersion() >= 5;
+
+ if (triple.getOS() == llvm::Triple::Darwin)
+ return triple.getOSMajorVersion() >= 11;
+
+ if (triple.getOS() == llvm::Triple::MacOSX) {
+ unsigned Major, Minor, Micro;
+ triple.getOSVersion(Major, Minor, Micro);
+ return Major > 10 || (Major == 10 && Minor >= 7);
+ }
+
+ return false;
+}
+
+CompilerInvocation *createInvocationForMigration(CompilerInvocation &origCI) {
+ llvm::OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(new CompilerInvocation(origCI));
+ CInvok->getPreprocessorOpts().ImplicitPCHInclude = std::string();
+ CInvok->getPreprocessorOpts().ImplicitPTHInclude = std::string();
+ std::string define = getARCMTMacroName();
+ define += '=';
+ CInvok->getPreprocessorOpts().addMacroDef(define);
+ CInvok->getLangOpts().ObjCAutoRefCount = true;
+ CInvok->getDiagnosticOpts().ErrorLimit = 0;
+ CInvok->getDiagnosticOpts().Warnings.push_back(
+ "error=arc-unsafe-retained-assign");
+ CInvok->getLangOpts().ObjCRuntimeHasWeak = HasARCRuntime(origCI);
+
+ return CInvok.take();
+}
+
+//===----------------------------------------------------------------------===//
+// checkForManualIssues.
+//===----------------------------------------------------------------------===//
+
+bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
+ llvm::StringRef Filename, InputKind Kind,
+ DiagnosticClient *DiagClient) {
+ if (!origCI.getLangOpts().ObjC1)
+ return false;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations();
+ assert(!transforms.empty());
+
+ llvm::OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(createInvocationForMigration(origCI));
+ CInvok->getFrontendOpts().Inputs.clear();
+ CInvok->getFrontendOpts().Inputs.push_back(std::make_pair(Kind, Filename));
+
+ CapturedDiagList capturedDiags;
+
+ assert(DiagClient);
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(
+ new Diagnostic(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ // Route all diagnostics through the capturing client.
+ CaptureDiagnosticClient errRec(*Diags, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ llvm::OwningPtr<ASTUnit> Unit(
+ ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags));
+ if (!Unit)
+ return true;
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ return true;
+ }
+
+ // After parsing of source files ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticClient requires that
+ // diagnostics with source range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+
+ // No macros will be added since we are just checking and we won't modify
+ // source code.
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, Unit->getSema(), testAct, ARCMTMacroLocs);
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i)
+ transforms[i](pass);
+
+ capturedDiags.reportDiagnostics(*Diags);
+
+ DiagClient->EndSourceFile();
+
+ // If we are migrating code that gets the '-fobjc-arc' flag, make sure
+ // to remove it so that we don't get errors from normal compilation.
+ origCI.getLangOpts().ObjCAutoRefCount = false;
+
+ return capturedDiags.hasErrors();
+}
+
+//===----------------------------------------------------------------------===//
+// applyTransformations.
+//===----------------------------------------------------------------------===//
+
+static bool applyTransforms(CompilerInvocation &origCI,
+ llvm::StringRef Filename, InputKind Kind,
+ DiagnosticClient *DiagClient,
+ llvm::StringRef outputDir) {
+ if (!origCI.getLangOpts().ObjC1)
+ return false;
+
+ // Make sure checking is successful first.
+ CompilerInvocation CInvokForCheck(origCI);
+ if (arcmt::checkForManualIssues(CInvokForCheck, Filename, Kind, DiagClient))
+ return true;
+
+ CompilerInvocation CInvok(origCI);
+ CInvok.getFrontendOpts().Inputs.clear();
+ CInvok.getFrontendOpts().Inputs.push_back(std::make_pair(Kind, Filename));
+
+ MigrationProcess migration(CInvok, DiagClient, outputDir);
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations();
+ assert(!transforms.empty());
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i) {
+ bool err = migration.applyTransform(transforms[i]);
+ if (err) return true;
+ }
+
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(
+ new Diagnostic(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ if (outputDir.empty()) {
+ origCI.getLangOpts().ObjCAutoRefCount = true;
+ return migration.getRemapper().overwriteOriginal(*Diags);
+ } else {
+ // If we are migrating code that gets the '-fobjc-arc' flag, make sure
+ // to remove it so that we don't get errors from normal compilation.
+ origCI.getLangOpts().ObjCAutoRefCount = false;
+ return migration.getRemapper().flushToDisk(outputDir, *Diags);
+ }
+}
+
+bool arcmt::applyTransformations(CompilerInvocation &origCI,
+ llvm::StringRef Filename, InputKind Kind,
+ DiagnosticClient *DiagClient) {
+ return applyTransforms(origCI, Filename, Kind, DiagClient, llvm::StringRef());
+}
+
+bool arcmt::migrateWithTemporaryFiles(CompilerInvocation &origCI,
+ llvm::StringRef Filename, InputKind Kind,
+ DiagnosticClient *DiagClient,
+ llvm::StringRef outputDir) {
+ assert(!outputDir.empty() && "Expected output directory path");
+ return applyTransforms(origCI, Filename, Kind, DiagClient, outputDir);
+}
+
+bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
+ remap,
+ llvm::StringRef outputDir,
+ DiagnosticClient *DiagClient) {
+ assert(!outputDir.empty());
+
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(
+ new Diagnostic(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ FileRemapper remapper;
+ bool err = remapper.initFromDisk(outputDir, *Diags,
+ /*ignoreIfFilesChanged=*/true);
+ if (err)
+ return true;
+
+ CompilerInvocation CI;
+ remapper.applyMappings(CI);
+ remap = CI.getPreprocessorOpts().RemappedFiles;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// CollectTransformActions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARCMTMacroTrackerPPCallbacks : public PPCallbacks {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo *MI) {
+ if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName())
+ ARCMTMacroLocs.push_back(MacroNameTok.getLocation());
+ }
+};
+
+class ARCMTMacroTrackerAction : public ASTFrontendAction {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ CI.getPreprocessor().addPPCallbacks(
+ new ARCMTMacroTrackerPPCallbacks(ARCMTMacroLocs));
+ return new ASTConsumer();
+ }
+};
+
+class RewritesApplicator : public TransformActions::RewriteReceiver {
+ Rewriter &rewriter;
+ ASTContext &Ctx;
+ MigrationProcess::RewriteListener *Listener;
+
+public:
+ RewritesApplicator(Rewriter &rewriter, ASTContext &ctx,
+ MigrationProcess::RewriteListener *listener)
+ : rewriter(rewriter), Ctx(ctx), Listener(listener) {
+ if (Listener)
+ Listener->start(ctx);
+ }
+ ~RewritesApplicator() {
+ if (Listener)
+ Listener->finish();
+ }
+
+ virtual void insert(SourceLocation loc, llvm::StringRef text) {
+ bool err = rewriter.InsertText(loc, text, /*InsertAfter=*/true,
+ /*indentNewLines=*/true);
+ if (!err && Listener)
+ Listener->insert(loc, text);
+ }
+
+ virtual void remove(CharSourceRange range) {
+ Rewriter::RewriteOptions removeOpts;
+ removeOpts.IncludeInsertsAtBeginOfRange = false;
+ removeOpts.IncludeInsertsAtEndOfRange = false;
+ removeOpts.RemoveLineIfEmpty = true;
+
+ bool err = rewriter.RemoveText(range, removeOpts);
+ if (!err && Listener)
+ Listener->remove(range);
+ }
+
+ virtual void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) {
+ rewriter.IncreaseIndentation(range, parentIndent);
+ }
+};
+
+} // end anonymous namespace.
+
+/// \brief Anchor for VTable.
+MigrationProcess::RewriteListener::~RewriteListener() { }
+
+MigrationProcess::MigrationProcess(const CompilerInvocation &CI,
+ DiagnosticClient *diagClient,
+ llvm::StringRef outputDir)
+ : OrigCI(CI), DiagClient(diagClient) {
+ if (!outputDir.empty()) {
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(
+ new Diagnostic(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+ Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true);
+ }
+}
+
+bool MigrationProcess::applyTransform(TransformFn trans,
+ RewriteListener *listener) {
+ llvm::OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(createInvocationForMigration(OrigCI));
+ CInvok->getDiagnosticOpts().IgnoreWarnings = true;
+
+ Remapper.applyMappings(*CInvok);
+
+ CapturedDiagList capturedDiags;
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ assert(DiagClient);
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<Diagnostic> Diags(
+ new Diagnostic(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ // Route all diagnostics through the capturing client.
+ CaptureDiagnosticClient errRec(*Diags, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ llvm::OwningPtr<ARCMTMacroTrackerAction> ASTAction;
+ ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs));
+
+ llvm::OwningPtr<ASTUnit> Unit(
+ ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags,
+ ASTAction.get()));
+ if (!Unit)
+ return true;
+ Unit->setOwnsRemappedFileBuffers(false); // FileRemapper manages that.
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ return true;
+ }
+
+ // After parsing of source files ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticClient requires that
+ // diagnostics with source range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOptions(), &Unit->getPreprocessor());
+
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOptions());
+ TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, Unit->getSema(), TA, ARCMTMacroLocs);
+
+ trans(pass);
+
+ {
+ RewritesApplicator applicator(rewriter, Ctx, listener);
+ TA.applyRewrites(applicator);
+ }
+
+ DiagClient->EndSourceFile();
+
+ if (DiagClient->getNumErrors())
+ return true;
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ std::string newFname = file->getName();
+ newFname += "-trans";
+ llvm::SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ vecOS.flush();
+ llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::StringRef(newText.data(), newText.size()), newFname);
+ llvm::SmallString<64> filePath(file->getName());
+ Unit->getFileManager().FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), memBuf);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// isARCDiagnostic.
+//===----------------------------------------------------------------------===//
+
+bool arcmt::isARCDiagnostic(unsigned diagID, Diagnostic &Diag) {
+ return Diag.getDiagnosticIDs()->getCategoryNumberForDiag(diagID) ==
+ diag::DiagCat_Automatic_Reference_Counting_Issue;
+}
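
As a quick orientation aid, here is a minimal sketch (outside the diff itself) of how the arcmt entry points above might be driven from a host tool. The TextDiagnosticPrinter client and the IK_ObjC input kind are assumptions based on the clang frontend of this era, and runARCCheck is a hypothetical helper name.

    #include "clang/ARCMigrate/ARCMT.h"
    #include "clang/Frontend/CompilerInvocation.h"
    #include "clang/Frontend/FrontendOptions.h"
    #include "clang/Frontend/TextDiagnosticPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical driver: returns true when issues needing manual fixes exist.
    static bool runARCCheck(clang::CompilerInvocation &CI, llvm::StringRef File) {
      clang::TextDiagnosticPrinter DiagClient(llvm::errs(),
                                              CI.getDiagnosticOpts());
      // checkForManualIssues parses File under ARC and reports the captured
      // diagnostics through DiagClient, as implemented above.
      return clang::arcmt::checkForManualIssues(CI, File, clang::IK_ObjC,
                                                &DiagClient);
    }
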
diff --git a/lib/ARCMigrate/ARCMTActions.cpp b/lib/ARCMigrate/ARCMTActions.cpp
new file mode 100644
index 000000000000..345c7452420e
--- /dev/null
+++ b/lib/ARCMigrate/ARCMTActions.cpp
@@ -0,0 +1,53 @@
+//===--- ARCMTActions.cpp - ARC Migrate Tool Frontend Actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Frontend/CompilerInstance.h"
+
+using namespace clang;
+using namespace arcmt;
+
+bool CheckAction::BeginInvocation(CompilerInstance &CI) {
+ if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentFile(),
+ getCurrentFileKind(),
+ CI.getDiagnostics().getClient()))
+ return false; // errors, stop the action.
+
+ // We only want to see warnings reported from arcmt::checkForManualIssues.
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+CheckAction::CheckAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool ModifyAction::BeginInvocation(CompilerInstance &CI) {
+ return !arcmt::applyTransformations(CI.getInvocation(),
+ getCurrentFile(), getCurrentFileKind(),
+ CI.getDiagnostics().getClient());
+}
+
+ModifyAction::ModifyAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
+ return !arcmt::migrateWithTemporaryFiles(CI.getInvocation(),
+ getCurrentFile(),
+ getCurrentFileKind(),
+ CI.getDiagnostics().getClient(),
+ MigrateDir);
+}
+
+MigrateAction::MigrateAction(FrontendAction *WrappedAction,
+ llvm::StringRef migrateDir)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir) {
+ if (MigrateDir.empty())
+ MigrateDir = "."; // use the current directory if none is given.
+}
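
A similarly hedged sketch of how these wrapper actions could be combined with a standard frontend action; SyntaxOnlyAction comes from clang/Frontend/FrontendActions.h, and makeARCCheckAction is a hypothetical factory name.

    #include "clang/ARCMigrate/ARCMTActions.h"
    #include "clang/Frontend/FrontendActions.h"

    // Hypothetical factory: check a translation unit for ARC migration issues
    // while still running a normal syntax-only pass underneath.
    static clang::FrontendAction *makeARCCheckAction() {
      // CheckAction wraps the given action, following the WrapperFrontendAction
      // pattern used above; ownership of the wrapped action is assumed to pass
      // to the wrapper.
      return new clang::arcmt::CheckAction(new clang::SyntaxOnlyAction());
    }
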
diff --git a/lib/ARCMigrate/CMakeLists.txt b/lib/ARCMigrate/CMakeLists.txt
new file mode 100644
index 000000000000..5f2711e36f26
--- /dev/null
+++ b/lib/ARCMigrate/CMakeLists.txt
@@ -0,0 +1,24 @@
+set(LLVM_USED_LIBS clangBasic clangAST clangParse clangFrontend clangRewrite)
+
+add_clang_library(clangARCMigrate
+ ARCMT.cpp
+ ARCMTActions.cpp
+ FileRemapper.cpp
+ TransARCAssign.cpp
+ TransAutoreleasePool.cpp
+ TransBlockObjCVariable.cpp
+ TransEmptyStatementsAndDealloc.cpp
+ TransformActions.cpp
+ Transforms.cpp
+ TransProperties.cpp
+ TransRetainReleaseDealloc.cpp
+ TransUnbridgedCasts.cpp
+ TransUnusedInitDelegate.cpp
+ TransZeroOutPropsInDealloc.cpp
+ )
+
+add_dependencies(clangARCMigrate
+ ClangAttrClasses
+ ClangAttrList
+ ClangDeclNodes
+ ClangStmtNodes)
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
new file mode 100644
index 000000000000..db26c29a3dda
--- /dev/null
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -0,0 +1,291 @@
+//===--- FileRemapper.cpp - File Remapping Helper -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/FileRemapper.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+
+using namespace clang;
+using namespace arcmt;
+
+FileRemapper::FileRemapper() {
+ FileMgr.reset(new FileManager(FileSystemOptions()));
+}
+
+FileRemapper::~FileRemapper() {
+ clear();
+}
+
+void FileRemapper::clear(llvm::StringRef outputDir) {
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I)
+ resetTarget(I->second);
+ FromToMappings.clear();
+ assert(ToFromMappings.empty());
+ if (!outputDir.empty()) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ bool existed;
+ llvm::sys::fs::remove(infoFile, existed);
+ }
+}
+
+std::string FileRemapper::getRemapInfoFile(llvm::StringRef outputDir) {
+ assert(!outputDir.empty());
+ llvm::sys::Path dir(outputDir);
+ llvm::sys::Path infoFile = dir;
+ infoFile.appendComponent("remap");
+ return infoFile.str();
+}
+
+bool FileRemapper::initFromDisk(llvm::StringRef outputDir, Diagnostic &Diag,
+ bool ignoreIfFilesChanged) {
+ assert(FromToMappings.empty() &&
+ "initFromDisk should be called before any remap calls");
+ std::string infoFile = getRemapInfoFile(outputDir);
+ bool fileExists = false;
+ llvm::sys::fs::exists(infoFile, fileExists);
+ if (!fileExists)
+ return false;
+
+ std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
+
+ std::ifstream fin(infoFile.c_str());
+ if (!fin.good())
+ return report(std::string("Error opening file: ") + infoFile, Diag);
+
+ while (true) {
+ std::string fromFilename, toFilename;
+ uint64_t timeModified;
+
+ fin >> fromFilename >> timeModified >> toFilename;
+ if (fin.eof())
+ break;
+ if (!fin.good())
+ return report(std::string("Error in format of file: ") + infoFile, Diag);
+
+ const FileEntry *origFE = FileMgr->getFile(fromFilename);
+ if (!origFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report(std::string("File does not exist: ") + fromFilename, Diag);
+ }
+ const FileEntry *newFE = FileMgr->getFile(toFilename);
+ if (!newFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report(std::string("File does not exist: ") + toFilename, Diag);
+ }
+
+ if ((uint64_t)origFE->getModificationTime() != timeModified) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report(std::string("File was modified: ") + fromFilename, Diag);
+ }
+
+ pairs.push_back(std::make_pair(origFE, newFE));
+ }
+
+ for (unsigned i = 0, e = pairs.size(); i != e; ++i)
+ remap(pairs[i].first, pairs[i].second);
+
+ return false;
+}
+
+bool FileRemapper::flushToDisk(llvm::StringRef outputDir, Diagnostic &Diag) {
+ using namespace llvm::sys;
+
+ bool existed;
+ if (fs::create_directory(outputDir, existed) != llvm::errc::success)
+ return report(std::string("Could not create directory: ") + outputDir.str(),
+ Diag);
+
+ std::string errMsg;
+ std::string infoFile = getRemapInfoFile(outputDir);
+ llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg,
+ llvm::raw_fd_ostream::F_Binary);
+ if (!errMsg.empty())
+ return report(errMsg, Diag);
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+
+ const FileEntry *origFE = I->first;
+ llvm::SmallString<200> origPath = llvm::StringRef(origFE->getName());
+ fs::make_absolute(origPath);
+ infoOut << origPath << '\n';
+ infoOut << (uint64_t)origFE->getModificationTime() << '\n';
+
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ llvm::SmallString<200> newPath = llvm::StringRef(FE->getName());
+ fs::make_absolute(newPath);
+ infoOut << newPath << '\n';
+ } else {
+
+ llvm::SmallString<64> tempPath;
+ tempPath = path::filename(origFE->getName());
+ tempPath += "-%%%%%%%%";
+ tempPath += path::extension(origFE->getName());
+ int fd;
+ if (fs::unique_file(tempPath.str(), fd, tempPath) != llvm::errc::success)
+ return report(std::string("Could not create file: ") + tempPath.c_str(),
+ Diag);
+
+ llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true);
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ newOut.write(mem->getBufferStart(), mem->getBufferSize());
+ newOut.close();
+
+ const FileEntry *newE = FileMgr->getFile(tempPath);
+ remap(origFE, newE);
+ infoOut << newE->getName() << '\n';
+ }
+ }
+
+ infoOut.close();
+ return false;
+}
+
+bool FileRemapper::overwriteOriginal(Diagnostic &Diag,
+ llvm::StringRef outputDir) {
+ using namespace llvm::sys;
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ const FileEntry *origFE = I->first;
+ if (const FileEntry *newFE = I->second.dyn_cast<const FileEntry *>()) {
+ if (fs::copy_file(newFE->getName(), origFE->getName(),
+ fs::copy_option::overwrite_if_exists) != llvm::errc::success) {
+ std::string err;
+ llvm::raw_string_ostream os(err);
+ os << "Could not copy file '" << newFE->getName() << "' to file '"
+ << origFE->getName() << "'";
+ os.flush();
+ return report(err, Diag);
+ }
+ } else {
+
+ bool fileExists = false;
+ fs::exists(origFE->getName(), fileExists);
+ if (!fileExists)
+ return report(std::string("File does not exist: ") + origFE->getName(),
+ Diag);
+
+ std::string errMsg;
+ llvm::raw_fd_ostream Out(origFE->getName(), errMsg,
+ llvm::raw_fd_ostream::F_Binary);
+ if (!errMsg.empty())
+ return report(errMsg, Diag);
+
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ Out.write(mem->getBufferStart(), mem->getBufferSize());
+ Out.close();
+ }
+ }
+
+ clear(outputDir);
+ return false;
+}
+
+void FileRemapper::applyMappings(CompilerInvocation &CI) const {
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ for (MappingsTy::const_iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ } else {
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ PPOpts.addRemappedFile(I->first->getName(), mem);
+ }
+ }
+
+ PPOpts.RetainRemappedFileBuffers = true;
+}
+
+void FileRemapper::transferMappingsAndClear(CompilerInvocation &CI) {
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ } else {
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ PPOpts.addRemappedFile(I->first->getName(), mem);
+ }
+ I->second = Target();
+ }
+
+ PPOpts.RetainRemappedFileBuffers = false;
+ clear();
+}
+
+void FileRemapper::remap(llvm::StringRef filePath, llvm::MemoryBuffer *memBuf) {
+ remap(getOriginalFile(filePath), memBuf);
+}
+
+void FileRemapper::remap(llvm::StringRef filePath, llvm::StringRef newPath) {
+ const FileEntry *file = getOriginalFile(filePath);
+ const FileEntry *newfile = FileMgr->getFile(newPath);
+ remap(file, newfile);
+}
+
+void FileRemapper::remap(const FileEntry *file, llvm::MemoryBuffer *memBuf) {
+ assert(file);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = memBuf;
+}
+
+void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
+ assert(file && newfile);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = newfile;
+ ToFromMappings[newfile] = file;
+}
+
+const FileEntry *FileRemapper::getOriginalFile(llvm::StringRef filePath) {
+ const FileEntry *file = FileMgr->getFile(filePath);
+ // If we are updating a file that overrides an original file,
+ // actually update the original file.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ I = ToFromMappings.find(file);
+ if (I != ToFromMappings.end()) {
+ file = I->second;
+ assert(FromToMappings.find(file) != FromToMappings.end() &&
+ "Original file not in mappings!");
+ }
+ return file;
+}
+
+void FileRemapper::resetTarget(Target &targ) {
+ if (!targ)
+ return;
+
+ if (llvm::MemoryBuffer *oldmem = targ.dyn_cast<llvm::MemoryBuffer *>()) {
+ delete oldmem;
+ } else {
+ const FileEntry *toFE = targ.get<const FileEntry *>();
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ I = ToFromMappings.find(toFE);
+ if (I != ToFromMappings.end())
+ ToFromMappings.erase(I);
+ }
+}
+
+bool FileRemapper::report(const std::string &err, Diagnostic &Diag) {
+ unsigned ID = Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
+ err);
+ Diag.Report(ID);
+ return true;
+}
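
To illustrate the typical remap-then-persist flow the methods above support, a minimal sketch follows; remapAndFlush is a hypothetical helper, and an already-configured Diagnostic engine is assumed.

    #include "clang/ARCMigrate/FileRemapper.h"
    #include "clang/Basic/Diagnostic.h"
    #include "llvm/Support/MemoryBuffer.h"

    // Hypothetical helper: replace a file's contents with an in-memory buffer
    // and record the mapping on disk so a later initFromDisk() can pick it up.
    static bool remapAndFlush(clang::arcmt::FileRemapper &Remapper,
                              llvm::StringRef Path, llvm::StringRef NewText,
                              llvm::StringRef OutputDir,
                              clang::Diagnostic &Diags) {
      Remapper.remap(Path, llvm::MemoryBuffer::getMemBufferCopy(NewText));
      return Remapper.flushToDisk(OutputDir, Diags); // true signals an error
    }
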
diff --git a/lib/ARCMigrate/Internals.h b/lib/ARCMigrate/Internals.h
new file mode 100644
index 000000000000..4f9b138a06ce
--- /dev/null
+++ b/lib/ARCMigrate/Internals.h
@@ -0,0 +1,148 @@
+//===-- Internals.h - Implementation Details---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+
+#include "clang/ARCMigrate/ARCMT.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+ class Sema;
+ class Stmt;
+
+namespace arcmt {
+
+class CapturedDiagList {
+ typedef std::list<StoredDiagnostic> ListTy;
+ ListTy List;
+
+public:
+ void push_back(const StoredDiagnostic &diag) { List.push_back(diag); }
+
+ bool clearDiagnostic(llvm::ArrayRef<unsigned> IDs, SourceRange range);
+ bool hasDiagnostic(llvm::ArrayRef<unsigned> IDs, SourceRange range) const;
+
+ void reportDiagnostics(Diagnostic &diags) const;
+
+ bool hasErrors() const;
+};
+
+class TransformActions {
+ Diagnostic &Diags;
+ CapturedDiagList &CapturedDiags;
+ void *Impl; // TransformActionsImpl.
+
+public:
+ TransformActions(Diagnostic &diag, CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP);
+ ~TransformActions();
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ void insert(SourceLocation loc, llvm::StringRef text);
+ void insertAfterToken(SourceLocation loc, llvm::StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, llvm::StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, llvm::StringRef text);
+ void replaceText(SourceLocation loc, llvm::StringRef text,
+ llvm::StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(llvm::ArrayRef<unsigned> IDs, SourceRange range);
+ bool clearAllDiagnostics(SourceRange range) {
+ return clearDiagnostic(llvm::ArrayRef<unsigned>(), range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return clearDiagnostic(IDs, range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, unsigned ID3,
+ SourceRange range) {
+ unsigned IDs[] = { ID1, ID2, ID3 };
+ return clearDiagnostic(IDs, range);
+ }
+
+ bool hasDiagnostic(unsigned ID, SourceRange range) {
+ return CapturedDiags.hasDiagnostic(ID, range);
+ }
+
+ bool hasDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return CapturedDiags.hasDiagnostic(IDs, range);
+ }
+
+ void reportError(llvm::StringRef error, SourceLocation loc,
+ SourceRange range = SourceRange());
+ void reportNote(llvm::StringRef note, SourceLocation loc,
+ SourceRange range = SourceRange());
+
+ class RewriteReceiver {
+ public:
+ virtual ~RewriteReceiver();
+
+ virtual void insert(SourceLocation loc, llvm::StringRef text) = 0;
+ virtual void remove(CharSourceRange range) = 0;
+ virtual void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) = 0;
+ };
+
+ void applyRewrites(RewriteReceiver &receiver);
+};
+
+class Transaction {
+ TransformActions &TA;
+ bool Aborted;
+
+public:
+ Transaction(TransformActions &TA) : TA(TA), Aborted(false) {
+ TA.startTransaction();
+ }
+
+ ~Transaction() {
+ if (!isAborted())
+ TA.commitTransaction();
+ }
+
+ void abort() {
+ TA.abortTransaction();
+ Aborted = true;
+ }
+
+ bool isAborted() const { return Aborted; }
+};
+
+class MigrationPass {
+public:
+ ASTContext &Ctx;
+ Sema &SemaRef;
+ TransformActions &TA;
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+ MigrationPass(ASTContext &Ctx, Sema &sema, TransformActions &TA,
+ std::vector<SourceLocation> &ARCMTMacroLocs)
+ : Ctx(Ctx), SemaRef(sema), TA(TA), ARCMTMacroLocs(ARCMTMacroLocs) { }
+};
+
+bool isARCDiagnostic(unsigned diagID, Diagnostic &Diag);
+
+static inline llvm::StringRef getARCMTMacroName() {
+ return "__IMPL_ARCMT_REMOVED_EXPR__";
+}
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
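
To make the Transaction RAII idiom above concrete, a short sketch of how a transform typically uses it (the same pattern appears in TransARCAssign.cpp further down). removeIfCleared and diagID are illustrative names only, and the includes assume the code lives inside lib/ARCMigrate, since Internals.h is a private header.

    #include "Internals.h"
    #include "clang/AST/Stmt.h"

    // Sketch only: commit the rewrite if the matching diagnostic can be
    // cleared, otherwise abort so nothing is applied to the source.
    static void removeIfCleared(clang::arcmt::MigrationPass &pass,
                                clang::Stmt *S, unsigned diagID) {
      clang::arcmt::Transaction Trans(pass.TA);   // commits on scope exit
      if (pass.TA.clearDiagnostic(diagID, S->getSourceRange()))
        pass.TA.removeStmt(S);
      else
        Trans.abort();                            // leave the source untouched
    }
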
diff --git a/lib/ARCMigrate/Makefile b/lib/ARCMigrate/Makefile
new file mode 100644
index 000000000000..5232c5e5aff4
--- /dev/null
+++ b/lib/ARCMigrate/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/ARCMigrate/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements code transformation to ARC mode.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+LIBRARYNAME := clangARCMigrate
+
+include $(CLANG_LEVEL)/Makefile
+
diff --git a/lib/ARCMigrate/TransARCAssign.cpp b/lib/ARCMigrate/TransARCAssign.cpp
new file mode 100644
index 000000000000..8c00df5daa03
--- /dev/null
+++ b/lib/ARCMigrate/TransARCAssign.cpp
@@ -0,0 +1,75 @@
+//===--- TransARCAssign.cpp - Transformations to ARC mode -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// makeAssignARCSafe:
+//
+// Add '__strong' where appropriate.
+//
+// for (id x in collection) {
+// x = 0;
+// }
+// ---->
+// for (__strong id x in collection) {
+// x = 0;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class ARCAssignChecker : public RecursiveASTVisitor<ARCAssignChecker> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> ModifiedVars;
+
+public:
+ ARCAssignChecker(MigrationPass &pass) : Pass(pass) { }
+
+ bool VisitBinaryOperator(BinaryOperator *Exp) {
+ Expr *E = Exp->getLHS();
+ SourceLocation OrigLoc = E->getExprLoc();
+ SourceLocation Loc = OrigLoc;
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ ASTContext &Ctx = Pass.Ctx;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(Ctx, &Loc);
+ if (IsLV != Expr::MLV_ConstQualified)
+ return true;
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+ if (var->isARCPseudoStrong()) {
+ Transaction Trans(Pass.TA);
+ if (Pass.TA.clearDiagnostic(diag::err_typecheck_arr_assign_enumeration,
+ Exp->getOperatorLoc())) {
+ if (!ModifiedVars.count(var)) {
+ TypeLoc TLoc = var->getTypeSourceInfo()->getTypeLoc();
+ Pass.TA.insert(TLoc.getBeginLoc(), "__strong ");
+ ModifiedVars.insert(var);
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::makeAssignARCSafe(MigrationPass &pass) {
+ ARCAssignChecker assignCheck(pass);
+ assignCheck.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/lib/ARCMigrate/TransAutoreleasePool.cpp b/lib/ARCMigrate/TransAutoreleasePool.cpp
new file mode 100644
index 000000000000..5b8485432c52
--- /dev/null
+++ b/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -0,0 +1,436 @@
+//===--- TransAutoreleasePool.cpp - Transformations to ARC mode -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteAutoreleasePool:
+//
+// Uses of NSAutoreleasePool will be rewritten as an @autoreleasepool scope.
+//
+// NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+// ...
+// [pool release];
+// ---->
+// @autoreleasepool {
+// ...
+// }
+//
+// An NSAutoreleasePool will not be touched if:
+// - There is no corresponding -release/-drain in the same scope
+// - Not all references of the NSAutoreleasePool variable can be removed
+// - There is a variable that is declared inside the intended @autoreleasepool
+// scope which is also used outside it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class ReleaseCollector : public RecursiveASTVisitor<ReleaseCollector> {
+ Decl *Dcl;
+ llvm::SmallVectorImpl<ObjCMessageExpr *> &Releases;
+
+public:
+ ReleaseCollector(Decl *D, llvm::SmallVectorImpl<ObjCMessageExpr *> &releases)
+ : Dcl(D), Releases(releases) { }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (!E->isInstanceMessage())
+ return true;
+ if (E->getMethodFamily() != OMF_release)
+ return true;
+ Expr *instance = E->getInstanceReceiver()->IgnoreParenCasts();
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(instance)) {
+ if (DE->getDecl() == Dcl)
+ Releases.push_back(E);
+ }
+ return true;
+ }
+};
+
+}
+
+namespace {
+
+class AutoreleasePoolRewriter
+ : public RecursiveASTVisitor<AutoreleasePoolRewriter> {
+public:
+ AutoreleasePoolRewriter(MigrationPass &pass)
+ : Body(0), Pass(pass) {
+ PoolII = &pass.Ctx.Idents.get("NSAutoreleasePool");
+ DrainSel = pass.Ctx.Selectors.getNullarySelector(
+ &pass.Ctx.Idents.get("drain"));
+ }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ TraverseStmt(body);
+ }
+
+ ~AutoreleasePoolRewriter() {
+ llvm::SmallVector<VarDecl *, 8> VarsToHandle;
+
+ for (std::map<VarDecl *, PoolVarInfo>::iterator
+ I = PoolVars.begin(), E = PoolVars.end(); I != E; ++I) {
+ VarDecl *var = I->first;
+ PoolVarInfo &info = I->second;
+
+ // Check that we can handle/rewrite all references of the pool.
+
+ clearRefsIn(info.Dcl, info.Refs);
+ for (llvm::SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearRefsIn(*scope.Begin, info.Refs);
+ clearRefsIn(*scope.End, info.Refs);
+ clearRefsIn(scope.Releases.begin(), scope.Releases.end(), info.Refs);
+ }
+
+ // If even one reference is not handled, we will not do anything about
+ // that pool variable.
+ if (info.Refs.empty())
+ VarsToHandle.push_back(var);
+ }
+
+ for (unsigned i = 0, e = VarsToHandle.size(); i != e; ++i) {
+ PoolVarInfo &info = PoolVars[VarsToHandle[i]];
+
+ Transaction Trans(Pass.TA);
+
+ clearUnavailableDiags(info.Dcl);
+ Pass.TA.removeStmt(info.Dcl);
+
+ // Add "@autoreleasepool { }"
+ for (llvm::SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearUnavailableDiags(*scope.Begin);
+ clearUnavailableDiags(*scope.End);
+ if (scope.IsFollowedBySimpleReturnStmt) {
+ // Include the return in the scope.
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.removeStmt(*scope.End);
+ Stmt::child_iterator retI = scope.End;
+ ++retI;
+ SourceLocation afterSemi = findLocationAfterSemi((*retI)->getLocEnd(),
+ Pass.Ctx);
+ assert(afterSemi.isValid() &&
+ "Didn't we check before setting IsFollowedBySimpleReturnStmt "
+ "to true?");
+ Pass.TA.insertAfterToken(afterSemi, "\n}");
+ Pass.TA.increaseIndentation(
+ SourceRange(scope.getIndentedRange().getBegin(),
+ (*retI)->getLocEnd()),
+ scope.CompoundParent->getLocStart());
+ } else {
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.replaceStmt(*scope.End, "}");
+ Pass.TA.increaseIndentation(scope.getIndentedRange(),
+ scope.CompoundParent->getLocStart());
+ }
+ }
+
+ // Remove rest of pool var references.
+ for (llvm::SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ for (llvm::SmallVectorImpl<ObjCMessageExpr *>::iterator
+ relI = scope.Releases.begin(),
+ relE = scope.Releases.end(); relI != relE; ++relI) {
+ clearUnavailableDiags(*relI);
+ Pass.TA.removeStmt(*relI);
+ }
+ }
+ }
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ llvm::SmallVector<PoolScope, 4> Scopes;
+
+ for (Stmt::child_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ Stmt *child = getEssential(*I);
+ if (DeclStmt *DclS = dyn_cast<DeclStmt>(child)) {
+ if (DclS->isSingleDecl()) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(DclS->getSingleDecl())) {
+ if (isNSAutoreleasePool(VD->getType())) {
+ PoolVarInfo &info = PoolVars[VD];
+ info.Dcl = DclS;
+ collectRefs(VD, S, info.Refs);
+ // Does this statement follow the pattern:
+ // NSAutoreleasePool * pool = [NSAutoreleasePool new];
+ if (isPoolCreation(VD->getInit())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(child)) {
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(bop->getLHS())) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(dref->getDecl())) {
+ // Does this statement follow the pattern:
+ // pool = [NSAutoreleasePool new];
+ if (isNSAutoreleasePool(VD->getType()) &&
+ isPoolCreation(bop->getRHS())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+
+ if (Scopes.empty())
+ continue;
+
+ if (isPoolDrain(Scopes.back().PoolVar, child)) {
+ PoolScope &scope = Scopes.back();
+ scope.End = I;
+ handlePoolScope(scope, S);
+ Scopes.pop_back();
+ }
+ }
+ return true;
+ }
+
+private:
+ void clearUnavailableDiags(Stmt *S) {
+ if (S)
+ Pass.TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ S->getSourceRange());
+ }
+
+ struct PoolScope {
+ VarDecl *PoolVar;
+ CompoundStmt *CompoundParent;
+ Stmt::child_iterator Begin;
+ Stmt::child_iterator End;
+ bool IsFollowedBySimpleReturnStmt;
+ llvm::SmallVector<ObjCMessageExpr *, 4> Releases;
+
+ PoolScope() : PoolVar(0), CompoundParent(0), Begin(), End(),
+ IsFollowedBySimpleReturnStmt(false) { }
+
+ SourceRange getIndentedRange() const {
+ Stmt::child_iterator rangeS = Begin;
+ ++rangeS;
+ if (rangeS == End)
+ return SourceRange();
+ Stmt::child_iterator rangeE = Begin;
+ for (Stmt::child_iterator I = rangeS; I != End; ++I)
+ ++rangeE;
+ return SourceRange((*rangeS)->getLocStart(), (*rangeE)->getLocEnd());
+ }
+ };
+
+ class NameReferenceChecker : public RecursiveASTVisitor<NameReferenceChecker>{
+ ASTContext &Ctx;
+ SourceRange ScopeRange;
+ SourceLocation &referenceLoc, &declarationLoc;
+
+ public:
+ NameReferenceChecker(ASTContext &ctx, PoolScope &scope,
+ SourceLocation &referenceLoc,
+ SourceLocation &declarationLoc)
+ : Ctx(ctx), referenceLoc(referenceLoc),
+ declarationLoc(declarationLoc) {
+ ScopeRange = SourceRange((*scope.Begin)->getLocStart(),
+ (*scope.End)->getLocStart());
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ return checkRef(E->getLocation(), E->getDecl()->getLocation());
+ }
+
+ bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ return checkRef(E->getLocation(), E->getDecl()->getLocation());
+ }
+
+ bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getTypedefNameDecl()->getLocation());
+ }
+
+ bool VisitTagTypeLoc(TagTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getDecl()->getLocation());
+ }
+
+ private:
+ bool checkRef(SourceLocation refLoc, SourceLocation declLoc) {
+ if (isInScope(declLoc)) {
+ referenceLoc = refLoc;
+ declarationLoc = declLoc;
+ return false;
+ }
+ return true;
+ }
+
+ bool isInScope(SourceLocation loc) {
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isBeforeInTranslationUnit(loc, ScopeRange.getBegin()))
+ return false;
+ return SM.isBeforeInTranslationUnit(loc, ScopeRange.getEnd());
+ }
+ };
+
+ void handlePoolScope(PoolScope &scope, CompoundStmt *compoundS) {
+ // Check that all names declared inside the scope are not used
+ // outside the scope.
+ {
+ bool nameUsedOutsideScope = false;
+ SourceLocation referenceLoc, declarationLoc;
+ Stmt::child_iterator SI = scope.End, SE = compoundS->body_end();
+ ++SI;
+ // Check if the autoreleasepool scope is followed by a simple return
+ // statement, in which case we will include the return in the scope.
+ if (SI != SE)
+ if (ReturnStmt *retS = dyn_cast<ReturnStmt>(*SI))
+ if ((retS->getRetValue() == 0 ||
+ isa<DeclRefExpr>(retS->getRetValue()->IgnoreParenCasts())) &&
+ findLocationAfterSemi(retS->getLocEnd(), Pass.Ctx).isValid()) {
+ scope.IsFollowedBySimpleReturnStmt = true;
+ ++SI; // the return will be included in scope, don't check it.
+ }
+
+ for (; SI != SE; ++SI) {
+ nameUsedOutsideScope = !NameReferenceChecker(Pass.Ctx, scope,
+ referenceLoc,
+ declarationLoc).TraverseStmt(*SI);
+ if (nameUsedOutsideScope)
+ break;
+ }
+
+ // If not all references were cleared it means some variables/typenames/etc
+ // declared inside the pool scope are used outside of it.
+ // We won't try to rewrite the pool.
+ if (nameUsedOutsideScope) {
+ Pass.TA.reportError("a name is referenced outside the "
+ "NSAutoreleasePool scope that it was declared in", referenceLoc);
+ Pass.TA.reportNote("name declared here", declarationLoc);
+ Pass.TA.reportNote("intended @autoreleasepool scope begins here",
+ (*scope.Begin)->getLocStart());
+ Pass.TA.reportNote("intended @autoreleasepool scope ends here",
+ (*scope.End)->getLocStart());
+ return;
+ }
+ }
+
+ // Collect all releases of the pool; they will be removed.
+ {
+ ReleaseCollector releaseColl(scope.PoolVar, scope.Releases);
+ Stmt::child_iterator I = scope.Begin;
+ ++I;
+ for (; I != scope.End; ++I)
+ releaseColl.TraverseStmt(*I);
+ }
+
+ PoolVars[scope.PoolVar].Scopes.push_back(scope);
+ }
+
+ bool isPoolCreation(Expr *E) {
+ if (!E) return false;
+ E = getEssential(E);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME) return false;
+ if (ME->getMethodFamily() == OMF_new &&
+ ME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(ME->getReceiverInterface()))
+ return true;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance &&
+ ME->getMethodFamily() == OMF_init) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (ObjCMessageExpr *recME = dyn_cast_or_null<ObjCMessageExpr>(rec)) {
+ if (recME->getMethodFamily() == OMF_alloc &&
+ recME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(recME->getReceiverInterface()))
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool isPoolDrain(VarDecl *poolVar, Stmt *S) {
+ if (!S) return false;
+ S = getEssential(S);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ if (!ME) return false;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(rec))
+ if (dref->getDecl() == poolVar)
+ return ME->getMethodFamily() == OMF_release ||
+ ME->getSelector() == DrainSel;
+ }
+
+ return false;
+ }
+
+ bool isNSAutoreleasePool(ObjCInterfaceDecl *IDecl) {
+ return IDecl && IDecl->getIdentifier() == PoolII;
+ }
+
+ bool isNSAutoreleasePool(QualType Ty) {
+ QualType pointee = Ty->getPointeeType();
+ if (pointee.isNull())
+ return false;
+ if (const ObjCInterfaceType *interT = pointee->getAs<ObjCInterfaceType>())
+ return isNSAutoreleasePool(interT->getDecl());
+ return false;
+ }
+
+ static Expr *getEssential(Expr *E) {
+ return cast<Expr>(getEssential((Stmt*)E));
+ }
+ static Stmt *getEssential(Stmt *S) {
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
+ S = EWC->getSubExpr();
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParenCasts();
+ return S;
+ }
+
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ IdentifierInfo *PoolII;
+ Selector DrainSel;
+
+ struct PoolVarInfo {
+ DeclStmt *Dcl;
+ ExprSet Refs;
+ llvm::SmallVector<PoolScope, 2> Scopes;
+
+ PoolVarInfo() : Dcl(0) { }
+ };
+
+ std::map<VarDecl *, PoolVarInfo> PoolVars;
+};
+
+} // anonymous namespace
+
+void trans::rewriteAutoreleasePool(MigrationPass &pass) {
+ BodyTransform<AutoreleasePoolRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
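
The pool-scope check above only rewrites an NSAutoreleasePool block when nothing declared inside it is referenced after it; NameReferenceChecker signals a violation by returning false from a Visit* method, which aborts the whole traversal. Below is a minimal standalone C++ sketch of that check, using plain integer offsets in place of SourceLocations and a flat list of references in place of an AST walk; it is an assumed simplification, not the migrator's API.

#include <iostream>
#include <vector>

// A reference site together with the location of the declaration it refers to.
struct Ref { unsigned refOffset, declOffset; };

// Mirrors checkRef/isInScope: report the first reference whose declaration
// falls inside [scopeBegin, scopeEnd) and stop, just as returning false from
// a Visit* method stops the RecursiveASTVisitor traversal.
static bool checkRefs(const std::vector<Ref> &refsAfterScope,
                      unsigned scopeBegin, unsigned scopeEnd, Ref &offending) {
  for (const Ref &r : refsAfterScope) {
    if (r.declOffset >= scopeBegin && r.declOffset < scopeEnd) {
      offending = r;
      return false; // name used outside the scope -> do not rewrite the pool
    }
  }
  return true;
}

int main() {
  std::vector<Ref> refsAfterScope = {{120, 40}, {130, 75}};
  Ref bad = {};
  if (!checkRefs(refsAfterScope, 50, 100, bad))
    std::cout << "declaration at offset " << bad.declOffset
              << " is referenced outside the pool scope (at offset "
              << bad.refOffset << ")\n";
  return 0;
}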
diff --git a/lib/ARCMigrate/TransBlockObjCVariable.cpp b/lib/ARCMigrate/TransBlockObjCVariable.cpp
new file mode 100644
index 000000000000..0e342b7a8f8c
--- /dev/null
+++ b/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -0,0 +1,143 @@
+//===--- TransBlockObjCVariable.cpp - Transformations to ARC mode ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteBlockObjCVariable:
+//
+// Adding __block to an Objective-C variable can be done either because the
+// variable is used for output storage or because the user wanted to break a
+// retain cycle. This transformation checks whether the block actually needs a
+// reference to the variable (i.e. it is assigned to or its address is taken)
+// or only its value. If the reference is not needed, it assumes __block was
+// added to break a cycle, so it removes '__block' and adds
+// __weak/__unsafe_unretained.
+// e.g.
+//
+// __block Foo *x;
+// bar(^ { [x cake]; });
+// ---->
+// __weak Foo *x;
+// bar(^ { [x cake]; });
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class RootBlockObjCVarRewriter :
+ public RecursiveASTVisitor<RootBlockObjCVarRewriter> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> CheckedVars;
+
+ class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> {
+ VarDecl *Var;
+
+ typedef RecursiveASTVisitor<BlockVarChecker> base;
+ public:
+ BlockVarChecker(VarDecl *var) : Var(var) { }
+
+ bool TraverseImplicitCastExpr(ImplicitCastExpr *castE) {
+ if (BlockDeclRefExpr *
+ ref = dyn_cast<BlockDeclRefExpr>(castE->getSubExpr())) {
+ if (ref->getDecl() == Var) {
+ if (castE->getCastKind() == CK_LValueToRValue)
+ return true; // Using the value of the variable.
+ if (castE->getCastKind() == CK_NoOp && castE->isLValue() &&
+ Var->getASTContext().getLangOptions().CPlusPlus)
+ return true; // Binding to const C++ reference.
+ }
+ }
+
+ return base::TraverseImplicitCastExpr(castE);
+ }
+
+ bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ if (E->getDecl() == Var)
+ return false; // The reference of the variable, and not just its value,
+ // is needed.
+ return true;
+ }
+ };
+
+public:
+ RootBlockObjCVarRewriter(MigrationPass &pass) : Pass(pass) { }
+
+ bool VisitBlockDecl(BlockDecl *block) {
+ llvm::SmallVector<VarDecl *, 4> BlockVars;
+
+ for (BlockDecl::capture_iterator
+ I = block->capture_begin(), E = block->capture_end(); I != E; ++I) {
+ VarDecl *var = I->getVariable();
+ if (I->isByRef() &&
+ !isAlreadyChecked(var) &&
+ var->getType()->isObjCObjectPointerType() &&
+ isImplicitStrong(var->getType())) {
+ BlockVars.push_back(var);
+ }
+ }
+
+ for (unsigned i = 0, e = BlockVars.size(); i != e; ++i) {
+ VarDecl *var = BlockVars[i];
+ CheckedVars.insert(var);
+
+ BlockVarChecker checker(var);
+ bool onlyValueOfVarIsNeeded = checker.TraverseStmt(block->getBody());
+ if (onlyValueOfVarIsNeeded) {
+ BlocksAttr *attr = var->getAttr<BlocksAttr>();
+ if (!attr)
+ continue;
+ bool useWeak = canApplyWeak(Pass.Ctx, var->getType());
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ Transaction Trans(Pass.TA);
+ Pass.TA.replaceText(SM.getInstantiationLoc(attr->getLocation()),
+ "__block",
+ useWeak ? "__weak" : "__unsafe_unretained");
+ }
+
+ }
+
+ return true;
+ }
+
+private:
+ bool isAlreadyChecked(VarDecl *VD) {
+ return CheckedVars.count(VD);
+ }
+
+ bool isImplicitStrong(QualType ty) {
+ if (isa<AttributedType>(ty.getTypePtr()))
+ return false;
+ return ty.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong;
+ }
+};
+
+class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> {
+ MigrationPass &Pass;
+
+public:
+ BlockObjCVarRewriter(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseBlockDecl(BlockDecl *block) {
+ RootBlockObjCVarRewriter(Pass).TraverseDecl(block);
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::rewriteBlockObjCVariable(MigrationPass &pass) {
+ BlockObjCVarRewriter trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
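
The rewrite above hinges on the question BlockVarChecker answers: does the block need the variable itself (it is assigned to, or bound as a C++ reference), or only its value? A minimal standalone sketch of that decision, with an assumed Use enum standing in for the AST cases; it is an illustration, not the pass's actual logic.

#include <iostream>
#include <vector>

// The ways a captured variable can be used inside the block, reduced to what
// the decision needs.
enum class Use { ReadValue, Assigned, BoundByReference };

// Keep '__block' only if some use needs the variable itself; otherwise the
// qualifier can be replaced by __weak (or __unsafe_unretained).
static bool onlyValueIsNeeded(const std::vector<Use> &usesInBlock) {
  for (Use u : usesInBlock)
    if (u != Use::ReadValue)
      return false;
  return true;
}

int main() {
  std::vector<Use> uses = {Use::ReadValue, Use::ReadValue};
  const char *qualifier = onlyValueIsNeeded(uses) ? "__weak" : "__block";
  std::cout << qualifier << " Foo *x;\n"; // prints: __weak Foo *x;
  return 0;
}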
diff --git a/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
new file mode 100644
index 000000000000..d0bc332ff160
--- /dev/null
+++ b/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -0,0 +1,211 @@
+//===--- TransEmptyStatementsAndDealloc.cpp - Transformations to ARC mode -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeEmptyStatementsAndDealloc:
+//
+// Removes empty statements that are leftovers from previous transformations.
+// e.g. for
+//
+// [x retain];
+//
+// removeRetainReleaseDealloc will leave an empty ";" that
+// removeEmptyStatementsAndDealloc will remove.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/StmtVisitor.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+/// \brief Visitor that returns true if the visited statement became empty
+/// due to previous transformations.
+class EmptyChecker : public StmtVisitor<EmptyChecker, bool> {
+ ASTContext &Ctx;
+ llvm::DenseSet<unsigned> &MacroLocs;
+
+public:
+ EmptyChecker(ASTContext &ctx, llvm::DenseSet<unsigned> &macroLocs)
+ : Ctx(ctx), MacroLocs(macroLocs) { }
+
+ bool VisitNullStmt(NullStmt *S) {
+ return isMacroLoc(S->getLeadingEmptyMacroLoc());
+ }
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ if (S->body_empty())
+ return false; // was already empty, not because of transformations.
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ if (!Visit(*I))
+ return false;
+ return true;
+ }
+ bool VisitIfStmt(IfStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getThen() || !Visit(S->getThen()))
+ return false;
+ if (S->getElse() && !Visit(S->getElse()))
+ return false;
+ return true;
+ }
+ bool VisitWhileStmt(WhileStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitDoStmt(DoStmt *S) {
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ Expr *Exp = S->getCollection();
+ if (!Exp)
+ return false;
+ if (hasSideEffects(Exp, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ if (!S->getSubStmt())
+ return false;
+ return Visit(S->getSubStmt());
+ }
+
+private:
+ bool isMacroLoc(SourceLocation loc) {
+ if (loc.isInvalid()) return false;
+ return MacroLocs.count(loc.getRawEncoding());
+ }
+};
+
+class EmptyStatementsRemover :
+ public RecursiveASTVisitor<EmptyStatementsRemover> {
+ MigrationPass &Pass;
+ llvm::DenseSet<unsigned> &MacroLocs;
+
+public:
+ EmptyStatementsRemover(MigrationPass &pass,
+ llvm::DenseSet<unsigned> &macroLocs)
+ : Pass(pass), MacroLocs(macroLocs) { }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ if (I != E - 1)
+ check(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ check(*I);
+ return true;
+ }
+
+ bool isMacroLoc(SourceLocation loc) {
+ if (loc.isInvalid()) return false;
+ return MacroLocs.count(loc.getRawEncoding());
+ }
+
+ ASTContext &getContext() { return Pass.Ctx; }
+
+private:
+ void check(Stmt *S) {
+ if (!S) return;
+ if (EmptyChecker(Pass.Ctx, MacroLocs).Visit(S)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(S);
+ }
+ }
+};
+
+} // anonymous namespace
+
+static bool isBodyEmpty(CompoundStmt *body,
+ ASTContext &Ctx, llvm::DenseSet<unsigned> &MacroLocs) {
+ for (CompoundStmt::body_iterator
+ I = body->body_begin(), E = body->body_end(); I != E; ++I)
+ if (!EmptyChecker(Ctx, MacroLocs).Visit(*I))
+ return false;
+
+ return true;
+}
+
+static void removeDeallocMethod(MigrationPass &pass,
+ llvm::DenseSet<unsigned> &MacroLocs) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ for (ObjCImplementationDecl::instmeth_iterator
+ MI = (*I)->instmeth_begin(),
+ ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ ObjCMethodDecl *MD = *MI;
+ if (MD->getMethodFamily() == OMF_dealloc) {
+ if (MD->hasBody() &&
+ isBodyEmpty(MD->getCompoundBody(), Ctx, MacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(MD->getSourceRange());
+ }
+ break;
+ }
+ }
+ }
+}
+
+void trans::removeEmptyStatementsAndDealloc(MigrationPass &pass) {
+ llvm::DenseSet<unsigned> MacroLocs;
+ for (unsigned i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i)
+ MacroLocs.insert(pass.ARCMTMacroLocs[i].getRawEncoding());
+
+ EmptyStatementsRemover(pass, MacroLocs)
+ .TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+
+ removeDeallocMethod(pass, MacroLocs);
+
+ for (unsigned i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i) {
+ Transaction Trans(pass.TA);
+ pass.TA.remove(pass.ARCMTMacroLocs[i]);
+ }
+}
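
The pass above recognizes the statements it may delete by checking whether their location is one of the raw-encoded ARCMT macro locations recorded by earlier rewrites. A standalone sketch of that membership test follows, with made-up raw encodings standing in for real SourceLocations (assumed values, for illustration only).

#include <iostream>
#include <unordered_set>

int main() {
  // Raw-encoded locations of the ARCMT macro, as collected at the start of
  // removeEmptyStatementsAndDealloc (the values here are invented).
  std::unordered_set<unsigned> macroLocs = {1024, 2048};

  unsigned leadingEmptyMacroLoc = 2048; // e.g. a NullStmt's leading macro loc
  bool removable = macroLocs.count(leadingEmptyMacroLoc) != 0;
  std::cout << (removable
                    ? "empty ';' left behind by a previous rewrite: remove it\n"
                    : "user-written ';': keep it\n");
  return 0;
}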
diff --git a/lib/ARCMigrate/TransProperties.cpp b/lib/ARCMigrate/TransProperties.cpp
new file mode 100644
index 000000000000..872c95e1a444
--- /dev/null
+++ b/lib/ARCMigrate/TransProperties.cpp
@@ -0,0 +1,364 @@
+//===--- TransProperties.cpp - Transformations to ARC mode ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteProperties:
+//
+// - Adds strong/weak/unsafe_unretained ownership specifier to properties that
+// are missing one.
+// - Migrates properties from (retain) to (strong) and (assign) to
+// (unsafe_unretained/weak).
+// - If a property is synthesized, adds the ownership specifier in the ivar
+// backing the property.
+//
+// @interface Foo : NSObject {
+// NSObject *x;
+// }
+// @property (assign) id x;
+// @end
+// ---->
+// @interface Foo : NSObject {
+// NSObject *__weak x;
+// }
+// @property (weak) id x;
+// @end
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class PropertiesRewriter {
+ MigrationPass &Pass;
+
+ struct PropData {
+ ObjCPropertyDecl *PropD;
+ ObjCIvarDecl *IvarD;
+ ObjCPropertyImplDecl *ImplD;
+
+ PropData(ObjCPropertyDecl *propD) : PropD(propD), IvarD(0), ImplD(0) { }
+ };
+
+ typedef llvm::SmallVector<PropData, 2> PropsTy;
+ typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
+ AtPropDeclsTy AtProps;
+
+public:
+ PropertiesRewriter(MigrationPass &pass) : Pass(pass) { }
+
+ void doTransform(ObjCImplementationDecl *D) {
+ ObjCInterfaceDecl *iface = D->getClassInterface();
+ if (!iface)
+ return;
+
+ for (ObjCInterfaceDecl::prop_iterator
+ propI = iface->prop_begin(),
+ propE = iface->prop_end(); propI != propE; ++propI) {
+ if (propI->getAtLoc().isInvalid())
+ continue;
+ PropsTy &props = AtProps[propI->getAtLoc().getRawEncoding()];
+ props.push_back(*propI);
+ }
+
+ typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
+ prop_impl_iterator;
+ for (prop_impl_iterator
+ I = prop_impl_iterator(D->decls_begin()),
+ E = prop_impl_iterator(D->decls_end()); I != E; ++I) {
+ ObjCPropertyImplDecl *implD = *I;
+ if (implD->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+ ObjCPropertyDecl *propD = implD->getPropertyDecl();
+ if (!propD || propD->isInvalidDecl())
+ continue;
+ ObjCIvarDecl *ivarD = implD->getPropertyIvarDecl();
+ if (!ivarD || ivarD->isInvalidDecl())
+ continue;
+ unsigned rawAtLoc = propD->getAtLoc().getRawEncoding();
+ AtPropDeclsTy::iterator findAtLoc = AtProps.find(rawAtLoc);
+ if (findAtLoc == AtProps.end())
+ continue;
+
+ PropsTy &props = findAtLoc->second;
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (I->PropD == propD) {
+ I->IvarD = ivarD;
+ I->ImplD = implD;
+ break;
+ }
+ }
+ }
+
+ for (AtPropDeclsTy::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ PropsTy &props = I->second;
+ QualType ty = getPropertyType(props);
+ if (!ty->isObjCRetainableType())
+ continue;
+ if (hasIvarWithExplicitOwnership(props))
+ continue;
+
+ Transaction Trans(Pass.TA);
+ rewriteProperty(props, atLoc);
+ }
+ }
+
+private:
+ void rewriteProperty(PropsTy &props, SourceLocation atLoc) const {
+ ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+
+ if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_weak))
+ return;
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
+ rewriteAttribute("retain", "strong", atLoc);
+ return;
+ }
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign)
+ return rewriteAssign(props, atLoc);
+
+ return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
+ }
+
+ void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props));
+
+ bool rewroteAttr = rewriteAttribute("assign",
+ canUseWeak ? "weak" : "unsafe_unretained",
+ atLoc);
+ if (!rewroteAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD))
+ Pass.TA.insert(I->IvarD->getLocation(),
+ canUseWeak ? "__weak " : "__unsafe_unretained ");
+ if (I->ImplD)
+ Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
+ I->ImplD->getLocation());
+ }
+ }
+
+ void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
+ SourceLocation atLoc) const {
+ ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+ if ((propAttrs & ObjCPropertyDecl::OBJC_PR_readonly) &&
+ hasNoBackingIvars(props))
+ return;
+
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props));
+ bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
+ atLoc);
+ if (!addedAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD))
+ Pass.TA.insert(I->IvarD->getLocation(),
+ canUseWeak ? "__weak " : "__unsafe_unretained ");
+ if (I->ImplD) {
+ Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
+ I->ImplD->getLocation());
+ Pass.TA.clearDiagnostic(
+ diag::err_arc_objc_property_default_assign_on_object,
+ I->ImplD->getLocation());
+ }
+ }
+ }
+
+ bool rewriteAttribute(llvm::StringRef fromAttr, llvm::StringRef toAttr,
+ SourceLocation atLoc) const {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ llvm::StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOptions(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (llvm::StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::l_paren)) return false;
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren))
+ return false;
+
+ while (1) {
+ if (tok.isNot(tok::raw_identifier)) return false;
+ llvm::StringRef ident(tok.getRawIdentifierData(), tok.getLength());
+ if (ident == fromAttr) {
+ Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr);
+ return true;
+ }
+
+ do {
+ lexer.LexFromRawLexer(tok);
+ } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren));
+ if (tok.is(tok::r_paren))
+ break;
+ lexer.LexFromRawLexer(tok);
+ }
+
+ return false;
+ }
+
+ bool addAttribute(llvm::StringRef attr, SourceLocation atLoc) const {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ llvm::StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOptions(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (llvm::StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+
+ if (tok.isNot(tok::l_paren)) {
+ Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") ");
+ return true;
+ }
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren)) {
+ Pass.TA.insert(tok.getLocation(), attr);
+ return true;
+ }
+
+ if (tok.isNot(tok::raw_identifier)) return false;
+
+ Pass.TA.insert(tok.getLocation(), std::string(attr) + ", ");
+ return true;
+ }
+
+ bool hasIvarWithExplicitOwnership(PropsTy &props) const {
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (isa<AttributedType>(I->IvarD->getType()))
+ return true;
+ if (I->IvarD->getType().getLocalQualifiers().getObjCLifetime()
+ != Qualifiers::OCL_Strong)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool hasNoBackingIvars(PropsTy &props) const {
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ if (I->IvarD)
+ return false;
+
+ return true;
+ }
+
+ bool isUserDeclared(ObjCIvarDecl *ivarD) const {
+ return ivarD && !ivarD->getSynthesize();
+ }
+
+ QualType getPropertyType(PropsTy &props) const {
+ assert(!props.empty());
+ QualType ty = props[0].PropD->getType();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(ty == I->PropD->getType());
+#endif
+
+ return ty;
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind
+ getPropertyAttrs(PropsTy &props) const {
+ assert(!props.empty());
+ ObjCPropertyDecl::PropertyAttributeKind
+ attrs = props[0].PropD->getPropertyAttributesAsWritten();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(attrs == I->PropD->getPropertyAttributesAsWritten());
+#endif
+
+ return attrs;
+ }
+};
+
+class ImplementationChecker :
+ public RecursiveASTVisitor<ImplementationChecker> {
+ MigrationPass &Pass;
+
+public:
+ ImplementationChecker(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) {
+ PropertiesRewriter(Pass).doTransform(D);
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::rewriteProperties(MigrationPass &pass) {
+ ImplementationChecker(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
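
rewriteAttribute above raw-lexes the '@property (...)' token stream and replaces a single attribute in place, e.g. 'retain' with 'strong'. The sketch below reproduces the same textual effect on a plain string; it is an assumed simplification (no raw lexer, no macro or whole-word checks), not the pass's actual mechanism.

#include <iostream>
#include <string>

// Replace `from` with `to` inside the parenthesized attribute list, if present.
static bool replaceAttr(std::string &propertyLine, const std::string &from,
                        const std::string &to) {
  std::string::size_type open = propertyLine.find('(');
  std::string::size_type close = propertyLine.find(')');
  if (open == std::string::npos || close == std::string::npos)
    return false;
  std::string::size_type pos = propertyLine.find(from, open);
  if (pos == std::string::npos || pos > close)
    return false;
  propertyLine.replace(pos, from.size(), to);
  return true;
}

int main() {
  std::string line = "@property (nonatomic, retain) id x;";
  if (replaceAttr(line, "retain", "strong"))
    std::cout << line << "\n"; // @property (nonatomic, strong) id x;
  return 0;
}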
diff --git a/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
new file mode 100644
index 000000000000..ed6ed0adfdf2
--- /dev/null
+++ b/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -0,0 +1,219 @@
+//===--- TransRetainReleaseDealloc.cpp - Transformations to ARC mode ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeRetainReleaseDealloc:
+//
+// Removes retain/release/autorelease/dealloc messages.
+//
+// return [[foo retain] autorelease];
+// ---->
+// return foo;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class RetainReleaseDeallocRemover :
+ public RecursiveASTVisitor<RetainReleaseDeallocRemover> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+ llvm::OwningPtr<ParentMap> StmtMap;
+
+ Selector DelegateSel;
+
+public:
+ RetainReleaseDeallocRemover(MigrationPass &pass)
+ : Body(0), Pass(pass) {
+ DelegateSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("delegate"));
+ }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ collectRemovables(body, Removables);
+ StmtMap.reset(new ParentMap(body));
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ switch (E->getMethodFamily()) {
+ default:
+ return true;
+ case OMF_autorelease:
+ if (isRemovable(E)) {
+ // Removing an unused autorelease is unsafe: the receiver would likely be
+ // destroyed immediately, whereas previously it was kept alive by the
+ // autorelease pool. This is bad practice in general, so leave the message
+ // in place and emit an error to make the user restructure the code.
+ Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
+ "message; its receiver may be destroyed immediately",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
+ // Pass through.
+ case OMF_retain:
+ case OMF_release:
+ if (E->getReceiverKind() == ObjCMessageExpr::Instance)
+ if (Expr *rec = E->getInstanceReceiver()) {
+ rec = rec->IgnoreParenImpCasts();
+ if (rec->getType().getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "an __unsafe_unretained type";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (isGlobalVar(rec) &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "a global variable";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (E->getMethodFamily() == OMF_retain && isDelegateMessage(rec)) {
+ Pass.TA.reportError("it is not safe to remove 'retain' "
+ "message on the result of a 'delegate' message; "
+ "the object that was passed to 'setDelegate:' may not be "
+ "properly retained", rec->getLocStart());
+ return true;
+ }
+ }
+ case OMF_dealloc:
+ break;
+ }
+
+ switch (E->getReceiverKind()) {
+ default:
+ return true;
+ case ObjCMessageExpr::SuperInstance: {
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(E->getSuperLoc());
+ if (tryRemoving(E))
+ return true;
+ Pass.TA.replace(E->getSourceRange(), "self");
+ return true;
+ }
+ case ObjCMessageExpr::Instance:
+ break;
+ }
+
+ Expr *rec = E->getInstanceReceiver();
+ if (!rec) return true;
+
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(rec->getExprLoc());
+
+ if (E->getMethodFamily() == OMF_release &&
+ isRemovable(E) && isInAtFinally(E)) {
+ // Change the -release to "receiver = nil" in a finally to avoid a leak
+ // when an exception is thrown.
+ Pass.TA.replace(E->getSourceRange(), rec->getSourceRange());
+ if (Pass.Ctx.Idents.get("nil").hasMacroDefinition())
+ Pass.TA.insertAfterToken(rec->getLocEnd(), " = nil");
+ else
+ Pass.TA.insertAfterToken(rec->getLocEnd(), " = 0");
+ return true;
+ }
+
+ if (!hasSideEffects(E, Pass.Ctx)) {
+ if (tryRemoving(E))
+ return true;
+ }
+ Pass.TA.replace(E->getSourceRange(), rec->getSourceRange());
+
+ return true;
+ }
+
+private:
+ void clearDiagnostics(SourceLocation loc) const {
+ Pass.TA.clearDiagnostic(diag::err_arc_illegal_explicit_message,
+ diag::err_unavailable,
+ diag::err_unavailable_message,
+ loc);
+ }
+
+ bool isDelegateMessage(Expr *E) const {
+ if (!E) return false;
+
+ E = E->IgnoreParenCasts();
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
+ return (ME->isInstanceMessage() && ME->getSelector() == DelegateSel);
+
+ if (ObjCPropertyRefExpr *propE = dyn_cast<ObjCPropertyRefExpr>(E))
+ return propE->getGetterSelector() == DelegateSel;
+
+ return false;
+ }
+
+ bool isInAtFinally(Expr *E) const {
+ assert(E);
+ Stmt *S = E;
+ while (S) {
+ if (isa<ObjCAtFinallyStmt>(S))
+ return true;
+ S = StmtMap->getParent(S);
+ }
+
+ return false;
+ }
+
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool tryRemoving(Expr *E) const {
+ if (isRemovable(E)) {
+ Pass.TA.removeStmt(E);
+ return true;
+ }
+
+ Stmt *parent = StmtMap->getParent(E);
+
+ if (ImplicitCastExpr *castE = dyn_cast_or_null<ImplicitCastExpr>(parent))
+ return tryRemoving(castE);
+
+ if (ParenExpr *parenE = dyn_cast_or_null<ParenExpr>(parent))
+ return tryRemoving(parenE);
+
+ if (BinaryOperator *
+ bopE = dyn_cast_or_null<BinaryOperator>(parent)) {
+ if (bopE->getOpcode() == BO_Comma && bopE->getLHS() == E &&
+ isRemovable(bopE)) {
+ Pass.TA.replace(bopE->getSourceRange(), bopE->getRHS()->getSourceRange());
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+};
+
+} // anonymous namespace
+
+void trans::removeRetainReleaseDealloc(MigrationPass &pass) {
+ BodyTransform<RetainReleaseDeallocRemover> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
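
A retain/release/autorelease message handled above ends up in one of two ways: the whole statement is removed when the result is unused and removable, or the message is replaced by its receiver when the value is still needed (so 'return [[foo retain] autorelease];' becomes 'return foo;'). A minimal standalone sketch of those two textual outcomes, not the visitor itself:

#include <iostream>
#include <string>

// Returns the text that replaces a retain/release/autorelease message.
static std::string rewriteMessage(const std::string &receiver,
                                  bool resultIsUsed) {
  // Used as a value: keep the receiver. Standalone statement: drop everything.
  return resultIsUsed ? receiver : std::string();
}

int main() {
  std::cout << "return " << rewriteMessage("foo", true) << ";\n"; // return foo;
  std::cout << "standalone '[foo retain];' becomes \""
            << rewriteMessage("foo", false) << "\"\n";            // ""
  return 0;
}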
diff --git a/lib/ARCMigrate/TransUnbridgedCasts.cpp b/lib/ARCMigrate/TransUnbridgedCasts.cpp
new file mode 100644
index 000000000000..1cacd6d84e6d
--- /dev/null
+++ b/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -0,0 +1,203 @@
+//===--- TransUnbridgedCasts.cpp - Transformations to ARC mode ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnbridgedCasts:
+//
+// Checks casts of a non-Objective-C pointer to an Objective-C one. If the
+// non-Objective-C pointer comes from a file-level variable, a __bridge cast is
+// used to convert it. For the result of a function call that is known to be
+// +1/+0, __bridge_transfer/__bridge is used respectively.
+//
+// NSString *str = (NSString *)kUTTypePlainText;
+// str = b ? kUTTypeRTF : kUTTypePlainText;
+// NSString *_uuidString = (NSString *)CFUUIDCreateString(kCFAllocatorDefault,
+// _uuid);
+// ---->
+// NSString *str = (__bridge NSString *)kUTTypePlainText;
+// str = (__bridge NSString *)(b ? kUTTypeRTF : kUTTypePlainText);
+// NSString *_uuidString = (__bridge_transfer NSString *)
+// CFUUIDCreateString(kCFAllocatorDefault, _uuid);
+//
+// For a cast of an Objective-C pointer to a C pointer, when casting 'self',
+// __bridge is used.
+//
+// CFStringRef str = (CFStringRef)self;
+// ---->
+// CFStringRef str = (__bridge CFStringRef)self;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
+ MigrationPass &Pass;
+ IdentifierInfo *SelfII;
+public:
+ UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass) {
+ SelfII = &Pass.Ctx.Idents.get("self");
+ }
+
+ bool VisitCastExpr(CastExpr *E) {
+ if (E->getCastKind() != CK_AnyPointerToObjCPointerCast
+ && E->getCastKind() != CK_BitCast)
+ return true;
+
+ QualType castType = E->getType();
+ Expr *castExpr = E->getSubExpr();
+ QualType castExprType = castExpr->getType();
+
+ if (castType->isObjCObjectPointerType() &&
+ castExprType->isObjCObjectPointerType())
+ return true;
+ if (!castType->isObjCObjectPointerType() &&
+ !castExprType->isObjCObjectPointerType())
+ return true;
+
+ bool exprRetainable = castExprType->isObjCIndirectLifetimeType();
+ bool castRetainable = castType->isObjCIndirectLifetimeType();
+ if (exprRetainable == castRetainable) return true;
+
+ if (castExpr->isNullPointerConstant(Pass.Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ SourceLocation loc = castExpr->getExprLoc();
+ if (loc.isValid() && Pass.Ctx.getSourceManager().isInSystemHeader(loc))
+ return true;
+
+ if (castType->isObjCObjectPointerType())
+ transformNonObjCToObjCCast(E);
+ else
+ transformObjCToNonObjCCast(E);
+
+ return true;
+ }
+
+private:
+ void transformNonObjCToObjCCast(CastExpr *E) {
+ if (!E) return;
+
+ // Global vars are assumed to be cast as unretained.
+ if (isGlobalVar(E))
+ if (E->getSubExpr()->getType()->isPointerType()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+
+ // If the cast is directly over the result of a Core Foundation function,
+ // try to figure out whether it should be treated as retained or unretained.
+ Expr *inner = E->IgnoreParenCasts();
+ if (CallExpr *callE = dyn_cast<CallExpr>(inner)) {
+ if (FunctionDecl *FD = callE->getDirectCallee()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+ if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ if (FD->isGlobal() &&
+ FD->getIdentifier() &&
+ ento::cocoa::isRefType(E->getSubExpr()->getType(), "CF",
+ FD->getIdentifier()->getName())) {
+ StringRef fname = FD->getIdentifier()->getName();
+ if (fname.endswith("Retain") ||
+ fname.find("Create") != StringRef::npos ||
+ fname.find("Copy") != StringRef::npos) {
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+
+ if (fname.find("Get") != StringRef::npos) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ void castToObjCObject(CastExpr *E, bool retained) {
+ rewriteToBridgedCast(E, retained ? OBC_BridgeTransfer : OBC_Bridge);
+ }
+
+ void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind) {
+ TransformActions &TA = Pass.TA;
+
+ // We will remove the compiler diagnostic.
+ if (!TA.hasDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart()))
+ return;
+
+ StringRef bridge;
+ switch(Kind) {
+ case OBC_Bridge:
+ bridge = "__bridge "; break;
+ case OBC_BridgeTransfer:
+ bridge = "__bridge_transfer "; break;
+ case OBC_BridgeRetained:
+ bridge = "__bridge_retained "; break;
+ }
+
+ Transaction Trans(TA);
+ TA.clearDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart());
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
+ TA.insertAfterToken(CCE->getLParenLoc(), bridge);
+ } else {
+ SourceLocation insertLoc = E->getSubExpr()->getLocStart();
+ llvm::SmallString<128> newCast;
+ newCast += '(';
+ newCast += bridge;
+ newCast += E->getType().getAsString(Pass.Ctx.PrintingPolicy);
+ newCast += ')';
+
+ if (isa<ParenExpr>(E->getSubExpr())) {
+ TA.insert(insertLoc, newCast.str());
+ } else {
+ newCast += '(';
+ TA.insert(insertLoc, newCast.str());
+ TA.insertAfterToken(E->getLocEnd(), ")");
+ }
+ }
+ }
+
+ void transformObjCToNonObjCCast(CastExpr *E) {
+ if (isSelf(E->getSubExpr()))
+ return rewriteToBridgedCast(E, OBC_Bridge);
+ }
+
+ bool isSelf(Expr *E) {
+ E = E->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (DRE->getDecl()->getIdentifier() == SelfII)
+ return true;
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+void trans::rewriteUnbridgedCasts(MigrationPass &pass) {
+ UnbridgedCastRewriter trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
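
For casts over Core Foundation call results, the pass above relies on the Create/Copy/Retain versus Get naming convention to choose between __bridge_transfer (+1 result) and __bridge (+0 result); the real code also checks cf_returns_retained/cf_returns_not_retained attributes and cocoa::isRefType, which this standalone sketch omits (assumed simplification).

#include <iostream>
#include <string>

static const char *bridgeCastFor(const std::string &fname) {
  bool endsWithRetain =
      fname.size() >= 6 && fname.compare(fname.size() - 6, 6, "Retain") == 0;
  if (endsWithRetain || fname.find("Create") != std::string::npos ||
      fname.find("Copy") != std::string::npos)
    return "__bridge_transfer"; // +1 result: ownership transferred to ARC
  if (fname.find("Get") != std::string::npos)
    return "__bridge";          // +0 result: no ownership transfer
  return nullptr;               // no convention matched: leave the cast alone
}

int main() {
  const char *cast = bridgeCastFor("CFUUIDCreateString");
  std::cout << (cast ? cast : "<unchanged>") << "\n"; // __bridge_transfer
  return 0;
}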
diff --git a/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/lib/ARCMigrate/TransUnusedInitDelegate.cpp
new file mode 100644
index 000000000000..1019ab4ff1f6
--- /dev/null
+++ b/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -0,0 +1,74 @@
+//===--- TransUnusedInitDelegate.cpp - Transformations to ARC mode --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Transformations:
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnusedInitDelegate:
+//
+// Rewrites a delegate initialization call whose result is unused so that the
+// result is assigned to self.
+// e.g.
+// [self init];
+// ---->
+// self = [self init];
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class UnusedInitRewriter : public RecursiveASTVisitor<UnusedInitRewriter> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+
+public:
+ UnusedInitRewriter(MigrationPass &pass)
+ : Body(0), Pass(pass) { }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ collectRemovables(body, Removables);
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ if (ME->isDelegateInitCall() &&
+ isRemovable(ME) &&
+ Pass.TA.hasDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc())) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc());
+ Pass.TA.insert(ME->getExprLoc(), "self = ");
+ }
+ return true;
+ }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+};
+
+} // anonymous namespace
+
+void trans::rewriteUnusedInitDelegate(MigrationPass &pass) {
+ BodyTransform<UnusedInitRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
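
The rewrite above is purely textual: when a delegate-init call is an unused, removable expression statement (and the corresponding ARC diagnostic was captured), "self = " is inserted in front of it. A minimal standalone sketch of that effect:

#include <iostream>
#include <string>

static std::string rewriteUnusedInit(const std::string &stmt,
                                     bool resultUnused) {
  return resultUnused ? "self = " + stmt : stmt;
}

int main() {
  std::cout << rewriteUnusedInit("[self initWithFrame:frame];", true) << "\n";
  // prints: self = [self initWithFrame:frame];
  return 0;
}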
diff --git a/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
new file mode 100644
index 000000000000..07ccf70d4dfb
--- /dev/null
+++ b/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -0,0 +1,198 @@
+//===--- TransZeroOutPropsInDealloc.cpp - Transformations to ARC mode -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeZeroOutPropsInDealloc:
+//
+// Removes zeroing out of "strong" @synthesized properties in a -dealloc method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+namespace {
+
+class ZeroOutInDeallocRemover :
+ public RecursiveASTVisitor<ZeroOutInDeallocRemover> {
+ typedef RecursiveASTVisitor<ZeroOutInDeallocRemover> base;
+
+ MigrationPass &Pass;
+
+ llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*> SynthesizedProperties;
+ ImplicitParamDecl *SelfD;
+ ExprSet Removables;
+
+public:
+ ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(0) { }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ ASTContext &Ctx = Pass.Ctx;
+ TransformActions &TA = Pass.TA;
+
+ if (ME->getReceiverKind() != ObjCMessageExpr::Instance)
+ return true;
+ Expr *receiver = ME->getInstanceReceiver();
+ if (!receiver)
+ return true;
+
+ DeclRefExpr *refE = dyn_cast<DeclRefExpr>(receiver->IgnoreParenCasts());
+ if (!refE || refE->getDecl() != SelfD)
+ return true;
+
+ bool BackedBySynthesizeSetter = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyDecl *PropDecl = P->first;
+ if (PropDecl->getSetterName() == ME->getSelector()) {
+ BackedBySynthesizeSetter = true;
+ break;
+ }
+ }
+ if (!BackedBySynthesizeSetter)
+ return true;
+
+ // Remove the setter message if RHS is null
+ Transaction Trans(TA);
+ Expr *RHS = ME->getArg(0);
+ bool RHSIsNull =
+ RHS->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull);
+ if (RHSIsNull && isRemovable(ME))
+ TA.removeStmt(ME);
+
+ return true;
+ }
+
+ bool VisitBinaryOperator(BinaryOperator *BOE) {
+ if (isZeroingPropIvar(BOE) && isRemovable(BOE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(BOE);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
+ if (D->getMethodFamily() != OMF_dealloc)
+ return true;
+ if (!D->hasBody())
+ return true;
+
+ ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(D->getDeclContext());
+ if (!IMD)
+ return true;
+
+ SelfD = D->getSelfDecl();
+ collectRemovables(D->getBody(), Removables);
+
+ // For a 'dealloc' method use, find all property implementations in
+ // this class implementation.
+ for (ObjCImplDecl::propimpl_iterator
+ I = IMD->propimpl_begin(), EI = IMD->propimpl_end(); I != EI; ++I) {
+ ObjCPropertyImplDecl *PID = *I;
+ if (PID->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
+ if (!(setterM && setterM->isDefined())) {
+ ObjCPropertyDecl::PropertyAttributeKind AttrKind =
+ PD->getPropertyAttributes();
+ if (AttrKind &
+ (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_strong))
+ SynthesizedProperties[PD] = PID;
+ }
+ }
+ }
+
+ // Now, remove all zeroing of ivars etc.
+ base::TraverseObjCMethodDecl(D);
+
+ // clear out for next method.
+ SynthesizedProperties.clear();
+ SelfD = 0;
+ Removables.clear();
+ return true;
+ }
+
+ bool TraverseFunctionDecl(FunctionDecl *D) { return true; }
+ bool TraverseBlockDecl(BlockDecl *block) { return true; }
+ bool TraverseBlockExpr(BlockExpr *block) { return true; }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool isZeroingPropIvar(Expr *E) {
+ BinaryOperator *BOE = dyn_cast_or_null<BinaryOperator>(E);
+ if (!BOE) return false;
+
+ if (BOE->getOpcode() == BO_Comma)
+ return isZeroingPropIvar(BOE->getLHS()) &&
+ isZeroingPropIvar(BOE->getRHS());
+
+ if (BOE->getOpcode() != BO_Assign)
+ return false;
+
+ ASTContext &Ctx = Pass.Ctx;
+
+ Expr *LHS = BOE->getLHS();
+ if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(LHS)) {
+ ObjCIvarDecl *IVDecl = IV->getDecl();
+ if (!IVDecl->getType()->isObjCObjectPointerType())
+ return false;
+ bool IvarBacksPropertySynthesis = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyImplDecl *PropImpDecl = P->second;
+ if (PropImpDecl && PropImpDecl->getPropertyIvarDecl() == IVDecl) {
+ IvarBacksPropertySynthesis = true;
+ break;
+ }
+ }
+ if (!IvarBacksPropertySynthesis)
+ return false;
+ }
+ else if (ObjCPropertyRefExpr *PropRefExp = dyn_cast<ObjCPropertyRefExpr>(LHS)) {
+ // TODO: Using implicit property decl.
+ if (PropRefExp->isImplicitProperty())
+ return false;
+ if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
+ if (!SynthesizedProperties.count(PDecl))
+ return false;
+ }
+ }
+ else
+ return false;
+
+ Expr *RHS = BOE->getRHS();
+ bool RHSIsNull = RHS->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull);
+ if (RHSIsNull)
+ return true;
+
+ return isZeroingPropIvar(RHS);
+ }
+};
+
+} // anonymous namespace
+
+void trans::removeZeroOutPropsInDealloc(MigrationPass &pass) {
+ ZeroOutInDeallocRemover trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
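
isZeroingPropIvar above treats a comma expression as a removable zeroing statement only if every operand is itself an assignment of null to the ivar or property of a synthesized property. A standalone sketch of that all-or-nothing rule, with a small struct standing in for the AST checks (assumed simplification):

#include <iostream>
#include <vector>

// Each comma-expression operand, reduced to the two facts the check needs.
struct Assign { bool targetBacksSynthesizedProperty; bool rhsIsNull; };

static bool isZeroingChain(const std::vector<Assign> &commaOperands) {
  if (commaOperands.empty())
    return false;
  for (const Assign &a : commaOperands)
    if (!(a.targetBacksSynthesizedProperty && a.rhsIsNull))
      return false;
  return true;
}

int main() {
  std::vector<Assign> allZeroing = {{true, true}, {true, true}};
  std::vector<Assign> mixed = {{true, true}, {false, true}};
  std::cout << isZeroingChain(allZeroing) << " "  // 1: whole statement removed
            << isZeroingChain(mixed) << "\n";     // 0: left untouched
  return 0;
}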
diff --git a/lib/ARCMigrate/TransformActions.cpp b/lib/ARCMigrate/TransformActions.cpp
new file mode 100644
index 000000000000..c99940b494b9
--- /dev/null
+++ b/lib/ARCMigrate/TransformActions.cpp
@@ -0,0 +1,699 @@
+//===--- TransformActions.cpp - Migration to ARC mode ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/AST/Expr.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using llvm::StringRef;
+
+namespace {
+
+/// \brief Collects transformations and merges them before applying them with
+/// applyRewrites(). E.g. if the same source range is requested to be removed
+/// twice, only one rewriter removal will be invoked.
+/// Rewrites happen in "transactions"; if one rewrite in the transaction cannot
+/// be done (e.g. it resides in a macro) all rewrites in the transaction are
+/// aborted.
+/// FIXME: "Transactional" rewrite support should be baked into the Rewriter.
+class TransformActionsImpl {
+ CapturedDiagList &CapturedDiags;
+ ASTContext &Ctx;
+ Preprocessor &PP;
+
+ bool IsInTransaction;
+
+ enum ActionKind {
+ Act_Insert, Act_InsertAfterToken,
+ Act_Remove, Act_RemoveStmt,
+ Act_Replace, Act_ReplaceText,
+ Act_IncreaseIndentation,
+ Act_ClearDiagnostic
+ };
+
+ struct ActionData {
+ ActionKind Kind;
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ llvm::StringRef Text1, Text2;
+ Stmt *S;
+ llvm::SmallVector<unsigned, 2> DiagIDs;
+ };
+
+ std::vector<ActionData> CachedActions;
+
+ enum RangeComparison {
+ Range_Before,
+ Range_After,
+ Range_Contains,
+ Range_Contained,
+ Range_ExtendsBegin,
+ Range_ExtendsEnd
+ };
+
+ /// \brief A range to remove. It is a character range.
+ struct CharRange {
+ FullSourceLoc Begin, End;
+
+ CharRange(CharSourceRange range, SourceManager &srcMgr, Preprocessor &PP) {
+ SourceLocation beginLoc = range.getBegin(), endLoc = range.getEnd();
+ assert(beginLoc.isValid() && endLoc.isValid());
+ if (range.isTokenRange()) {
+ Begin = FullSourceLoc(srcMgr.getInstantiationLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(getLocForEndOfToken(endLoc, srcMgr, PP), srcMgr);
+ } else {
+ Begin = FullSourceLoc(srcMgr.getInstantiationLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(srcMgr.getInstantiationLoc(endLoc), srcMgr);
+ }
+ assert(Begin.isValid() && End.isValid());
+ }
+
+ RangeComparison compareWith(const CharRange &RHS) const {
+ if (End.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_Before;
+ if (RHS.End.isBeforeInTranslationUnitThan(Begin))
+ return Range_After;
+ if (!Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ !RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contained;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contains;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_ExtendsBegin;
+ else
+ return Range_ExtendsEnd;
+ }
+
+ static RangeComparison compare(SourceRange LHS, SourceRange RHS,
+ SourceManager &SrcMgr, Preprocessor &PP) {
+ return CharRange(CharSourceRange::getTokenRange(LHS), SrcMgr, PP)
+ .compareWith(CharRange(CharSourceRange::getTokenRange(RHS),
+ SrcMgr, PP));
+ }
+ };
+
+ typedef llvm::SmallVector<StringRef, 2> TextsVec;
+ typedef std::map<FullSourceLoc, TextsVec, FullSourceLoc::BeforeThanCompare>
+ InsertsMap;
+ InsertsMap Inserts;
+ /// \brief A list of ranges to remove. They are always sorted and they never
+ /// intersect with each other.
+ std::list<CharRange> Removals;
+
+ llvm::DenseSet<Stmt *> StmtRemovals;
+
+ std::vector<std::pair<CharRange, SourceLocation> > IndentationRanges;
+
+ /// \brief Keeps text passed to transformation methods.
+ llvm::StringMap<bool> UniqueText;
+
+public:
+ TransformActionsImpl(CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : CapturedDiags(capturedDiags), Ctx(ctx), PP(PP), IsInTransaction(false) { }
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ bool isInTransaction() const { return IsInTransaction; }
+
+ void insert(SourceLocation loc, llvm::StringRef text);
+ void insertAfterToken(SourceLocation loc, llvm::StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, llvm::StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, llvm::StringRef text);
+ void replaceText(SourceLocation loc, llvm::StringRef text,
+ llvm::StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(llvm::ArrayRef<unsigned> IDs, SourceRange range);
+
+ void applyRewrites(TransformActions::RewriteReceiver &receiver);
+
+private:
+ bool canInsert(SourceLocation loc);
+ bool canInsertAfterToken(SourceLocation loc);
+ bool canRemoveRange(SourceRange range);
+ bool canReplaceRange(SourceRange range, SourceRange replacementRange);
+ bool canReplaceText(SourceLocation loc, llvm::StringRef text);
+
+ void commitInsert(SourceLocation loc, StringRef text);
+ void commitInsertAfterToken(SourceLocation loc, StringRef text);
+ void commitRemove(SourceRange range);
+ void commitRemoveStmt(Stmt *S);
+ void commitReplace(SourceRange range, SourceRange replacementRange);
+ void commitReplaceText(SourceLocation loc, llvm::StringRef text,
+ llvm::StringRef replacementText);
+ void commitIncreaseIndentation(SourceRange range,SourceLocation parentIndent);
+ void commitClearDiagnostic(llvm::ArrayRef<unsigned> IDs, SourceRange range);
+
+ void addRemoval(CharSourceRange range);
+ void addInsertion(SourceLocation loc, StringRef text);
+
+ /// \brief Stores text passed to the transformation methods to keep the string
+ /// "alive". Since the vast majority of text will be the same, we also unique
+ /// the strings using a StringMap.
+ StringRef getUniqueText(StringRef text);
+
+ /// \brief Computes the source location just past the end of the token at
+ /// the given source location. If the location points at a macro, the whole
+ /// macro expansion is skipped.
+ static SourceLocation getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,Preprocessor &PP);
+};
+
+} // anonymous namespace
+
+void TransformActionsImpl::startTransaction() {
+ assert(!IsInTransaction &&
+ "Cannot start a transaction in the middle of another one");
+ IsInTransaction = true;
+}
+
+bool TransformActionsImpl::commitTransaction() {
+ assert(IsInTransaction && "No transaction started");
+
+ if (CachedActions.empty()) {
+ IsInTransaction = false;
+ return false;
+ }
+
+ // Verify that all actions are possible otherwise abort the whole transaction.
+ bool AllActionsPossible = true;
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ if (!canInsert(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_InsertAfterToken:
+ if (!canInsertAfterToken(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_Remove:
+ if (!canRemoveRange(act.R1))
+ AllActionsPossible = false;
+ break;
+ case Act_RemoveStmt:
+ assert(act.S);
+ if (!canRemoveRange(act.S->getSourceRange()))
+ AllActionsPossible = false;
+ break;
+ case Act_Replace:
+ if (!canReplaceRange(act.R1, act.R2))
+ AllActionsPossible = false;
+ break;
+ case Act_ReplaceText:
+ if (!canReplaceText(act.Loc, act.Text1))
+ AllActionsPossible = false;
+ break;
+ case Act_IncreaseIndentation:
+ // This is not important, we don't care if it will fail.
+ break;
+ case Act_ClearDiagnostic:
+ // We are just checking source rewrites.
+ break;
+ }
+ if (!AllActionsPossible)
+ break;
+ }
+
+ if (!AllActionsPossible) {
+ abortTransaction();
+ return true;
+ }
+
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ commitInsert(act.Loc, act.Text1);
+ break;
+ case Act_InsertAfterToken:
+ commitInsertAfterToken(act.Loc, act.Text1);
+ break;
+ case Act_Remove:
+ commitRemove(act.R1);
+ break;
+ case Act_RemoveStmt:
+ commitRemoveStmt(act.S);
+ break;
+ case Act_Replace:
+ commitReplace(act.R1, act.R2);
+ break;
+ case Act_ReplaceText:
+ commitReplaceText(act.Loc, act.Text1, act.Text2);
+ break;
+ case Act_IncreaseIndentation:
+ commitIncreaseIndentation(act.R1, act.Loc);
+ break;
+ case Act_ClearDiagnostic:
+ commitClearDiagnostic(act.DiagIDs, act.R1);
+ break;
+ }
+ }
+
+ CachedActions.clear();
+ IsInTransaction = false;
+ return false;
+}
+
+void TransformActionsImpl::abortTransaction() {
+ assert(IsInTransaction && "No transaction started");
+ CachedActions.clear();
+ IsInTransaction = false;
+}
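
commitTransaction above applies the cached actions all-or-nothing: every action is first verified (canInsert, canRemoveRange, ...), and if any single one is impossible the whole transaction is aborted and true is returned. A standalone C++ sketch of that model, using plain callables instead of the clang action kinds (an assumed simplification, not these classes):

#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

class MiniTransaction {
  std::vector<std::function<bool()> > Checks;
  std::vector<std::function<void()> > Commits;
public:
  void add(std::function<bool()> canApply, std::function<void()> apply) {
    Checks.push_back(canApply);
    Commits.push_back(apply);
  }
  // Returns true on abort, matching commitTransaction's convention.
  bool commit() {
    for (std::size_t i = 0; i != Checks.size(); ++i)
      if (!Checks[i]()) { Checks.clear(); Commits.clear(); return true; }
    for (std::size_t i = 0; i != Commits.size(); ++i)
      Commits[i]();
    Checks.clear();
    Commits.clear();
    return false;
  }
};

int main() {
  MiniTransaction T;
  T.add([] { return true; },  [] { std::cout << "rewrite 1 applied\n"; });
  T.add([] { return false; }, [] { std::cout << "rewrite 2 applied\n"; });
  std::cout << (T.commit() ? "transaction aborted, nothing applied\n"
                           : "transaction committed\n");
  return 0;
}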
+
+void TransformActionsImpl::insert(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_Insert;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::insertAfterToken(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_InsertAfterToken;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::remove(SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Remove;
+ data.R1 = range;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::removeStmt(Stmt *S) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_RemoveStmt;
+ data.S = S->IgnoreImplicit(); // important for uniquing
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replace(SourceRange range, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ remove(range);
+ insert(range.getBegin(), text);
+}
+
+void TransformActionsImpl::replace(SourceRange range,
+ SourceRange replacementRange) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Replace;
+ data.R1 = range;
+ data.R2 = replacementRange;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ text = getUniqueText(text);
+ replacementText = getUniqueText(replacementText);
+ ActionData data;
+ data.Kind = Act_ReplaceText;
+ data.Loc = loc;
+ data.Text1 = text;
+ data.Text2 = replacementText;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceStmt(Stmt *S, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ insert(S->getLocStart(), text);
+ removeStmt(S);
+}
+
+void TransformActionsImpl::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ if (range.isInvalid()) return;
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_IncreaseIndentation;
+ data.R1 = range;
+ data.Loc = parentIndent;
+ CachedActions.push_back(data);
+}
+
+bool TransformActionsImpl::clearDiagnostic(llvm::ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ if (!CapturedDiags.hasDiagnostic(IDs, range))
+ return false;
+
+ ActionData data;
+ data.Kind = Act_ClearDiagnostic;
+ data.R1 = range;
+ data.DiagIDs.append(IDs.begin(), IDs.end());
+ CachedActions.push_back(data);
+ return true;
+}
+
+bool TransformActionsImpl::canInsert(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getInstantiationLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtStartOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canInsertAfterToken(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getInstantiationLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtEndOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canRemoveRange(SourceRange range) {
+ return canInsert(range.getBegin()) && canInsertAfterToken(range.getEnd());
+}
+
+bool TransformActionsImpl::canReplaceRange(SourceRange range,
+ SourceRange replacementRange) {
+ return canRemoveRange(range) && canRemoveRange(replacementRange);
+}
+
+bool TransformActionsImpl::canReplaceText(SourceLocation loc, StringRef text) {
+ if (!canInsert(loc))
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getInstantiationLoc(loc);
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ llvm::StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ return file.substr(locInfo.second).startswith(text);
+}
+
+void TransformActionsImpl::commitInsert(SourceLocation loc, StringRef text) {
+ addInsertion(loc, text);
+}
+
+void TransformActionsImpl::commitInsertAfterToken(SourceLocation loc,
+ StringRef text) {
+ addInsertion(getLocForEndOfToken(loc, Ctx.getSourceManager(), PP), text);
+}
+
+void TransformActionsImpl::commitRemove(SourceRange range) {
+ addRemoval(CharSourceRange::getTokenRange(range));
+}
+
+void TransformActionsImpl::commitRemoveStmt(Stmt *S) {
+ assert(S);
+ if (StmtRemovals.count(S))
+ return; // already removed.
+
+ if (Expr *E = dyn_cast<Expr>(S)) {
+ commitRemove(E->getSourceRange());
+ commitInsert(E->getSourceRange().getBegin(), getARCMTMacroName());
+ } else
+ commitRemove(S->getSourceRange());
+
+ StmtRemovals.insert(S);
+}
+
+void TransformActionsImpl::commitReplace(SourceRange range,
+ SourceRange replacementRange) {
+ RangeComparison comp = CharRange::compare(replacementRange, range,
+ Ctx.getSourceManager(), PP);
+ assert(comp == Range_Contained);
+ if (comp != Range_Contained)
+ return; // Although we asserted, be extra safe for release build.
+ if (range.getBegin() != replacementRange.getBegin())
+ addRemoval(CharSourceRange::getCharRange(range.getBegin(),
+ replacementRange.getBegin()));
+ if (replacementRange.getEnd() != range.getEnd())
+ addRemoval(CharSourceRange::getTokenRange(
+ getLocForEndOfToken(replacementRange.getEnd(),
+ Ctx.getSourceManager(), PP),
+ range.getEnd()));
+}
+void TransformActionsImpl::commitReplaceText(SourceLocation loc,
+ StringRef text,
+ StringRef replacementText) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getInstantiationLoc(loc);
+ // canReplaceText already checked if loc points at text.
+ SourceLocation afterText = loc.getFileLocWithOffset(text.size());
+
+ addRemoval(CharSourceRange::getCharRange(loc, afterText));
+ commitInsert(loc, replacementText);
+}
+
+void TransformActionsImpl::commitIncreaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ SourceManager &SM = Ctx.getSourceManager();
+ IndentationRanges.push_back(
+ std::make_pair(CharRange(CharSourceRange::getTokenRange(range),
+ SM, PP),
+ SM.getInstantiationLoc(parentIndent)));
+}
+
+void TransformActionsImpl::commitClearDiagnostic(llvm::ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ CapturedDiags.clearDiagnostic(IDs, range);
+}
+
+void TransformActionsImpl::addInsertion(SourceLocation loc, StringRef text) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getInstantiationLoc(loc);
+ for (std::list<CharRange>::reverse_iterator
+ I = Removals.rbegin(), E = Removals.rend(); I != E; ++I) {
+ if (!SM.isBeforeInTranslationUnit(loc, I->End))
+ break;
+ if (I->Begin.isBeforeInTranslationUnitThan(loc))
+ return;
+ }
+
+ Inserts[FullSourceLoc(loc, SM)].push_back(text);
+}
+
+void TransformActionsImpl::addRemoval(CharSourceRange range) {
+ CharRange newRange(range, Ctx.getSourceManager(), PP);
+ if (newRange.Begin == newRange.End)
+ return;
+
+ Inserts.erase(Inserts.upper_bound(newRange.Begin),
+ Inserts.lower_bound(newRange.End));
+
+ std::list<CharRange>::iterator I = Removals.end();
+ while (I != Removals.begin()) {
+ std::list<CharRange>::iterator RI = I;
+ --RI;
+ RangeComparison comp = newRange.compareWith(*RI);
+ switch (comp) {
+ case Range_Before:
+ --I;
+ break;
+ case Range_After:
+ Removals.insert(I, newRange);
+ return;
+ case Range_Contained:
+ return;
+ case Range_Contains:
+ RI->End = newRange.End;
+ // Deliberate fall-through: the contained range is merged and erased below.
+ case Range_ExtendsBegin:
+ newRange.End = RI->End;
+ Removals.erase(RI);
+ break;
+ case Range_ExtendsEnd:
+ RI->End = newRange.End;
+ return;
+ }
+ }
+
+ Removals.insert(Removals.begin(), newRange);
+}
+
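// A concrete illustration of the two helpers above: queuing removals for the
// overlapping character ranges [10,20) and [15,30) coalesces them into a
// single removal covering [10,30), and an insertion that lands strictly
// inside an already-queued removal is discarded rather than being emitted
// into deleted text.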
+void TransformActionsImpl::applyRewrites(
+ TransformActions::RewriteReceiver &receiver) {
+ for (InsertsMap::iterator I = Inserts.begin(), E = Inserts.end(); I!=E; ++I) {
+ SourceLocation loc = I->first;
+ for (TextsVec::iterator
+ TI = I->second.begin(), TE = I->second.end(); TI != TE; ++TI) {
+ receiver.insert(loc, *TI);
+ }
+ }
+
+ for (std::vector<std::pair<CharRange, SourceLocation> >::iterator
+ I = IndentationRanges.begin(), E = IndentationRanges.end(); I!=E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->first.Begin,
+ I->first.End);
+ receiver.increaseIndentation(range, I->second);
+ }
+
+ for (std::list<CharRange>::iterator
+ I = Removals.begin(), E = Removals.end(); I != E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->Begin, I->End);
+ receiver.remove(range);
+ }
+}
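// A minimal sketch of a receiver that could be handed to applyRewrites().
// The virtual methods and their parameter types mirror the calls made above;
// the class name and the counting behavior are invented for illustration.
class CountingRewriteReceiver : public TransformActions::RewriteReceiver {
  unsigned NumEdits;
public:
  CountingRewriteReceiver() : NumEdits(0) { }
  virtual void insert(SourceLocation, llvm::StringRef) { ++NumEdits; }
  virtual void remove(CharSourceRange) { ++NumEdits; }
  virtual void increaseIndentation(CharSourceRange, SourceLocation) {
    ++NumEdits;
  }
  unsigned getNumEdits() const { return NumEdits; }
};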
+
+/// \brief Stores text passed to the transformation methods to keep the string
+/// "alive". Since the vast majority of text will be the same, we also unique
+/// the strings using a StringMap.
+StringRef TransformActionsImpl::getUniqueText(StringRef text) {
+ llvm::StringMapEntry<bool> &entry = UniqueText.GetOrCreateValue(text);
+ return entry.getKey();
+}
+
+/// \brief Computes the source location just past the end of the token at
+/// the given source location. If the location points at a macro, the whole
+/// macro expansion is skipped.
+SourceLocation TransformActionsImpl::getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,
+ Preprocessor &PP) {
+ if (loc.isMacroID())
+ loc = SM.getInstantiationRange(loc).second;
+ return PP.getLocForEndOfToken(loc);
+}
+
+TransformActions::RewriteReceiver::~RewriteReceiver() { }
+
+TransformActions::TransformActions(Diagnostic &diag,
+ CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : Diags(diag), CapturedDiags(capturedDiags) {
+ Impl = new TransformActionsImpl(capturedDiags, ctx, PP);
+}
+
+TransformActions::~TransformActions() {
+ delete static_cast<TransformActionsImpl*>(Impl);
+}
+
+void TransformActions::startTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->startTransaction();
+}
+
+bool TransformActions::commitTransaction() {
+ return static_cast<TransformActionsImpl*>(Impl)->commitTransaction();
+}
+
+void TransformActions::abortTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->abortTransaction();
+}
+
+
+void TransformActions::insert(SourceLocation loc, llvm::StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insert(loc, text);
+}
+
+void TransformActions::insertAfterToken(SourceLocation loc,
+ llvm::StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insertAfterToken(loc, text);
+}
+
+void TransformActions::remove(SourceRange range) {
+ static_cast<TransformActionsImpl*>(Impl)->remove(range);
+}
+
+void TransformActions::removeStmt(Stmt *S) {
+ static_cast<TransformActionsImpl*>(Impl)->removeStmt(S);
+}
+
+void TransformActions::replace(SourceRange range, llvm::StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, text);
+}
+
+void TransformActions::replace(SourceRange range,
+ SourceRange replacementRange) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, replacementRange);
+}
+
+void TransformActions::replaceStmt(Stmt *S, llvm::StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceStmt(S, text);
+}
+
+void TransformActions::replaceText(SourceLocation loc, llvm::StringRef text,
+ llvm::StringRef replacementText) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceText(loc, text,
+ replacementText);
+}
+
+void TransformActions::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ static_cast<TransformActionsImpl*>(Impl)->increaseIndentation(range,
+ parentIndent);
+}
+
+bool TransformActions::clearDiagnostic(llvm::ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ return static_cast<TransformActionsImpl*>(Impl)->clearDiagnostic(IDs, range);
+}
+
+void TransformActions::applyRewrites(RewriteReceiver &receiver) {
+ static_cast<TransformActionsImpl*>(Impl)->applyRewrites(receiver);
+}
+
+void TransformActions::reportError(llvm::StringRef error, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+ "Errors should be emitted out of a transaction");
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriteErr = "[rewriter] ";
+ rewriteErr += error;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
+ rewriteErr);
+ Diags.Report(loc, diagID) << range;
+}
+
+void TransformActions::reportNote(llvm::StringRef note, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+ "Notes should be emitted out of a transaction");
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriteNote = "[rewriter] ";
+ rewriteNote += note;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
+ rewriteNote);
+ Diags.Report(loc, diagID) << range;
+}
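
The wrappers above form a small transactional editing API: actions are queued
between startTransaction() and commitTransaction(), and the accumulated edits
are later handed to a RewriteReceiver. A minimal sketch of the calling pattern,
with invented names (applyOneEdit, TA, receiver, S, R) and without interpreting
the bool returned by commitTransaction():

  static void applyOneEdit(TransformActions &TA,
                           TransformActions::RewriteReceiver &receiver,
                           Stmt *S, SourceRange R) {
    TA.startTransaction();
    TA.removeStmt(S);            // queued; the statement is uniqued via IgnoreImplicit()
    TA.replace(R, "nil");        // implemented above as remove(R) + insert at R.getBegin()
    TA.commitTransaction();      // see its implementation earlier in this file
    TA.applyRewrites(receiver);  // typically done once, after all transactions
  }
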
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
new file mode 100644
index 000000000000..7bd95e54bc27
--- /dev/null
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -0,0 +1,296 @@
+//===--- Transforms.cpp - Transformations to ARC mode ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+using llvm::StringRef;
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+/// \brief True if the class is one that does not support weak.
+static bool isClassInWeakBlacklist(ObjCInterfaceDecl *cls) {
+ if (!cls)
+ return false;
+
+ bool inList = llvm::StringSwitch<bool>(cls->getName())
+ .Case("NSColorSpace", true)
+ .Case("NSFont", true)
+ .Case("NSFontPanel", true)
+ .Case("NSImage", true)
+ .Case("NSLazyBrowserCell", true)
+ .Case("NSWindow", true)
+ .Case("NSWindowController", true)
+ .Case("NSMenuView", true)
+ .Case("NSPersistentUIWindowInfo", true)
+ .Case("NSTableCellView", true)
+ .Case("NSATSTypeSetter", true)
+ .Case("NSATSGlyphStorage", true)
+ .Case("NSLineFragmentRenderingContext", true)
+ .Case("NSAttributeDictionary", true)
+ .Case("NSParagraphStyle", true)
+ .Case("NSTextTab", true)
+ .Case("NSSimpleHorizontalTypesetter", true)
+ .Case("_NSCachedAttributedString", true)
+ .Case("NSStringDrawingTextStorage", true)
+ .Case("NSTextView", true)
+ .Case("NSSubTextStorage", true)
+ .Default(false);
+
+ if (inList)
+ return true;
+
+ return isClassInWeakBlacklist(cls->getSuperClass());
+}
+
+bool trans::canApplyWeak(ASTContext &Ctx, QualType type) {
+ if (!Ctx.getLangOptions().ObjCRuntimeHasWeak)
+ return false;
+
+ QualType T = type;
+ while (const PointerType *ptr = T->getAs<PointerType>())
+ T = ptr->getPointeeType();
+ if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
+ if (!Class || Class->getName() == "NSObject")
+ return false; // id/NSObject is not safe for weak.
+ if (Class->isForwardDecl())
+ return false; // forward classes are not verifiable, therefore not safe.
+ if (Class->isArcWeakrefUnavailable())
+ return false;
+ if (isClassInWeakBlacklist(Class))
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation trans::findLocationAfterSemi(SourceLocation loc,
+ ASTContext &Ctx) {
+ SourceManager &SM = Ctx.getSourceManager();
+ if (loc.isMacroID()) {
+ if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOptions()))
+ return SourceLocation();
+ loc = SM.getInstantiationRange(loc).second;
+ }
+ loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOptions());
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ llvm::StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return SourceLocation();
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Ctx.getLangOptions(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::semi))
+ return SourceLocation();
+
+ return tok.getLocation().getFileLocWithOffset(1);
+}
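// A minimal usage sketch (the helper below and its name are hypothetical):
// decide whether the semicolon that terminates a statement can be removed
// together with the statement itself.
//
//   static bool canAlsoRemoveTrailingSemi(Stmt *S, ASTContext &Ctx) {
//     SourceLocation afterSemi = findLocationAfterSemi(S->getLocEnd(), Ctx);
//     return afterSemi.isValid(); // invalid if no ';' follows or inside a macro
//   }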
+
+bool trans::hasSideEffects(Expr *E, ASTContext &Ctx) {
+ if (!E || !E->HasSideEffects(Ctx))
+ return false;
+
+ E = E->IgnoreParenCasts();
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME)
+ return true;
+ switch (ME->getMethodFamily()) {
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_release:
+ case OMF_retain:
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::SuperInstance:
+ return false;
+ case ObjCMessageExpr::Instance:
+ return hasSideEffects(ME->getInstanceReceiver(), Ctx);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool trans::isGlobalVar(Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl()->getDeclContext()->isFileContext();
+ if (ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(E))
+ return isGlobalVar(condOp->getTrueExpr()) &&
+ isGlobalVar(condOp->getFalseExpr());
+
+ return false;
+}
+
+namespace {
+
+class ReferenceClear : public RecursiveASTVisitor<ReferenceClear> {
+ ExprSet &Refs;
+public:
+ ReferenceClear(ExprSet &refs) : Refs(refs) { }
+ bool VisitDeclRefExpr(DeclRefExpr *E) { Refs.erase(E); return true; }
+ bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { Refs.erase(E); return true; }
+};
+
+class ReferenceCollector : public RecursiveASTVisitor<ReferenceCollector> {
+ ValueDecl *Dcl;
+ ExprSet &Refs;
+
+public:
+ ReferenceCollector(ValueDecl *D, ExprSet &refs)
+ : Dcl(D), Refs(refs) { }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl() == Dcl)
+ Refs.insert(E);
+ return true;
+ }
+
+ bool VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ if (E->getDecl() == Dcl)
+ Refs.insert(E);
+ return true;
+ }
+};
+
+class RemovablesCollector : public RecursiveASTVisitor<RemovablesCollector> {
+ ExprSet &Removables;
+
+public:
+ RemovablesCollector(ExprSet &removables)
+ : Removables(removables) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ if (I != E - 1)
+ mark(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ mark(*I);
+ return true;
+ }
+
+ bool VisitIfStmt(IfStmt *S) {
+ mark(S->getThen());
+ mark(S->getElse());
+ return true;
+ }
+
+ bool VisitWhileStmt(WhileStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitDoStmt(DoStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitForStmt(ForStmt *S) {
+ mark(S->getInit());
+ mark(S->getInc());
+ mark(S->getBody());
+ return true;
+ }
+
+private:
+ void mark(Stmt *S) {
+ if (!S) return;
+
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(S))
+ S = Label->getSubStmt();
+ S = S->IgnoreImplicit();
+ if (Expr *E = dyn_cast<Expr>(S))
+ Removables.insert(E);
+ }
+};
+
+} // end anonymous namespace
+
+void trans::clearRefsIn(Stmt *S, ExprSet &refs) {
+ ReferenceClear(refs).TraverseStmt(S);
+}
+
+void trans::collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs) {
+ ReferenceCollector(D, refs).TraverseStmt(S);
+}
+
+void trans::collectRemovables(Stmt *S, ExprSet &exprs) {
+ RemovablesCollector(exprs).TraverseStmt(S);
+}
+
+//===----------------------------------------------------------------------===//
+// getAllTransformations.
+//===----------------------------------------------------------------------===//
+
+static void independentTransforms(MigrationPass &pass) {
+ rewriteAutoreleasePool(pass);
+ rewriteProperties(pass);
+ removeRetainReleaseDealloc(pass);
+ rewriteUnusedInitDelegate(pass);
+ removeZeroOutPropsInDealloc(pass);
+ makeAssignARCSafe(pass);
+ rewriteUnbridgedCasts(pass);
+ rewriteBlockObjCVariable(pass);
+}
+
+std::vector<TransformFn> arcmt::getAllTransformations() {
+ std::vector<TransformFn> transforms;
+
+ transforms.push_back(independentTransforms);
+ // This depends on previous transformations removing various expressions.
+ transforms.push_back(removeEmptyStatementsAndDealloc);
+
+ return transforms;
+}
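
getAllTransformations() batches the order-independent rewrites into a single
entry and runs the cleanup pass last. A minimal sketch of a driver loop over
the returned functions, assuming only that TransformFn is callable with a
MigrationPass& (which the push_back calls above already require); the real
driver lives in the migration process, not in this file:

  void runAllTransforms(MigrationPass &pass) {
    std::vector<TransformFn> transforms = arcmt::getAllTransformations();
    for (unsigned i = 0, e = transforms.size(); i != e; ++i)
      transforms[i](pass);  // each entry queues rewrites for this pass
  }
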
diff --git a/lib/ARCMigrate/Transforms.h b/lib/ARCMigrate/Transforms.h
new file mode 100644
index 000000000000..b47d6d8e9b8b
--- /dev/null
+++ b/lib/ARCMigrate/Transforms.h
@@ -0,0 +1,92 @@
+//===-- Transforms.h - Transformations to ARC mode --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+ class Decl;
+ class Stmt;
+ class BlockDecl;
+ class ObjCMethodDecl;
+ class FunctionDecl;
+
+namespace arcmt {
+ class MigrationPass;
+
+namespace trans {
+
+//===----------------------------------------------------------------------===//
+// Transformations.
+//===----------------------------------------------------------------------===//
+
+void rewriteAutoreleasePool(MigrationPass &pass);
+void rewriteUnbridgedCasts(MigrationPass &pass);
+void makeAssignARCSafe(MigrationPass &pass);
+void removeRetainReleaseDealloc(MigrationPass &pass);
+void removeZeroOutPropsInDealloc(MigrationPass &pass);
+void rewriteProperties(MigrationPass &pass);
+void rewriteBlockObjCVariable(MigrationPass &pass);
+void rewriteUnusedInitDelegate(MigrationPass &pass);
+
+void removeEmptyStatementsAndDealloc(MigrationPass &pass);
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+/// \brief Determine whether we can add weak to the given type.
+bool canApplyWeak(ASTContext &Ctx, QualType type);
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation findLocationAfterSemi(SourceLocation loc, ASTContext &Ctx);
+
+bool hasSideEffects(Expr *E, ASTContext &Ctx);
+bool isGlobalVar(Expr *E);
+
+
+template <typename BODY_TRANS>
+class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > {
+ MigrationPass &Pass;
+
+public:
+ BodyTransform(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseStmt(Stmt *rootS) {
+ BODY_TRANS(Pass).transformBody(rootS);
+ return true;
+ }
+};
+
+typedef llvm::DenseSet<Expr *> ExprSet;
+
+void clearRefsIn(Stmt *S, ExprSet &refs);
+template <typename iterator>
+void clearRefsIn(iterator begin, iterator end, ExprSet &refs) {
+ for (; begin != end; ++begin)
+ clearRefsIn(*begin, refs);
+}
+
+void collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs);
+
+void collectRemovables(Stmt *S, ExprSet &exprs);
+
+} // end namespace trans
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
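
The header above is the contract between the individual Trans*.cpp files: each
transform exposes one entry point, and a typical one is written as a small
class driven by BodyTransform. A sketch of that shape, with a hypothetical
MyRewriter and assuming the clang::arcmt and clang::arcmt::trans namespaces
are in scope:

  class MyRewriter {
    MigrationPass &Pass;
    ExprSet Removables;
  public:
    MyRewriter(MigrationPass &pass) : Pass(pass) { }
    void transformBody(Stmt *body) {
      collectRemovables(body, Removables); // expressions that are safe to drop
      // ...walk 'body' and queue edits for the pass...
    }
  };

  // Applied over every traversed statement by the visitor template:
  //   BodyTransform<MyRewriter>(pass).TraverseDecl(someDecl);
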
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 9094abad2ef1..6eada6e22f4e 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -219,11 +219,12 @@ ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
FunctionProtoTypes(this_()),
TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()),
+ SubstTemplateTemplateParmPacks(this_()),
GlobalNestedNameSpecifier(0), IsInt128Installed(false),
CFConstantStringTypeDecl(0), NSConstantStringTypeDecl(0),
- ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0), jmp_bufDecl(0),
- sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
- cudaConfigureCallDecl(0),
+ ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0),
+ jmp_bufDecl(0), sigjmp_bufDecl(0), BlockDescriptorType(0),
+ BlockDescriptorExtendedType(0), cudaConfigureCallDecl(0),
NullTypeSourceInfo(QualType()),
SourceMgr(SM), LangOpts(LOpts), ABI(createCXXABI(t)),
AddrSpaceMap(getAddressSpaceMap(t, LOpts)), Target(t),
@@ -288,8 +289,8 @@ ASTContext::setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source) {
}
void ASTContext::PrintStats() const {
- fprintf(stderr, "*** AST Context Stats:\n");
- fprintf(stderr, " %d types total.\n", (int)Types.size());
+ llvm::errs() << "\n*** AST Context Stats:\n";
+ llvm::errs() << " " << Types.size() << " types total.\n";
unsigned counts[] = {
#define TYPE(Name, Parent) 0,
@@ -307,40 +308,42 @@ void ASTContext::PrintStats() const {
unsigned TotalBytes = 0;
#define TYPE(Name, Parent) \
if (counts[Idx]) \
- fprintf(stderr, " %d %s types\n", (int)counts[Idx], #Name); \
+ llvm::errs() << " " << counts[Idx] << " " << #Name \
+ << " types\n"; \
TotalBytes += counts[Idx] * sizeof(Name##Type); \
++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
- fprintf(stderr, "Total bytes = %d\n", int(TotalBytes));
-
+ llvm::errs() << "Total bytes = " << TotalBytes << "\n";
+
// Implicit special member functions.
- fprintf(stderr, " %u/%u implicit default constructors created\n",
- NumImplicitDefaultConstructorsDeclared,
- NumImplicitDefaultConstructors);
- fprintf(stderr, " %u/%u implicit copy constructors created\n",
- NumImplicitCopyConstructorsDeclared,
- NumImplicitCopyConstructors);
+ llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
+ << NumImplicitDefaultConstructors
+ << " implicit default constructors created\n";
+ llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
+ << NumImplicitCopyConstructors
+ << " implicit copy constructors created\n";
if (getLangOptions().CPlusPlus)
- fprintf(stderr, " %u/%u implicit move constructors created\n",
- NumImplicitMoveConstructorsDeclared,
- NumImplicitMoveConstructors);
- fprintf(stderr, " %u/%u implicit copy assignment operators created\n",
- NumImplicitCopyAssignmentOperatorsDeclared,
- NumImplicitCopyAssignmentOperators);
+ llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
+ << NumImplicitMoveConstructors
+ << " implicit move constructors created\n";
+ llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
+ << NumImplicitCopyAssignmentOperators
+ << " implicit copy assignment operators created\n";
if (getLangOptions().CPlusPlus)
- fprintf(stderr, " %u/%u implicit move assignment operators created\n",
- NumImplicitMoveAssignmentOperatorsDeclared,
- NumImplicitMoveAssignmentOperators);
- fprintf(stderr, " %u/%u implicit destructors created\n",
- NumImplicitDestructorsDeclared, NumImplicitDestructors);
-
+ llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
+ << NumImplicitMoveAssignmentOperators
+ << " implicit move assignment operators created\n";
+ llvm::errs() << NumImplicitDestructorsDeclared << "/"
+ << NumImplicitDestructors
+ << " implicit destructors created\n";
+
if (ExternalSource.get()) {
- fprintf(stderr, "\n");
+ llvm::errs() << "\n";
ExternalSource->PrintStats();
}
-
+
BumpAlloc.PrintStats();
}
@@ -753,7 +756,7 @@ ASTContext::getTypeInfo(const Type *T) const {
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- assert(false && "Should not see dependent types");
+ llvm_unreachable("Should not see dependent types");
break;
case Type::FunctionNoProto:
@@ -1089,8 +1092,12 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
E = OI->ivar_end(); I != E; ++I)
Ivars.push_back(*I);
}
- else
- ShallowCollectObjCIvars(OI, Ivars);
+ else {
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+ for (ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar())
+ Ivars.push_back(Iv);
+ }
}
/// CollectInheritedProtocols - Collect all protocols in current class and
@@ -1881,7 +1888,7 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
/// the specified element type and size. VectorType must be a built-in type.
QualType
ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
- assert(vecType->isBuiltinType());
+ assert(vecType->isBuiltinType() || vecType->isDependentType());
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
@@ -2040,10 +2047,13 @@ ASTContext::getFunctionType(QualType ResultTy,
assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
}
- // FunctionProtoType objects are allocated with extra bytes after them
- // for two variable size arrays (for parameter and exception types) at the
- // end of them. Instead of the exception types, there could be a noexcept
- // expression and a context pointer.
+ // FunctionProtoType objects are allocated with extra bytes after
+ // them for three variable size arrays at the end:
+ // - parameter types
+ // - exception types
+ // - consumed-arguments flags
+ // Instead of the exception types, there could be a noexcept
+ // expression.
size_t Size = sizeof(FunctionProtoType) +
NumArgs * sizeof(QualType);
if (EPI.ExceptionSpecType == EST_Dynamic)
@@ -2051,6 +2061,9 @@ ASTContext::getFunctionType(QualType ResultTy,
else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
Size += sizeof(Expr*);
}
+ if (EPI.ConsumedArguments)
+ Size += NumArgs * sizeof(bool);
+
FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
FunctionProtoType::ExtProtoInfo newEPI = EPI;
newEPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallConv);
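
As a worked example of the size computation above: a prototype with two
parameters, a noexcept(expr) exception specification, and consumed-argument
information would be allocated sizeof(FunctionProtoType) + 2*sizeof(QualType)
+ sizeof(Expr*) + 2*sizeof(bool) bytes, with the variable-size arrays laid out
after the object itself.
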
@@ -2791,7 +2804,12 @@ static QualType getDecltypeForExpr(const Expr *e, const ASTContext &Context) {
/// on canonical type's (which are always unique).
QualType ASTContext::getDecltypeType(Expr *e) const {
DecltypeType *dt;
- if (e->isTypeDependent()) {
+
+ // C++0x [temp.type]p2:
+ // If an expression e involves a template parameter, decltype(e) denotes a
+ // unique dependent type. Two such decltype-specifiers refer to the same
+ // type only if their expressions are equivalent (14.5.6.1).
+ if (e->isInstantiationDependent()) {
llvm::FoldingSetNodeID ID;
DependentDecltypeType::Profile(ID, *this, e);
@@ -2925,7 +2943,6 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const {
return CanQualType::CreateUnsafe(Result);
}
-
QualType ASTContext::getUnqualifiedArrayType(QualType type,
Qualifiers &quals) {
SplitQualType splitType = type.getSplitUnqualifiedType();
@@ -3027,11 +3044,21 @@ bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
SourceLocation NameLoc) const {
- if (TemplateDecl *TD = Name.getAsTemplateDecl())
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template:
// DNInfo work in progress: CHECKME: what about DNLoc?
- return DeclarationNameInfo(TD->getDeclName(), NameLoc);
+ return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
+ NameLoc);
- if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
DeclarationName DName;
if (DTN->isIdentifier()) {
DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
@@ -3046,36 +3073,64 @@ ASTContext::getNameForTemplate(TemplateName Name,
}
}
- OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
- assert(Storage);
- // DNInfo work in progress: CHECKME: what about DNLoc?
- return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return DeclarationNameInfo(subst->getParameter()->getDeclName(),
+ NameLoc);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
+ NameLoc);
+ }
+ }
+
+ llvm_unreachable("bad template name kind!");
}
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
- if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template: {
+ TemplateDecl *Template = Name.getAsTemplateDecl();
if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(Template))
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
Template = getCanonicalTemplateTemplateParmDecl(TTP);
// The canonical template name is the canonical template declaration.
return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
}
- if (SubstTemplateTemplateParmPackStorage *SubstPack
- = Name.getAsSubstTemplateTemplateParmPack()) {
- TemplateTemplateParmDecl *CanonParam
- = getCanonicalTemplateTemplateParmDecl(SubstPack->getParameterPack());
- TemplateArgument CanonArgPack
- = getCanonicalTemplateArgument(SubstPack->getArgumentPack());
- return getSubstTemplateTemplateParmPack(CanonParam, CanonArgPack);
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("cannot canonicalize overloaded template");
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ assert(DTN && "Non-dependent template names must refer to template decls.");
+ return DTN->CanonicalTemplateName;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return getCanonicalTemplateName(subst->getReplacement());
}
-
- assert(!Name.getAsOverloadedTemplate());
- DependentTemplateName *DTN = Name.getAsDependentTemplateName();
- assert(DTN && "Non-dependent template names must refer to template decls.");
- return DTN->CanonicalTemplateName;
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *canonParameter
+ = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
+ TemplateArgument canonArgPack
+ = getCanonicalTemplateArgument(subst->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
+ }
+ }
+
+ llvm_unreachable("bad template name!");
}
bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
@@ -3260,6 +3315,31 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const {
VAT->getBracketsRange()));
}
+QualType ASTContext::getAdjustedParameterType(QualType T) {
+ // C99 6.7.5.3p7:
+ // A declaration of a parameter as "array of type" shall be
+ // adjusted to "qualified pointer to type", where the type
+ // qualifiers (if any) are those specified within the [ and ] of
+ // the array type derivation.
+ if (T->isArrayType())
+ return getArrayDecayedType(T);
+
+ // C99 6.7.5.3p8:
+ // A declaration of a parameter as "function returning type"
+ // shall be adjusted to "pointer to function returning type", as
+ // in 6.3.2.1.
+ if (T->isFunctionType())
+ return getPointerType(T);
+
+ return T;
+}
+
+QualType ASTContext::getSignatureParameterType(QualType T) {
+ T = getVariableArrayDecayedType(T);
+ T = getAdjustedParameterType(T);
+ return T.getUnqualifiedType();
+}
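// The adjustments documented in getAdjustedParameterType() correspond to the
// familiar source-level rewrites, for example:
//   void f(int a[10]);    // parameter type adjusted to 'int *'
//   void g(int h(void));  // parameter type adjusted to 'int (*)(void)'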
+
/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
@@ -3459,6 +3539,25 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
+/// \brief Recurses in pointer/array types until it finds an objc retainable
+/// type and returns its ownership.
+Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
+ while (!T.isNull()) {
+ if (T.getObjCLifetime() != Qualifiers::OCL_None)
+ return T.getObjCLifetime();
+ if (T->isArrayType())
+ T = getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
@@ -3725,11 +3824,7 @@ void ASTContext::setBlockDescriptorExtendedType(QualType T) {
}
bool ASTContext::BlockRequiresCopying(QualType Ty) const {
- if (Ty->isBlockPointerType())
- return true;
- if (isObjCNSObjectType(Ty))
- return true;
- if (Ty->isObjCObjectPointerType())
+ if (Ty->isObjCRetainableType())
return true;
if (getLangOptions().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -4180,17 +4275,7 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
if (!Ctx->getLangOptions().NeXTRuntime) {
const RecordDecl *RD = FD->getParent();
const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
- // FIXME: This same linear search is also used in ExprConstant - it might
- // be better if the FieldDecl stored its offset. We'd be increasing the
- // size of the object slightly, but saving some time every time it is used.
- unsigned i = 0;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- Field != FieldEnd; (void)++Field, ++i) {
- if (*Field == FD)
- break;
- }
- S += llvm::utostr(RL.getFieldOffset(i));
+ S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
if (T->isEnumeralType())
S += 'i';
else
@@ -4510,6 +4595,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
BE = CXXRec->bases_end(); BI != BE; ++BI) {
if (!BI->isVirtual()) {
CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
uint64_t offs = layout.getBaseClassOffsetInBits(base);
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
std::make_pair(offs, base));
@@ -4531,6 +4618,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
BI = CXXRec->vbases_begin(),
BE = CXXRec->vbases_end(); BI != BE; ++BI) {
CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
uint64_t offs = layout.getVBaseClassOffsetInBits(base);
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
std::make_pair(offs, base));
@@ -4594,8 +4683,8 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
// expands virtual bases each time one is encountered in the hierarchy,
// making the encoding type bigger than it really is.
getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
- if (!base->isEmpty())
- CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+ assert(!base->isEmpty());
+ CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
} else {
FieldDecl *field = cast<FieldDecl>(dcl);
if (FD) {
@@ -4782,6 +4871,24 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
}
TemplateName
+ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
+ TemplateName replacement) const {
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
+
+ void *insertPos = 0;
+ SubstTemplateTemplateParmStorage *subst
+ = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
+
+ if (!subst) {
+ subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
+ SubstTemplateTemplateParms.InsertNode(subst, insertPos);
+ }
+
+ return TemplateName(subst);
+}
+
+TemplateName
ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
const TemplateArgument &ArgPack) const {
ASTContext &Self = const_cast<ASTContext &>(*this);
@@ -4793,7 +4900,7 @@ ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
= SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
if (!Subst) {
- Subst = new (*this) SubstTemplateTemplateParmPackStorage(Self, Param,
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
ArgPack.pack_size(),
ArgPack.pack_begin());
SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
@@ -4826,20 +4933,6 @@ CanQualType ASTContext::getFromTargetType(unsigned Type) const {
// Type Predicates.
//===----------------------------------------------------------------------===//
-/// isObjCNSObjectType - Return true if this is an NSObject object using
-/// NSObject attribute on a c-style pointer type.
-/// FIXME - Make it work directly on types.
-/// FIXME: Move to Type.
-///
-bool ASTContext::isObjCNSObjectType(QualType Ty) const {
- if (const TypedefType *TDT = dyn_cast<TypedefType>(Ty)) {
- if (TypedefNameDecl *TD = TDT->getDecl())
- if (TD->getAttr<ObjCNSObjectAttr>())
- return true;
- }
- return false;
-}
-
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
@@ -5348,6 +5441,10 @@ bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
}
+bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
+ return typesAreCompatible(LHS, RHS);
+}
+
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
return !mergeTypes(LHS, RHS, true).isNull();
}
@@ -5452,6 +5549,9 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
return QualType();
+ if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
+ return QualType();
+
// It's noreturn if either type is.
// FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
@@ -5460,10 +5560,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (NoReturn != rbaseInfo.getNoReturn())
allRTypes = false;
- FunctionType::ExtInfo einfo(NoReturn,
- lbaseInfo.getHasRegParm(),
- lbaseInfo.getRegParm(),
- lbaseInfo.getCC());
+ FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
if (lproto && rproto) { // two C99 style function prototypes
assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
@@ -5584,7 +5681,8 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// If any of these qualifiers are different, we have a type
// mismatch.
if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
- LQuals.getAddressSpace() != RQuals.getAddressSpace())
+ LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
+ LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
return QualType();
// Exactly one GC qualifier difference is allowed: __strong is
@@ -6401,4 +6499,3 @@ size_t ASTContext::getSideTableAllocatedMemory() const {
bytes += InstantiatedFromUnnamedFieldDecl.getMemorySize();
return bytes;
}
-
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index 16d2f853606e..7c91b5cb7a00 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -129,7 +129,7 @@ break; \
/// \brief Convert the given type to a string suitable for printing as part of
/// a diagnostic.
///
-/// There are three main criteria when determining whether we should have an
+/// There are four main criteria when determining whether we should have an
/// a.k.a. clause when pretty-printing a type:
///
/// 1) Some types provide very minimal sugar that doesn't impede the
@@ -142,15 +142,44 @@ break; \
/// want to desugar these, even if we do produce an a.k.a. clause.
/// 3) Some types may have already been desugared previously in this diagnostic.
/// if this is the case, doing another "aka" would just be clutter.
+/// 4) Two different types within the same diagnostic have the same output
+/// string. In this case, force an a.k.a. with the desugared type when
+/// doing so will provide additional information.
///
/// \param Context the context in which the type was allocated
/// \param Ty the type to print
+/// \param QualTypeVals pointer values to QualTypes which are used in the
+/// diagnostic message
static std::string
ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
const Diagnostic::ArgumentValue *PrevArgs,
- unsigned NumPrevArgs) {
+ unsigned NumPrevArgs,
+ llvm::SmallVectorImpl<intptr_t> &QualTypeVals) {
// FIXME: Playing with std::string is really slow.
+ bool ForceAKA = false;
+ QualType CanTy = Ty.getCanonicalType();
std::string S = Ty.getAsString(Context.PrintingPolicy);
+ std::string CanS = CanTy.getAsString(Context.PrintingPolicy);
+
+ for (llvm::SmallVectorImpl<intptr_t>::iterator I = QualTypeVals.begin(),
+ E = QualTypeVals.end(); I != E; ++I) {
+ QualType CompareTy =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(*I));
+ if (CompareTy == Ty)
+ continue; // Same types
+ QualType CompareCanTy = CompareTy.getCanonicalType();
+ if (CompareCanTy == CanTy)
+ continue; // Same canonical types
+ std::string CompareS = CompareTy.getAsString(Context.PrintingPolicy);
+ if (CompareS != S)
+ continue; // Original strings are different
+ std::string CompareCanS = CompareCanTy.getAsString(Context.PrintingPolicy);
+ if (CompareCanS == CanS)
+ continue; // No new info from canonical type
+
+ ForceAKA = true;
+ break;
+ }
// Check to see if we already desugared this type in this
// diagnostic. If so, don't do it again.
@@ -172,11 +201,15 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
if (!Repeated) {
bool ShouldAKA = false;
QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
- if (ShouldAKA) {
- S = "'" + S + "' (aka '";
- S += DesugaredTy.getAsString(Context.PrintingPolicy);
- S += "')";
- return S;
+ if (ShouldAKA || ForceAKA) {
+ if (DesugaredTy == Ty) {
+ DesugaredTy = Ty.getCanonicalType();
+ }
+ std::string akaStr = DesugaredTy.getAsString(Context.PrintingPolicy);
+ if (akaStr != S) {
+ S = "'" + S + "' (aka '" + akaStr + "')";
+ return S;
+ }
}
}
@@ -184,16 +217,18 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
return S;
}
-void clang::FormatASTNodeDiagnosticArgument(Diagnostic::ArgumentKind Kind,
- intptr_t Val,
- const char *Modifier,
- unsigned ModLen,
- const char *Argument,
- unsigned ArgLen,
- const Diagnostic::ArgumentValue *PrevArgs,
- unsigned NumPrevArgs,
- llvm::SmallVectorImpl<char> &Output,
- void *Cookie) {
+void clang::FormatASTNodeDiagnosticArgument(
+ Diagnostic::ArgumentKind Kind,
+ intptr_t Val,
+ const char *Modifier,
+ unsigned ModLen,
+ const char *Argument,
+ unsigned ArgLen,
+ const Diagnostic::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ llvm::SmallVectorImpl<char> &Output,
+ void *Cookie,
+ llvm::SmallVectorImpl<intptr_t> &QualTypeVals) {
ASTContext &Context = *static_cast<ASTContext*>(Cookie);
std::string S;
@@ -206,7 +241,8 @@ void clang::FormatASTNodeDiagnosticArgument(Diagnostic::ArgumentKind Kind,
"Invalid modifier for QualType argument");
QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
- S = ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, NumPrevArgs);
+ S = ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, NumPrevArgs,
+ QualTypeVals);
NeedQuotes = false;
break;
}
@@ -257,7 +293,7 @@ void clang::FormatASTNodeDiagnosticArgument(Diagnostic::ArgumentKind Kind,
} else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
S = ConvertTypeToDiagnosticString(Context,
Context.getTypeDeclType(Type),
- PrevArgs, NumPrevArgs);
+ PrevArgs, NumPrevArgs, QualTypeVals);
} else {
// FIXME: Get these strings from some localized place
NamedDecl *ND = cast<NamedDecl>(DC);
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 100e604d1c46..f5e392f88d0b 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -4201,6 +4201,20 @@ TemplateName ASTImporter::Import(TemplateName From) {
return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
}
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = From.getAsSubstTemplateTemplateParm();
+ TemplateTemplateParmDecl *param
+ = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
+ if (!param)
+ return TemplateName();
+
+ TemplateName replacement = Import(subst->getReplacement());
+ if (replacement.isNull()) return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParm(param, replacement);
+ }
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
@@ -4232,8 +4246,8 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
SourceManager &FromSM = FromContext.getSourceManager();
// For now, map everything down to its spelling location, so that we
- // don't have to import macro instantiations.
- // FIXME: Import macro instantiations!
+ // don't have to import macro expansions.
+ // FIXME: Import macro expansions!
FromLoc = FromSM.getSpellingLoc(FromLoc);
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
SourceManager &ToSM = ToContext.getSourceManager();
@@ -4254,7 +4268,7 @@ FileID ASTImporter::Import(FileID FromID) {
SourceManager &FromSM = FromContext.getSourceManager();
SourceManager &ToSM = ToContext.getSourceManager();
const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
- assert(FromSLoc.isFile() && "Cannot handle macro instantiations yet");
+ assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
// Include location of this file.
SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 12357c07a794..4c323da7eee3 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -197,6 +197,16 @@ getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
}
+static bool shouldConsiderTemplateLV(const FunctionDecl *fn,
+ const FunctionTemplateSpecializationInfo *spec) {
+ return !(spec->isExplicitSpecialization() &&
+ fn->hasAttr<VisibilityAttr>());
+}
+
+static bool shouldConsiderTemplateLV(const ClassTemplateSpecializationDecl *d) {
+ return !(d->isExplicitSpecialization() && d->hasAttr<VisibilityAttr>());
+}
+
static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
"Not a name having namespace scope");
@@ -231,6 +241,14 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
if (!FoundExtern)
return LinkageInfo::internal();
}
+ if (Var->getStorageClass() == SC_None) {
+ const VarDecl *PrevVar = Var->getPreviousDeclaration();
+ for (; PrevVar; PrevVar = PrevVar->getPreviousDeclaration())
+ if (PrevVar->getStorageClass() == SC_PrivateExtern)
+ break;
+ if (PrevVar)
+ return PrevVar->getLinkageAndVisibility();
+ }
} else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
// C++ [temp]p4:
// A non-member function template can have internal linkage; any
@@ -389,12 +407,16 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
Function->getType()->getLinkage() == UniqueExternalLinkage)
return LinkageInfo::uniqueExternal();
- if (FunctionTemplateSpecializationInfo *SpecInfo
+ // Consider LV from the template and the template arguments unless
+ // this is an explicit specialization with a visibility attribute.
+ if (FunctionTemplateSpecializationInfo *specInfo
= Function->getTemplateSpecializationInfo()) {
- LV.merge(getLVForDecl(SpecInfo->getTemplate(),
- F.onlyTemplateVisibility()));
- const TemplateArgumentList &TemplateArgs = *SpecInfo->TemplateArguments;
- LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
+ if (shouldConsiderTemplateLV(Function, specInfo)) {
+ LV.merge(getLVForDecl(specInfo->getTemplate(),
+ F.onlyTemplateVisibility()));
+ const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
+ LV.merge(getLVForTemplateArgumentList(templateArgs, F));
+ }
}
// - a named class (Clause 9), or an unnamed class defined in a
@@ -410,15 +432,17 @@ static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
// If this is a class template specialization, consider the
// linkage of the template and template arguments.
- if (const ClassTemplateSpecializationDecl *Spec
+ if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
- // From the template.
- LV.merge(getLVForDecl(Spec->getSpecializedTemplate(),
- F.onlyTemplateVisibility()));
-
- // The arguments at which the template was instantiated.
- const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
- LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
+ if (shouldConsiderTemplateLV(spec)) {
+ // From the template.
+ LV.merge(getLVForDecl(spec->getSpecializedTemplate(),
+ F.onlyTemplateVisibility()));
+
+ // The arguments at which the template was instantiated.
+ const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
+ LV.merge(getLVForTemplateArgumentList(TemplateArgs, F));
+ }
}
// Consider -fvisibility unless the type has C linkage.
@@ -519,14 +543,16 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
// If this is a method template specialization, use the linkage for
// the template parameters and arguments.
- if (FunctionTemplateSpecializationInfo *Spec
+ if (FunctionTemplateSpecializationInfo *spec
= MD->getTemplateSpecializationInfo()) {
- LV.merge(getLVForTemplateArgumentList(*Spec->TemplateArguments, F));
- if (F.ConsiderTemplateParameterTypes)
- LV.merge(getLVForTemplateParameterList(
- Spec->getTemplate()->getTemplateParameters()));
+ if (shouldConsiderTemplateLV(MD, spec)) {
+ LV.merge(getLVForTemplateArgumentList(*spec->TemplateArguments, F));
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(
+ spec->getTemplate()->getTemplateParameters()));
+ }
- TSK = Spec->getTemplateSpecializationKind();
+ TSK = spec->getTemplateSpecializationKind();
} else if (MemberSpecializationInfo *MSI =
MD->getMemberSpecializationInfo()) {
TSK = MSI->getTemplateSpecializationKind();
@@ -553,14 +579,16 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
// *do* apply -fvisibility to method declarations.
} else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (const ClassTemplateSpecializationDecl *Spec
+ if (const ClassTemplateSpecializationDecl *spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- // Merge template argument/parameter information for member
- // class template specializations.
- LV.merge(getLVForTemplateArgumentList(Spec->getTemplateArgs(), F));
+ if (shouldConsiderTemplateLV(spec)) {
+ // Merge template argument/parameter information for member
+ // class template specializations.
+ LV.merge(getLVForTemplateArgumentList(spec->getTemplateArgs(), F));
if (F.ConsiderTemplateParameterTypes)
LV.merge(getLVForTemplateParameterList(
- Spec->getSpecializedTemplate()->getTemplateParameters()));
+ spec->getSpecializedTemplate()->getTemplateParameters()));
+ }
}
// Static data members.
@@ -1304,6 +1332,19 @@ void VarDecl::setInit(Expr *I) {
Init = I;
}
+bool VarDecl::extendsLifetimeOfTemporary() const {
+ assert(getType()->isReferenceType() &&"Non-references never extend lifetime");
+
+ const Expr *E = getInit();
+ if (!E)
+ return false;
+
+ if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(E))
+ E = Cleanups->getSubExpr();
+
+ return isa<MaterializeTemporaryExpr>(E);
+}
+
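// Illustration of what the new helper reports (hypothetical snippet, assuming
// a class type with a trivial destructor):
//   struct S { int x; };
//   const S &r = S();       // initializer materializes a temporary: true
//   S s; const S &r2 = s;   // binds directly to an existing object: false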
VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return cast<VarDecl>(MSI->getInstantiatedFrom());
@@ -2320,8 +2361,15 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource::Deserializing TheFields(Source);
llvm::SmallVector<Decl*, 64> Decls;
- if (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls))
+ LoadedFieldsFromExternalStorage = true;
+ switch (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_AlreadyLoaded:
+ case ELR_Failure:
return;
+ }
#ifndef NDEBUG
// Check that all decls we got were FieldDecls.
@@ -2329,8 +2377,6 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
assert(isa<FieldDecl>(Decls[i]));
#endif
- LoadedFieldsFromExternalStorage = true;
-
if (Decls.empty())
return;
@@ -2376,6 +2422,16 @@ void BlockDecl::setCaptures(ASTContext &Context,
Captures = static_cast<Capture*>(buffer);
}
+bool BlockDecl::capturesVariable(const VarDecl *variable) const {
+ for (capture_const_iterator
+ i = capture_begin(), e = capture_end(); i != e; ++i)
+ // Only auto vars can be captured, so no redeclaration worries.
+ if (i->getVariable() == variable)
+ return true;
+
+ return false;
+}
+
SourceRange BlockDecl::getSourceRange() const {
return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 1766d39c1405..b2806f092cbd 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -29,7 +29,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <cstdio>
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -76,26 +75,27 @@ bool Decl::CollectingStats(bool Enable) {
}
void Decl::PrintStats() {
- fprintf(stderr, "*** Decl Stats:\n");
+ llvm::errs() << "\n*** Decl Stats:\n";
int totalDecls = 0;
#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
- fprintf(stderr, " %d decls total.\n", totalDecls);
+ llvm::errs() << " " << totalDecls << " decls total.\n";
int totalBytes = 0;
#define DECL(DERIVED, BASE) \
if (n##DERIVED##s > 0) { \
totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
- fprintf(stderr, " %d " #DERIVED " decls, %d each (%d bytes)\n", \
- n##DERIVED##s, (int)sizeof(DERIVED##Decl), \
- (int)(n##DERIVED##s * sizeof(DERIVED##Decl))); \
+ llvm::errs() << " " << n##DERIVED##s << " " #DERIVED " decls, " \
+ << sizeof(DERIVED##Decl) << " each (" \
+ << n##DERIVED##s * sizeof(DERIVED##Decl) \
+ << " bytes)\n"; \
}
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
- fprintf(stderr, "Total bytes = %d\n", totalBytes);
+ llvm::errs() << "Total bytes = " << totalBytes << "\n";
}
void Decl::add(Kind k) {
@@ -641,12 +641,8 @@ DeclContext *Decl::getNonClosureContext() {
// This is basically "while (DC->isClosure()) DC = DC->getParent();"
// except that it's significantly more efficient to cast to a known
// decl type and call getDeclContext() than to call getParent().
- do {
- if (isa<BlockDecl>(DC)) {
- DC = cast<BlockDecl>(DC)->getDeclContext();
- continue;
- }
- } while (false);
+ while (isa<BlockDecl>(DC))
+ DC = cast<BlockDecl>(DC)->getDeclContext();
assert(!DC->isClosure());
return DC;
@@ -843,12 +839,17 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
// Notify that we have a DeclContext that is initializing.
ExternalASTSource::Deserializing ADeclContext(Source);
+ // Load the external declarations, if any.
llvm::SmallVector<Decl*, 64> Decls;
- if (Source->FindExternalLexicalDecls(this, Decls))
- return;
-
- // There is no longer any lexical storage in this context
ExternalLexicalStorage = false;
+ switch (Source->FindExternalLexicalDecls(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_Failure:
+ case ELR_AlreadyLoaded:
+ return;
+ }
if (Decls.empty())
return;
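
Both external-storage hunks above move from a bool result to the three-state ExternalLoadResult (ELR_Success, ELR_AlreadyLoaded, ELR_Failure) that the ExternalASTSource.cpp hunk further down returns by default. A minimal sketch of the calling pattern, with a made-up FakeSource standing in for ExternalASTSource; switching without a default keeps the compiler's -Wswitch coverage useful if another enumerator is ever added:

    #include <vector>

    enum ExternalLoadResult { ELR_Success, ELR_AlreadyLoaded, ELR_Failure };

    struct Decl {};

    struct FakeSource {
      // Pretend there is nothing new to hand back.
      ExternalLoadResult FindExternalLexicalDecls(std::vector<Decl *> &Out) {
        (void)Out;
        return ELR_AlreadyLoaded;
      }
    };

    void loadLexicalDecls(FakeSource &Source) {
      std::vector<Decl *> Decls;
      switch (Source.FindExternalLexicalDecls(Decls)) {
      case ELR_Success:
        break;                 // fall through and merge Decls below
      case ELR_AlreadyLoaded:
      case ELR_Failure:
        return;                // nothing to do, or the load failed
      }
      // ... merge Decls into the declaration context ...
    }
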
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 08ac2a5be4d9..4b59bf37d74b 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -228,6 +228,11 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (!BaseClassDecl->hasTrivialDestructor())
data().HasTrivialDestructor = false;
+ // A class has an Objective-C object member if... or any of its bases
+ // has an Objective-C object member.
+ if (BaseClassDecl->hasObjectMember())
+ setHasObjectMember(true);
+
// Keep track of the presence of mutable fields.
if (BaseClassDecl->hasMutableFields())
data().HasMutableFields = true;
@@ -239,22 +244,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Create base specifier for any direct or indirect virtual bases.
data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
data().NumVBases = VBases.size();
- for (int I = 0, E = VBases.size(); I != E; ++I) {
- TypeSourceInfo *VBaseTypeInfo = VBases[I]->getTypeSourceInfo();
-
- // Skip dependent types; we can't do any checking on them now.
- if (VBaseTypeInfo->getType()->isDependentType())
- continue;
-
- CXXRecordDecl *VBaseClassDecl = cast<CXXRecordDecl>(
- VBaseTypeInfo->getType()->getAs<RecordType>()->getDecl());
-
- data().getVBases()[I] =
- CXXBaseSpecifier(VBaseClassDecl->getSourceRange(), true,
- VBaseClassDecl->getTagKind() == TTK_Class,
- VBases[I]->getAccessSpecifier(), VBaseTypeInfo,
- SourceLocation());
- }
+ for (int I = 0, E = VBases.size(); I != E; ++I)
+ data().getVBases()[I] = *VBases[I];
}
/// Callback function for CXXRecordDecl::forallBases that acknowledges
@@ -698,10 +689,23 @@ NotASpecialMember:;
// A POD struct is a class that is both a trivial class and a
// standard-layout class, and has no non-static data members of type
// non-POD struct, non-POD union (or array of such types).
+ //
+ // Automatic Reference Counting: the presence of a member of Objective-C pointer type
+ // that is not explicitly marked as having no lifetime makes the class non-POD.
+ // However, we delay setting PlainOldData to false in this case so that
+ // Sema has a chance to diagnose cases where the same class would be
+ // non-POD with Automatic Reference Counting but a POD without it.
+ // In this case, the class will become a non-POD class when we complete
+ // the definition.
ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
- if (!T->isPODType())
+ if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
+ if (!Context.getLangOptions().ObjCAutoRefCount ||
+ T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone)
+ setHasObjectMember(true);
+ } else if (!T.isPODType(Context))
data().PlainOldData = false;
+
if (T->isReferenceType()) {
data().HasTrivialDefaultConstructor = false;
@@ -768,6 +772,8 @@ NotASpecialMember:;
if (!FieldRec->hasTrivialDestructor())
data().HasTrivialDestructor = false;
+ if (FieldRec->hasObjectMember())
+ setHasObjectMember(true);
// C++0x [class]p7:
// A standard-layout class is a class that:
@@ -1078,6 +1084,20 @@ void CXXRecordDecl::completeDefinition() {
void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
RecordDecl::completeDefinition();
+ if (hasObjectMember() && getASTContext().getLangOptions().ObjCAutoRefCount) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, copy assignment
+ // operator, and destructor are non-trivial.
+ struct DefinitionData &Data = data();
+ Data.PlainOldData = false;
+ Data.HasTrivialDefaultConstructor = false;
+ Data.HasTrivialCopyConstructor = false;
+ Data.HasTrivialCopyAssignment = false;
+ Data.HasTrivialDestructor = false;
+ }
+
// If the class may be abstract (but hasn't been marked as such), check for
// any pure final overriders.
if (mayBeAbstract()) {
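
The DeclCXX.cpp hunks above do two related things: setBases() and the field-addition path propagate hasObjectMember from bases and record-typed members, and completeDefinition() then demotes the class from POD/trivial when that flag is set under ARC. The sketch below collapses both steps into one simplified Record type, purely for illustration:

    #include <vector>

    struct Record {
      bool HasObjectMember = false;
      bool PlainOldData = true;
      bool HasTrivialCopyConstructor = true;
      std::vector<const Record *> Bases;
      std::vector<const Record *> FieldRecords;

      void completeDefinition(bool ARCEnabled) {
        // Propagate the flag from bases and record-typed fields, as the
        // hunks above do.
        for (const Record *B : Bases)
          if (B->HasObjectMember)
            HasObjectMember = true;
        for (const Record *F : FieldRecords)
          if (F->HasObjectMember)
            HasObjectMember = true;

        // Under ARC an Objective-C object member makes the class non-POD
        // and its special members non-trivial.
        if (HasObjectMember && ARCEnabled) {
          PlainOldData = false;
          HasTrivialCopyConstructor = false;
        }
      }
    };
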
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index e2c4f38ff9be..557b681d2fa6 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -452,6 +452,34 @@ ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
if (!isInstanceMethod())
family = OMF_None;
break;
+
+ case OMF_performSelector:
+ if (!isInstanceMethod() ||
+ !getResultType()->isObjCIdType())
+ family = OMF_None;
+ else {
+ unsigned noParams = param_size();
+ if (noParams < 1 || noParams > 3)
+ family = OMF_None;
+ else {
+ ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
+ QualType ArgT = (*it);
+ if (!ArgT->isObjCSelType()) {
+ family = OMF_None;
+ break;
+ }
+ while (--noParams) {
+ it++;
+ ArgT = (*it);
+ if (!ArgT->isObjCIdType()) {
+ family = OMF_None;
+ break;
+ }
+ }
+ }
+ }
+ break;
+
}
// Cache the result.
@@ -474,8 +502,34 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
} else // we have a factory method.
selfTy = Context.getObjCClassType();
- setSelfDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
- &Context.Idents.get("self"), selfTy));
+ bool selfIsPseudoStrong = false;
+ bool selfIsConsumed = false;
+ if (isInstanceMethod() && Context.getLangOptions().ObjCAutoRefCount) {
+ selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
+
+ // 'self' is always __strong. It's actually pseudo-strong except
+ // in init methods, though.
+ Qualifiers qs;
+ qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ selfTy = Context.getQualifiedType(selfTy, qs);
+
+ // In addition, 'self' is const unless this is an init method.
+ if (getMethodFamily() != OMF_init) {
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+
+ ImplicitParamDecl *self
+ = ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("self"), selfTy);
+ setSelfDecl(self);
+
+ if (selfIsConsumed)
+ self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context));
+
+ if (selfIsPseudoStrong)
+ self->setARCPseudoStrong(true);
setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
&Context.Idents.get("_cmd"),
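
The getMethodFamily() hunk above recognizes a performSelector-family method by its shape: an instance method returning id that takes between one and three parameters, with a SEL first and id for the rest. The following standalone helper restates that check over a simplified descriptor; MethodShape and its string-typed parameter list are illustrative only, not clang's representation:

    #include <cstddef>
    #include <string>
    #include <vector>

    struct MethodShape {
      bool IsInstanceMethod;
      std::string ResultType;              // e.g. "id"
      std::vector<std::string> ParamTypes; // e.g. {"SEL", "id"}
    };

    bool looksLikePerformSelector(const MethodShape &M) {
      if (!M.IsInstanceMethod || M.ResultType != "id")
        return false;
      if (M.ParamTypes.size() < 1 || M.ParamTypes.size() > 3)
        return false;
      if (M.ParamTypes[0] != "SEL")        // first argument is the selector
        return false;
      for (std::size_t i = 1; i < M.ParamTypes.size(); ++i)
        if (M.ParamTypes[i] != "id")       // remaining arguments are objects
          return false;
      return true;
    }
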
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 421770ea70fb..19554a3baaea 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -933,6 +933,11 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
first = false;
}
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ Out << (first ? ' ' : ',') << "strong";
+ first = false;
+ }
+
if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
Out << (first ? ' ' : ',') << "copy";
first = false;
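
The DeclPrinter hunk above slots "strong" into the usual comma-separated attribute-printing idiom, where a first flag decides between a leading space and a comma. A generic sketch of that idiom (printList and the iostream output are just for illustration):

    #include <iostream>
    #include <string>
    #include <vector>

    void printList(const std::vector<std::string> &Attrs) {
      bool first = true;
      for (const std::string &A : Attrs) {
        // First entry gets a leading space, later ones a comma separator.
        std::cout << (first ? " " : ",") << A;
        first = false;
      }
    }
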
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index cef54e97c93a..72c0e9da7f1d 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -533,6 +533,28 @@ bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
llvm_unreachable("All name kinds handled.");
}
+bool DeclarationNameInfo::isInstantiationDependent() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->isInstantiationDependentType();
+
+ return Name.getCXXNameType()->isInstantiationDependentType();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
std::string DeclarationNameInfo::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 987213907e45..4611ae369969 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -142,9 +142,10 @@ void ExplicitTemplateArgumentList::initializeFrom(
}
void ExplicitTemplateArgumentList::initializeFrom(
- const TemplateArgumentListInfo &Info,
- bool &Dependent,
- bool &ContainsUnexpandedParameterPack) {
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
@@ -152,6 +153,8 @@ void ExplicitTemplateArgumentList::initializeFrom(
TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
for (unsigned i = 0; i != NumTemplateArgs; ++i) {
Dependent = Dependent || Info[i].getArgument().isDependent();
+ InstantiationDependent = InstantiationDependent ||
+ Info[i].getArgument().isInstantiationDependent();
ContainsUnexpandedParameterPack
= ContainsUnexpandedParameterPack ||
Info[i].getArgument().containsUnexpandedParameterPack();
@@ -178,14 +181,16 @@ std::size_t ExplicitTemplateArgumentList::sizeFor(
return sizeFor(Info.size());
}
-/// \brief Compute the type- and value-dependence of a declaration reference
+/// \brief Compute the type-, value-, and instantiation-dependence of a
+/// declaration reference
/// based on the declaration being referenced.
static void computeDeclRefDependence(NamedDecl *D, QualType T,
bool &TypeDependent,
- bool &ValueDependent) {
+ bool &ValueDependent,
+ bool &InstantiationDependent) {
TypeDependent = false;
ValueDependent = false;
-
+ InstantiationDependent = false;
// (TD) C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
@@ -200,20 +205,31 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
if (T->isDependentType()) {
TypeDependent = true;
ValueDependent = true;
+ InstantiationDependent = true;
return;
+ } else if (T->isInstantiationDependentType()) {
+ InstantiationDependent = true;
}
// (TD) - a conversion-function-id that specifies a dependent type
if (D->getDeclName().getNameKind()
- == DeclarationName::CXXConversionFunctionName &&
- D->getDeclName().getCXXNameType()->isDependentType()) {
- TypeDependent = true;
- ValueDependent = true;
- return;
+ == DeclarationName::CXXConversionFunctionName) {
+ QualType T = D->getDeclName().getCXXNameType();
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ if (T->isInstantiationDependentType())
+ InstantiationDependent = true;
}
+
// (VD) - the name of a non-type template parameter,
if (isa<NonTypeTemplateParmDecl>(D)) {
ValueDependent = true;
+ InstantiationDependent = true;
return;
}
@@ -223,16 +239,20 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
if (Var->getType()->isIntegralOrEnumerationType() &&
Var->getType().getCVRQualifiers() == Qualifiers::Const) {
if (const Expr *Init = Var->getAnyInitializer())
- if (Init->isValueDependent())
+ if (Init->isValueDependent()) {
ValueDependent = true;
+ InstantiationDependent = true;
+ }
}
// (VD) - FIXME: Missing from the standard:
// - a member function or a static data member of the current
// instantiation
else if (Var->isStaticDataMember() &&
- Var->getDeclContext()->isDependentContext())
+ Var->getDeclContext()->isDependentContext()) {
ValueDependent = true;
+ InstantiationDependent = true;
+ }
return;
}
@@ -242,6 +262,7 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
// instantiation
if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
ValueDependent = true;
+ InstantiationDependent = true;
return;
}
}
@@ -249,7 +270,9 @@ static void computeDeclRefDependence(NamedDecl *D, QualType T,
void DeclRefExpr::computeDependence() {
bool TypeDependent = false;
bool ValueDependent = false;
- computeDeclRefDependence(getDecl(), getType(), TypeDependent, ValueDependent);
+ bool InstantiationDependent = false;
+ computeDeclRefDependence(getDecl(), getType(), TypeDependent, ValueDependent,
+ InstantiationDependent);
// (TD) C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
@@ -262,13 +285,16 @@ void DeclRefExpr::computeDependence() {
hasExplicitTemplateArgs() &&
TemplateSpecializationType::anyDependentTemplateArguments(
getTemplateArgs(),
- getNumTemplateArgs())) {
+ getNumTemplateArgs(),
+ InstantiationDependent)) {
TypeDependent = true;
ValueDependent = true;
+ InstantiationDependent = true;
}
ExprBits.TypeDependent = TypeDependent;
ExprBits.ValueDependent = ValueDependent;
+ ExprBits.InstantiationDependent = InstantiationDependent;
// Is the declaration a parameter pack?
if (getDecl()->isParameterPack())
@@ -280,7 +306,7 @@ DeclRefExpr::DeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs,
QualType T, ExprValueKind VK)
- : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false),
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
if (QualifierLoc)
@@ -289,9 +315,17 @@ DeclRefExpr::DeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
if (FoundD)
getInternalFoundDecl() = FoundD;
DeclRefExprBits.HasExplicitTemplateArgs = TemplateArgs ? 1 : 0;
- if (TemplateArgs)
- getExplicitTemplateArgs().initializeFrom(*TemplateArgs);
-
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ setInstantiationDependent(true);
+ }
+
computeDependence();
}
@@ -498,8 +532,8 @@ double FloatingLiteral::getValueAsApproximateDouble() const {
return V.convertToDouble();
}
-StringLiteral *StringLiteral::Create(ASTContext &C, const char *StrData,
- unsigned ByteLength, bool Wide,
+StringLiteral *StringLiteral::Create(ASTContext &C, llvm::StringRef Str,
+ bool Wide,
bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumStrs) {
@@ -511,10 +545,10 @@ StringLiteral *StringLiteral::Create(ASTContext &C, const char *StrData,
StringLiteral *SL = new (Mem) StringLiteral(Ty);
// OPTIMIZE: could allocate this appended to the StringLiteral.
- char *AStrData = new (C, 1) char[ByteLength];
- memcpy(AStrData, StrData, ByteLength);
+ char *AStrData = new (C, 1) char[Str.size()];
+ memcpy(AStrData, Str.data(), Str.size());
SL->StrData = AStrData;
- SL->ByteLength = ByteLength;
+ SL->ByteLength = Str.size();
SL->IsWide = Wide;
SL->IsPascal = Pascal;
SL->TokLocs[0] = Loc[0];
@@ -593,7 +627,7 @@ getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
// If the byte is in this token, return the location of the byte.
if (ByteNo < TokNumBytes ||
- (ByteNo == TokNumBytes && TokNo == getNumConcatenated())) {
+ (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
// Now that we know the offset of the token in the spelling, use the
@@ -670,6 +704,7 @@ CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
: Expr(SC, t, VK, OK_Ordinary,
fn->isTypeDependent(),
fn->isValueDependent(),
+ fn->isInstantiationDependent(),
fn->containsUnexpandedParameterPack()),
NumArgs(numargs) {
@@ -680,6 +715,8 @@ CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (args[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -695,6 +732,7 @@ CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
: Expr(CallExprClass, t, VK, OK_Ordinary,
fn->isTypeDependent(),
fn->isValueDependent(),
+ fn->isInstantiationDependent(),
fn->containsUnexpandedParameterPack()),
NumArgs(numargs) {
@@ -705,6 +743,8 @@ CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (args[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -862,6 +902,7 @@ OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
: Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
/*TypeDependent=*/false,
/*ValueDependent=*/tsi->getType()->isDependentType(),
+ tsi->getType()->isInstantiationDependentType(),
tsi->getType()->containsUnexpandedParameterPack()),
OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
NumComps(numComps), NumExprs(numExprs)
@@ -917,7 +958,12 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
E->setValueDependent(true);
E->setTypeDependent(true);
- }
+ E->setInstantiationDependent(true);
+ }
+ else if (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
+ E->setInstantiationDependent(true);
+
E->HasQualifierOrFoundDecl = true;
MemberNameQualifier *NQ = E->getMemberQualifier();
@@ -926,8 +972,15 @@ MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
}
if (targs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
E->HasExplicitTemplateArgumentList = true;
- E->getExplicitTemplateArgs().initializeFrom(*targs);
+ E->getExplicitTemplateArgs().initializeFrom(*targs, Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ E->setInstantiationDependent(true);
}
return E;
@@ -1045,6 +1098,12 @@ const char *CastExpr::getCastKindName() const {
return "IntegralComplexCast";
case CK_IntegralComplexToFloatingComplex:
return "IntegralComplexToFloatingComplex";
+ case CK_ObjCConsumeObject:
+ return "ObjCConsumeObject";
+ case CK_ObjCProduceObject:
+ return "ObjCProduceObject";
+ case CK_ObjCReclaimReturnedObject:
+ return "ObjCReclaimReturnedObject";
}
llvm_unreachable("Unhandled cast kind!");
@@ -1056,7 +1115,12 @@ Expr *CastExpr::getSubExprAsWritten() {
CastExpr *E = this;
do {
SubExpr = E->getSubExpr();
-
+
+ // Skip through reference binding to temporary.
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
+ SubExpr = Materialize->GetTemporaryExpr();
+
// Skip any temporary bindings; they're implicit.
if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
SubExpr = Binder->getSubExpr();
@@ -1242,7 +1306,7 @@ InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
Expr **initExprs, unsigned numInits,
SourceLocation rbraceloc)
: Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
- false),
+ false, false),
InitExprs(C, numInits),
LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0),
HadArrayRangeDesignator(false)
@@ -1252,6 +1316,8 @@ InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
ExprBits.TypeDependent = true;
if (initExprs[I]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (initExprs[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (initExprs[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
}
@@ -1490,6 +1556,17 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
case ObjCMessageExprClass: {
const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
+ if (Ctx.getLangOptions().ObjCAutoRefCount &&
+ ME->isInstanceMessage() &&
+ !ME->getType()->isVoidType() &&
+ ME->getSelector().getIdentifierInfoForSlot(0) &&
+ ME->getSelector().getIdentifierInfoForSlot(0)
+ ->getName().startswith("init")) {
+ Loc = getExprLoc();
+ R1 = ME->getSourceRange();
+ return true;
+ }
+
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
Loc = getExprLoc();
@@ -1584,6 +1661,9 @@ bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
return cast<UnaryOperator>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
case ImplicitCastExprClass:
return cast<ImplicitCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()
+ ->isOBJCGCCandidate(Ctx);
case CStyleCastExprClass:
return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
case DeclRefExprClass: {
@@ -1858,7 +1938,8 @@ Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
case CXXStaticCastExprClass:
case CXXFunctionalCastExprClass:
case BinaryOperatorClass:
- case CompoundAssignOperatorClass: {
+ case CompoundAssignOperatorClass:
+ case MaterializeTemporaryExprClass: {
CanThrowResult CT = isTypeDependent() ? CT_Dependent : CT_Cannot;
return MergeCanThrow(CT, CanSubExprsThrow(C, this));
}
@@ -1938,6 +2019,12 @@ Expr *Expr::IgnoreParenCasts() {
continue;
}
}
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+
return E;
}
}
@@ -1967,6 +2054,10 @@ Expr *Expr::IgnoreParenLValueCasts() {
E = P->getResultExpr();
continue;
}
+ } else if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
}
break;
}
@@ -1996,13 +2087,18 @@ Expr *Expr::IgnoreParenImpCasts() {
continue;
}
}
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
return E;
}
}
Expr *Expr::IgnoreConversionOperator() {
if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
- if (isa<CXXConversionDecl>(MCE->getMethodDecl()))
+ if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
return MCE->getImplicitObjectArgument();
}
return this;
@@ -2059,6 +2155,9 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
bool Expr::isDefaultArgument() const {
const Expr *E = this;
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
@@ -2068,6 +2167,9 @@ bool Expr::isDefaultArgument() const {
/// \brief Skip over any no-op casts and any temporary-binding
/// expressions.
static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getCastKind() == CK_NoOp)
E = ICE->getSubExpr();
@@ -2155,6 +2257,12 @@ bool Expr::isImplicitCXXThis() const {
}
}
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = M->GetTemporaryExpr();
+ continue;
+ }
+
break;
}
@@ -2287,6 +2395,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
->isConstantInitializer(Ctx, false);
break;
+
+ case MaterializeTemporaryExprClass:
+ return llvm::cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
+ ->isConstantInitializer(Ctx, false);
}
return isEvaluatable(Ctx);
}
@@ -2345,6 +2457,9 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
} else if (isa<GNUNullExpr>(this)) {
// The GNU __null extension is always a null pointer constant.
return NPCK_GNUNull;
+ } else if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(this)) {
+ return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
}
// C++0x nullptr_t is always a null pointer constant.
@@ -2414,10 +2529,14 @@ FieldDecl *Expr::getBitField() {
if (Field->isBitField())
return Field;
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E))
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
if (BinOp->isAssignmentOp() && BinOp->getLHS())
return BinOp->getLHS()->getBitField();
+ if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
+ return BinOp->getRHS()->getBitField();
+ }
+
return 0;
}
@@ -2517,9 +2636,10 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SourceLocation RBracLoc)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
/*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
/*ContainsUnexpandedParameterPack=*/false),
NumArgs(NumArgs), Kind(IsInstanceSuper? SuperInstance : SuperClass),
- HasMethod(Method != 0), SuperLoc(SuperLoc),
+ HasMethod(Method != 0), IsDelegateInitCall(false), SuperLoc(SuperLoc),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
@@ -2539,8 +2659,10 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
Expr **Args, unsigned NumArgs,
SourceLocation RBracLoc)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
- T->isDependentType(), T->containsUnexpandedParameterPack()),
- NumArgs(NumArgs), Kind(Class), HasMethod(Method != 0),
+ T->isDependentType(), T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ NumArgs(NumArgs), Kind(Class),
+ HasMethod(Method != 0), IsDelegateInitCall(false),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
@@ -2552,6 +2674,8 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
ExprBits.TypeDependent = true;
if (Args[I]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (Args[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -2570,8 +2694,10 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
SourceLocation RBracLoc)
: Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
Receiver->isTypeDependent(),
+ Receiver->isInstantiationDependent(),
Receiver->containsUnexpandedParameterPack()),
- NumArgs(NumArgs), Kind(Instance), HasMethod(Method != 0),
+ NumArgs(NumArgs), Kind(Instance),
+ HasMethod(Method != 0), IsDelegateInitCall(false),
SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
: Sel.getAsOpaquePtr())),
SelectorLoc(SelLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
@@ -2583,6 +2709,8 @@ ObjCMessageExpr::ObjCMessageExpr(QualType T,
ExprBits.TypeDependent = true;
if (Args[I]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (Args[I]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -2702,6 +2830,19 @@ ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
return 0;
}
+llvm::StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
+ switch (getBridgeKind()) {
+ case OBC_Bridge:
+ return "__bridge";
+ case OBC_BridgeTransfer:
+ return "__bridge_transfer";
+ case OBC_BridgeRetained:
+ return "__bridge_retained";
+ }
+
+ return "__bridge";
+}
+
bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
return getCond()->EvaluateAsInt(C) != 0;
}
@@ -2711,6 +2852,7 @@ ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
SourceLocation RP)
: Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
Type->isDependentType(), Type->isDependentType(),
+ Type->isInstantiationDependentType(),
Type->containsUnexpandedParameterPack()),
BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr)
{
@@ -2720,6 +2862,8 @@ ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
ExprBits.TypeDependent = true;
if (args[i]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (args[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -2749,6 +2893,7 @@ GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
AssocExprs[ResultIndex]->getObjectKind(),
AssocExprs[ResultIndex]->isTypeDependent(),
AssocExprs[ResultIndex]->isValueDependent(),
+ AssocExprs[ResultIndex]->isInstantiationDependent(),
ContainsUnexpandedParameterPack),
AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
@@ -2769,8 +2914,9 @@ GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
Context.DependentTy,
VK_RValue,
OK_Ordinary,
- /*isTypeDependent=*/ true,
- /*isValueDependent=*/ true,
+ /*isTypeDependent=*/true,
+ /*isValueDependent=*/true,
+ /*isInstantiationDependent=*/true,
ContainsUnexpandedParameterPack),
AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
@@ -2785,7 +2931,7 @@ GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
// DesignatedInitExpr
//===----------------------------------------------------------------------===//
-IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() {
+IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
assert(Kind == FieldDesignator && "Only valid on a field designator");
if (Field.NameOrField & 0x01)
return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
@@ -2804,6 +2950,7 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
: Expr(DesignatedInitExprClass, Ty,
Init->getValueKind(), Init->getObjectKind(),
Init->isTypeDependent(), Init->isValueDependent(),
+ Init->isInstantiationDependent(),
Init->containsUnexpandedParameterPack()),
EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
@@ -2824,7 +2971,8 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
Expr *Index = IndexExprs[IndexIdx];
if (Index->isTypeDependent() || Index->isValueDependent())
ExprBits.ValueDependent = true;
-
+ if (Index->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
// Propagate unexpanded parameter packs.
if (Index->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -2836,9 +2984,14 @@ DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
Expr *Start = IndexExprs[IndexIdx];
Expr *End = IndexExprs[IndexIdx + 1];
if (Start->isTypeDependent() || Start->isValueDependent() ||
- End->isTypeDependent() || End->isValueDependent())
+ End->isTypeDependent() || End->isValueDependent()) {
ExprBits.ValueDependent = true;
-
+ ExprBits.InstantiationDependent = true;
+ } else if (Start->isInstantiationDependent() ||
+ End->isInstantiationDependent()) {
+ ExprBits.InstantiationDependent = true;
+ }
+
// Propagate unexpanded parameter packs.
if (Start->containsUnexpandedParameterPack() ||
End->containsUnexpandedParameterPack())
@@ -2960,17 +3113,19 @@ void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
Expr **exprs, unsigned nexprs,
- SourceLocation rparenloc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
- false, false, false),
+ SourceLocation rparenloc, QualType T)
+ : Expr(ParenListExprClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false),
NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
-
+ assert(!T.isNull() && "ParenListExpr must have a valid type");
Exprs = new (C) Stmt*[nexprs];
for (unsigned i = 0; i != nexprs; ++i) {
if (exprs[i]->isTypeDependent())
ExprBits.TypeDependent = true;
if (exprs[i]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (exprs[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (exprs[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -2981,6 +3136,8 @@ ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
e = ewc->getSubExpr();
+ if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e))
+ e = m->GetTemporaryExpr();
e = cast<CXXConstructExpr>(e)->getArg(0);
while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
e = ice->getSubExpr();
@@ -3033,13 +3190,16 @@ Stmt::child_range ObjCMessageExpr::children() {
BlockDeclRefExpr::BlockDeclRefExpr(VarDecl *d, QualType t, ExprValueKind VK,
SourceLocation l, bool ByRef,
bool constAdded)
- : Expr(BlockDeclRefExprClass, t, VK, OK_Ordinary, false, false,
+ : Expr(BlockDeclRefExprClass, t, VK, OK_Ordinary, false, false, false,
d->isParameterPack()),
D(d), Loc(l), IsByRef(ByRef), ConstQualAdded(constAdded)
{
bool TypeDependent = false;
bool ValueDependent = false;
- computeDeclRefDependence(D, getType(), TypeDependent, ValueDependent);
+ bool InstantiationDependent = false;
+ computeDeclRefDependence(D, getType(), TypeDependent, ValueDependent,
+ InstantiationDependent);
ExprBits.TypeDependent = TypeDependent;
ExprBits.ValueDependent = ValueDependent;
+ ExprBits.InstantiationDependent = InstantiationDependent;
}
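
Several Expr.cpp hunks above teach the Ignore*() helpers and similar walkers to look through the new MaterializeTemporaryExpr the same way they already look through parentheses and no-op casts. A standalone sketch of that unwrapping loop, with a two-node toy hierarchy standing in for clang's Expr classes (ParenExpr and MaterializeTemporaryExpr here are simplified stand-ins):

    struct Expr {
      virtual ~Expr() = default;
    };

    struct ParenExpr : Expr {
      Expr *Sub;
      explicit ParenExpr(Expr *S) : Sub(S) {}
    };

    struct MaterializeTemporaryExpr : Expr {
      Expr *Temporary;
      explicit MaterializeTemporaryExpr(Expr *T) : Temporary(T) {}
    };

    Expr *ignoreWrappers(Expr *E) {
      while (true) {
        if (auto *P = dynamic_cast<ParenExpr *>(E)) {
          E = P->Sub;            // parentheses are transparent
          continue;
        }
        if (auto *M = dynamic_cast<MaterializeTemporaryExpr *>(E)) {
          E = M->Temporary;      // look through the reference binding
          continue;
        }
        return E;                // nothing left to strip
      }
    }
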
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 1a1a0a36a65b..f92afffb5851 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -57,6 +57,7 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SourceLocation constructorRParen)
: Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType(),
ty->containsUnexpandedParameterPack()),
GlobalNew(globalNew), Initializer(initializer),
UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize),
@@ -68,6 +69,9 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
unsigned i = 0;
if (Array) {
+ if (arraySize->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
if (arraySize->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -75,6 +79,8 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
}
for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ if (placementArgs[j]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (placementArgs[j]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -82,6 +88,8 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
}
for (unsigned j = 0; j < NumConstructorArgs; ++j) {
+ if (constructorArgs[j]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (constructorArgs[j]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -144,6 +152,14 @@ CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(ASTContext &Context,
(DestroyedType.getTypeSourceInfo() &&
DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
/*isValueDependent=*/Base->isValueDependent(),
+ (Base->isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
+ (ScopeType &&
+ ScopeType->getType()->isInstantiationDependentType()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->isInstantiationDependentType())),
// ContainsUnexpandedParameterPack
(Base->containsUnexpandedParameterPack() ||
(QualifierLoc &&
@@ -212,9 +228,14 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End,
bool KnownDependent,
+ bool KnownInstantiationDependent,
bool KnownContainsUnexpandedParameterPack)
: Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
KnownDependent,
+ (KnownInstantiationDependent ||
+ NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
(KnownContainsUnexpandedParameterPack ||
NameInfo.containsUnexpandedParameterPack() ||
(QualifierLoc &&
@@ -246,14 +267,18 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
// expansions.
if (TemplateArgs) {
bool Dependent = false;
+ bool InstantiationDependent = false;
bool ContainsUnexpandedParameterPack = false;
getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
+ InstantiationDependent,
ContainsUnexpandedParameterPack);
if (Dependent) {
- ExprBits.TypeDependent = true;
- ExprBits.ValueDependent = true;
- }
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ if (InstantiationDependent)
+ ExprBits.InstantiationDependent = true;
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
}
@@ -291,6 +316,9 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
const TemplateArgumentListInfo *Args)
: Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
true, true,
+ (NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
(NameInfo.containsUnexpandedParameterPack() ||
(QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()
@@ -300,11 +328,14 @@ DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
{
if (Args) {
bool Dependent = true;
+ bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack
= ExprBits.ContainsUnexpandedParameterPack;
reinterpret_cast<ExplicitTemplateArgumentList*>(this+1)
- ->initializeFrom(*Args, Dependent, ContainsUnexpandedParameterPack);
+ ->initializeFrom(*Args, Dependent, InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+
ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
}
}
@@ -632,6 +663,7 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
SourceRange ParenRange)
: Expr(SC, T, VK_RValue, OK_Ordinary,
T->isDependentType(), T->isDependentType(),
+ T->isInstantiationDependentType(),
T->containsUnexpandedParameterPack()),
Constructor(D), Loc(Loc), ParenRange(ParenRange), Elidable(elidable),
ZeroInitialization(ZeroInitialization), ConstructKind(ConstructKind),
@@ -645,6 +677,8 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
if (args[i]->isValueDependent())
ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
if (args[i]->containsUnexpandedParameterPack())
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -660,6 +694,7 @@ ExprWithCleanups::ExprWithCleanups(ASTContext &C,
: Expr(ExprWithCleanupsClass, subexpr->getType(),
subexpr->getValueKind(), subexpr->getObjectKind(),
subexpr->isTypeDependent(), subexpr->isValueDependent(),
+ subexpr->isInstantiationDependent(),
subexpr->containsUnexpandedParameterPack()),
SubExpr(subexpr), Temps(0), NumTemps(0) {
if (numtemps) {
@@ -690,8 +725,11 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
SourceLocation RParenLoc)
: Expr(CXXUnresolvedConstructExprClass,
Type->getType().getNonReferenceType(),
- VK_LValue, OK_Ordinary,
- Type->getType()->isDependentType(), true,
+ (Type->getType()->isLValueReferenceType() ? VK_LValue
+ :Type->getType()->isRValueReferenceType()? VK_XValue
+ :VK_RValue),
+ OK_Ordinary,
+ Type->getType()->isDependentType(), true, true,
Type->getType()->containsUnexpandedParameterPack()),
Type(Type),
LParenLoc(LParenLoc),
@@ -740,7 +778,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
DeclarationNameInfo MemberNameInfo,
const TemplateArgumentListInfo *TemplateArgs)
: Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
- VK_LValue, OK_Ordinary, true, true,
+ VK_LValue, OK_Ordinary, true, true, true,
((Base && Base->containsUnexpandedParameterPack()) ||
(QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()
@@ -753,8 +791,10 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
MemberNameInfo(MemberNameInfo) {
if (TemplateArgs) {
bool Dependent = true;
+ bool InstantiationDependent = true;
bool ContainsUnexpandedParameterPack = false;
getExplicitTemplateArgs().initializeFrom(*TemplateArgs, Dependent,
+ InstantiationDependent,
ContainsUnexpandedParameterPack);
if (ContainsUnexpandedParameterPack)
ExprBits.ContainsUnexpandedParameterPack = true;
@@ -769,7 +809,7 @@ CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
NamedDecl *FirstQualifierFoundInScope,
DeclarationNameInfo MemberNameInfo)
: Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
- VK_LValue, OK_Ordinary, true, true,
+ VK_LValue, OK_Ordinary, true, true, true,
((Base && Base->containsUnexpandedParameterPack()) ||
(QualifierLoc &&
QualifierLoc.getNestedNameSpecifier()->
@@ -874,6 +914,8 @@ UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C,
// Dependent
((Base && Base->isTypeDependent()) ||
BaseType->isDependentType()),
+ ((Base && Base->isInstantiationDependent()) ||
+ BaseType->isInstantiationDependentType()),
// Contains unexpanded parameter pack
((Base && Base->containsUnexpandedParameterPack()) ||
BaseType->containsUnexpandedParameterPack())),
@@ -962,7 +1004,7 @@ SubstNonTypeTemplateParmPackExpr(QualType T,
SourceLocation NameLoc,
const TemplateArgument &ArgPack)
: Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
- true, false, true),
+ true, true, true, true),
Param(Param), Arguments(ArgPack.pack_begin()),
NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
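
The CXXUnresolvedConstructExpr change above (and the MaterializeTemporaryExpr classification a bit further down) apply the same rule: an expression whose written type is an lvalue reference yields an lvalue, an rvalue reference yields an xvalue, and anything else is a prvalue. A minimal restatement of that rule with placeholder enums, not clang's actual ExprValueKind machinery:

    enum ValueKind { VK_RValue, VK_LValue, VK_XValue };

    enum class RefKind { None, LValueRef, RValueRef };

    ValueKind classifyByWrittenType(RefKind K) {
      switch (K) {
      case RefKind::LValueRef:
        return VK_LValue;   // T&  -> lvalue
      case RefKind::RValueRef:
        return VK_XValue;   // T&& -> xvalue
      case RefKind::None:
        return VK_RValue;   // plain T -> prvalue
      }
      return VK_RValue;     // not reached; silences -Wreturn-type
    }
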
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index d177cb5cbc97..e7888a6aa7b3 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -117,7 +117,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
case Expr::CXXDependentScopeMemberExprClass:
- case Expr::CXXUnresolvedConstructExprClass:
case Expr::DependentScopeDeclRefExprClass:
// ObjC instance variables are lvalues
// FIXME: ObjC++0x might have different rules
@@ -162,9 +161,13 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::SizeOfPackExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
return Cl::CL_PRValue;
// Next come the complicated cases.
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return ClassifyInternal(Ctx,
+ cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
// C++ [expr.sub]p1: The result is an lvalue of type "T".
// However, subscripting vector types is more like member access.
@@ -289,10 +292,15 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
// Only in C++ can casts be interesting at all.
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+ case Expr::CXXUnresolvedConstructExprClass:
+ return ClassifyUnnamed(Ctx,
+ cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten());
+
case Expr::BinaryConditionalOperatorClass: {
if (!Lang.CPlusPlus) return Cl::CL_PRValue;
const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
@@ -339,6 +347,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::PackExpansionExprClass:
return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
+
+ case Expr::MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
+ ? Cl::CL_LValue
+ : Cl::CL_XValue;
}
llvm_unreachable("unhandled expression kind in classification");
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 06c5645afb3f..786155af281d 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -224,11 +224,10 @@ static APSInt HandleFloatToIntCast(QualType DestType, QualType SrcType,
bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
// FIXME: Warning for overflow.
- uint64_t Space[4];
+ APSInt Result(DestWidth, !DestSigned);
bool ignored;
- (void)Value.convertToInteger(Space, DestWidth, DestSigned,
- llvm::APFloat::rmTowardZero, &ignored);
- return APSInt(llvm::APInt(DestWidth, 4, Space), !DestSigned);
+ (void)Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored);
+ return Result;
}
static APFloat HandleFloatToFloatCast(QualType DestType, QualType SrcType,
@@ -282,6 +281,17 @@ public:
return true;
return false;
}
+ bool VisitObjCIvarRefExpr(const ObjCIvarRefExpr *E) {
+ if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+ bool VisitBlockDeclRefExpr (const BlockDeclRefExpr *E) {
+ if (Info.Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+
// We don't want to evaluate BlockExprs multiple times, as they generate
// a ton of code.
bool VisitBlockExpr(const BlockExpr *E) { return true; }
@@ -395,6 +405,8 @@ public:
{ return StmtVisitorTy::Visit(E->getChosenSubExpr(Info.Ctx)); }
RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E)
{ return StmtVisitorTy::Visit(E->getResultExpr()); }
+ RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+ { return StmtVisitorTy::Visit(E->getReplacement()); }
RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
OpaqueValueEvaluation opaque(Info, E->getOpaqueValue(), E->getCommon());
@@ -525,15 +537,7 @@ bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
if (FD->getType()->isReferenceType())
return false;
- // FIXME: This is linear time.
- unsigned i = 0;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- Field != FieldEnd; (void)++Field, ++i) {
- if (*Field == FD)
- break;
- }
-
+ unsigned i = FD->getFieldIndex();
Result.Offset += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
return true;
}
@@ -945,7 +949,7 @@ public:
: ExprEvaluatorBaseTy(info), Result(result) {}
bool Success(const llvm::APSInt &SI, const Expr *E) {
- assert(E->getType()->isIntegralOrEnumerationType() &&
+ assert(E->getType()->isIntegralOrEnumerationType() &&
"Invalid evaluation result.");
assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
"Invalid evaluation result.");
@@ -1095,8 +1099,25 @@ static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
// Enums are integer constant exprs.
- if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D))
- return Success(ECD->getInitVal(), E);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ // Check for signedness/width mismatches between E's type and ECD's value.
+ bool SameSign = (ECD->getInitVal().isSigned()
+ == E->getType()->isSignedIntegerOrEnumerationType());
+ bool SameWidth = (ECD->getInitVal().getBitWidth()
+ == Info.Ctx.getIntWidth(E->getType()));
+ if (SameSign && SameWidth)
+ return Success(ECD->getInitVal(), E);
+ else {
+ // Get rid of mismatch (otherwise Success assertions will fail)
+ // by computing a new value matching the type of E.
+ llvm::APSInt Val = ECD->getInitVal();
+ if (!SameSign)
+ Val.setIsSigned(!ECD->getInitVal().isSigned());
+ if (!SameWidth)
+ Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+ return Success(Val, E);
+ }
+ }
// In C++, const, non-volatile integers initialized with ICEs are ICEs.
// In C, they can also be folded, although they are not ICEs.
@@ -1797,6 +1818,9 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_GetObjCProperty:
case CK_LValueBitCast:
case CK_UserDefinedConversion:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
return false;
case CK_LValueToRValue:
@@ -2301,6 +2325,9 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -2771,6 +2798,8 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::PackExpansionExprClass:
case Expr::SubstNonTypeTemplateParmPackExprClass:
case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::MaterializeTemporaryExprClass:
return ICEDiag(2, E->getLocStart());
case Expr::SizeOfPackExprClass:
@@ -2778,6 +2807,10 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
// GCC considers the GNU __null value to be an integral constant expression.
return NoDiag();
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return
+ CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+
case Expr::ParenExprClass:
return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
case Expr::GenericSelectionExprClass:
@@ -2995,7 +3028,8 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXFunctionalCastExprClass:
case Expr::CXXStaticCastExprClass:
case Expr::CXXReinterpretCastExprClass:
- case Expr::CXXConstCastExprClass: {
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
if (SubExpr->getType()->isIntegralOrEnumerationType())
return CheckICE(SubExpr, Ctx);
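
The EnumConstantDecl hunk above has to hand Success() a value whose signedness and bit width match the expression's type, so it copies the stored APSInt and adjusts it. A compact version of that adjustment, assuming only the LLVM ADT header; TargetWidth and TargetSigned stand in for Info.Ctx.getIntWidth(E->getType()) and the type's signedness:

    #include "llvm/ADT/APSInt.h"

    llvm::APSInt normalizeFor(llvm::APSInt Val, unsigned TargetWidth,
                              bool TargetSigned) {
      // Match the signedness first, then the width, as the hunk above does.
      if (Val.isSigned() != TargetSigned)
        Val.setIsSigned(TargetSigned);
      if (Val.getBitWidth() != TargetWidth)
        Val = Val.extOrTrunc(TargetWidth);
      return Val;
    }
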
diff --git a/lib/AST/ExternalASTSource.cpp b/lib/AST/ExternalASTSource.cpp
index f428318a21e3..b96d65a729ca 100644
--- a/lib/AST/ExternalASTSource.cpp
+++ b/lib/AST/ExternalASTSource.cpp
@@ -51,11 +51,11 @@ ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
void ExternalASTSource::MaterializeVisibleDecls(const DeclContext *DC) { }
-bool
+ExternalLoadResult
ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
llvm::SmallVectorImpl<Decl*> &Result) {
- return true;
+ return ELR_AlreadyLoaded;
}
void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { }
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index e81ec7e54b62..ec9863b298e6 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
@@ -236,6 +237,9 @@ private:
bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
+ void mangleExistingSubstitution(QualType type);
+ void mangleExistingSubstitution(TemplateName name);
+
bool mangleStandardSubstitution(const NamedDecl *ND);
void addSubstitution(const NamedDecl *ND) {
@@ -255,9 +259,6 @@ private:
DeclarationName name,
unsigned KnownArity = UnknownArity);
- static bool isUnresolvedType(const Type *type);
- void mangleUnresolvedType(const Type *type);
-
void mangleName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
@@ -318,7 +319,7 @@ private:
unsigned NumTemplateArgs);
void mangleTemplateArgs(const TemplateParameterList &PL,
const TemplateArgumentList &AL);
- void mangleTemplateArg(const NamedDecl *P, const TemplateArgument &A);
+ void mangleTemplateArg(const NamedDecl *P, TemplateArgument A);
void mangleUnresolvedTemplateArgs(const TemplateArgument *args,
unsigned numArgs);
@@ -451,13 +452,8 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
FD = PrimaryTemplate->getTemplatedDecl();
}
- // Do the canonicalization out here because parameter types can
- // undergo additional canonicalization (e.g. array decay).
- const FunctionType *FT
- = cast<FunctionType>(Context.getASTContext()
- .getCanonicalType(FD->getType()));
-
- mangleBareFunctionType(FT, MangleReturnType);
+ mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
+ MangleReturnType);
}
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
@@ -597,19 +593,13 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
if (mangleSubstitution(Template))
return;
- // FIXME: How to cope with operators here?
DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
assert(Dependent && "Not a dependent template name?");
- if (!Dependent->isIdentifier()) {
- // FIXME: We can't possibly know the arity of the operator here!
- Diagnostic &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot mangle dependent operator name");
- Diags.Report(DiagID);
- return;
- }
+ if (const IdentifierInfo *Id = Dependent->getIdentifier())
+ mangleSourceName(Id);
+ else
+ mangleOperatorName(Dependent->getOperator(), UnknownArity);
- mangleSourceName(Dependent->getIdentifier());
addSubstitution(Template);
}
@@ -702,31 +692,6 @@ void CXXNameMangler::manglePrefix(QualType type) {
}
}
-/// Returns true if the given type, appearing within an
-/// unresolved-name, should be mangled as an unresolved-type.
-bool CXXNameMangler::isUnresolvedType(const Type *type) {
- // <unresolved-type> ::= <template-param>
- // ::= <decltype>
- // ::= <template-template-param> <template-args>
- // (this last is not official yet)
-
- if (isa<TemplateTypeParmType>(type)) return true;
- if (isa<DecltypeType>(type)) return true;
- // typeof?
- if (const TemplateSpecializationType *tst =
- dyn_cast<TemplateSpecializationType>(type)) {
- TemplateDecl *temp = tst->getTemplateName().getAsTemplateDecl();
- if (temp && isa<TemplateTemplateParmDecl>(temp))
- return true;
- }
- return false;
-}
-
-void CXXNameMangler::mangleUnresolvedType(const Type *type) {
- // This seems to be do everything we want.
- mangleType(QualType(type, 0));
-}
-
/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
///
/// \param firstQualifierLookup - the entity found by unqualified lookup
@@ -794,45 +759,141 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
} else {
// Otherwise, all the cases want this.
Out << "sr";
+ }
+
+ // Only certain other types are valid as prefixes; enumerate them.
+ switch (type->getTypeClass()) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::Paren:
+ case Type::Elaborated:
+ case Type::Attributed:
+ case Type::Auto:
+ case Type::PackExpansion:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type is illegal as a nested name specifier");
+
+ case Type::SubstTemplateTypeParmPack:
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(decltype(T::foo(U())) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ // <unresolved-type> ::= <template-param>
+ // ::= <decltype>
+ // ::= <template-template-param> <template-args>
+ // (this last is not official yet)
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::TemplateTypeParm:
+ case Type::UnaryTransform:
+ case Type::SubstTemplateTypeParm:
+ unresolvedType:
+ assert(!qualifier->getPrefix());
+
+ // We only get here recursively if we're followed by identifiers.
+ if (recursive) Out << 'N';
+
+ // This seems to do everything we want. It's not really
+ // sanctioned for a substituted template parameter, though.
+ mangleType(QualType(type, 0));
+
+ // We never want to print 'E' directly after an unresolved-type,
+ // so we return directly.
+ return;
+
+ case Type::Typedef:
+ mangleSourceName(cast<TypedefType>(type)->getDecl()->getIdentifier());
+ break;
+
+ case Type::UnresolvedUsing:
+ mangleSourceName(cast<UnresolvedUsingType>(type)->getDecl()
+ ->getIdentifier());
+ break;
- if (isUnresolvedType(type)) {
- // We only get here recursively if we're followed by identifiers.
- if (recursive) Out << 'N';
- mangleUnresolvedType(type);
+ case Type::Record:
+ mangleSourceName(cast<RecordType>(type)->getDecl()->getIdentifier());
+ break;
- // We never want to print 'E' directly after an unresolved-type,
- // so we return directly.
- return;
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *tst
+ = cast<TemplateSpecializationType>(type);
+ TemplateName name = tst->getTemplateName();
+ switch (name.getKind()) {
+ case TemplateName::Template:
+ case TemplateName::QualifiedTemplate: {
+ TemplateDecl *temp = name.getAsTemplateDecl();
+
+ // If the base is a template template parameter, this is an
+ // unresolved type.
+ assert(temp && "no template for template specialization type");
+ if (isa<TemplateTemplateParmDecl>(temp)) goto unresolvedType;
+
+ mangleSourceName(temp->getIdentifier());
+ break;
}
- }
- assert(!isUnresolvedType(type));
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::DependentTemplate:
+ llvm_unreachable("invalid base for a template specialization type");
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = name.getAsSubstTemplateTemplateParm();
+ mangleExistingSubstitution(subst->getReplacement());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class U> class T...> class A {
+ // template <class U...> void foo(decltype(T<U>::foo) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
- // Only certain other types are valid as prefixes; enumerate them.
- // FIXME: can we get ElaboratedTypes here?
- // FIXME: SubstTemplateTypeParmType?
- if (const TagType *t = dyn_cast<TagType>(type)) {
- mangleSourceName(t->getDecl()->getIdentifier());
- } else if (const TypedefType *t = dyn_cast<TypedefType>(type)) {
- mangleSourceName(t->getDecl()->getIdentifier());
- } else if (const UnresolvedUsingType *t
- = dyn_cast<UnresolvedUsingType>(type)) {
- mangleSourceName(t->getDecl()->getIdentifier());
- } else if (const DependentNameType *t
- = dyn_cast<DependentNameType>(type)) {
- mangleSourceName(t->getIdentifier());
- } else if (const TemplateSpecializationType *tst
- = dyn_cast<TemplateSpecializationType>(type)) {
- TemplateDecl *temp = tst->getTemplateName().getAsTemplateDecl();
- assert(temp && "no template for template specialization type");
- mangleSourceName(temp->getIdentifier());
mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
- } else if (const DependentTemplateSpecializationType *tst
- = dyn_cast<DependentTemplateSpecializationType>(type)) {
+ break;
+ }
+
+ case Type::InjectedClassName:
+ mangleSourceName(cast<InjectedClassNameType>(type)->getDecl()
+ ->getIdentifier());
+ break;
+
+ case Type::DependentName:
+ mangleSourceName(cast<DependentNameType>(type)->getIdentifier());
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *tst
+ = cast<DependentTemplateSpecializationType>(type);
mangleSourceName(tst->getIdentifier());
mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
- } else {
- llvm_unreachable("unexpected type in nested name specifier!");
+ break;
+ }
}
break;
}
@@ -1036,7 +1097,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
case DeclarationName::CXXConversionFunctionName:
// <operator-name> ::= cv <type> # (cast)
Out << "cv";
- mangleType(Context.getASTContext().getCanonicalType(Name.getCXXNameType()));
+ mangleType(Name.getCXXNameType());
break;
case DeclarationName::CXXOperatorName: {
@@ -1323,10 +1384,23 @@ void CXXNameMangler::mangleType(TemplateName TN) {
break;
}
+ case TemplateName::SubstTemplateTemplateParm: {
+ // Substituted template parameters are mangled as the substituted
+ // template. This will check for the substitution twice, which is
+ // fine, but we have to return early so that we don't try to *add*
+ // the substitution twice.
+ SubstTemplateTemplateParmStorage *subst
+ = TN.getAsSubstTemplateTemplateParm();
+ mangleType(subst->getReplacement());
+ return;
+ }
+
case TemplateName::SubstTemplateTemplateParmPack: {
- SubstTemplateTemplateParmPackStorage *SubstPack
- = TN.getAsSubstTemplateTemplateParmPack();
- mangleTemplateParameter(SubstPack->getParameterPack()->getIndex());
+ // FIXME: not clear how to mangle this!
+ // template <template <class> class T...> class A {
+ // template <template <class> class U...> void foo(B<T,U> x...);
+ // };
+ Out << "_SUBSTPACK_";
break;
}
}
@@ -1464,7 +1538,40 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
Out << 'U' << ASString.size() << ASString;
}
- // FIXME: For now, just drop all extension qualifiers on the floor.
+ llvm::StringRef LifetimeName;
+ switch (Quals.getObjCLifetime()) {
+ // Objective-C ARC Extension:
+ //
+ // <type> ::= U "__strong"
+ // <type> ::= U "__weak"
+ // <type> ::= U "__autoreleasing"
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ LifetimeName = "__weak";
+ break;
+
+ case Qualifiers::OCL_Strong:
+ LifetimeName = "__strong";
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ LifetimeName = "__autoreleasing";
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ // The __unsafe_unretained qualifier is *not* mangled, so that
+ // __unsafe_unretained types in ARC produce the same manglings as the
+ // equivalent (but, naturally, unqualified) types in non-ARC, providing
+ // better ABI compatibility.
+ //
+ // It's safe to do this because unqualified 'id' won't show up
+ // in any type signatures that need to be mangled.
+ break;
+ }
+ if (!LifetimeName.empty())
+ Out << 'U' << LifetimeName.size() << LifetimeName;
}
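The ARC lifetime qualifiers above are emitted as Itanium vendor-extended qualifiers: the letter 'U' followed by the length-prefixed source spelling. A minimal standalone sketch of that encoding (illustrative only, not part of the patch):

    #include <string>

    // Reproduces the vendor-extended qualifier spelling the patch emits:
    // "__weak" -> "U6__weak", "__strong" -> "U8__strong",
    // "__autoreleasing" -> "U15__autoreleasing"; an empty spelling
    // (OCL_None / __unsafe_unretained) produces no output at all.
    std::string encodeLifetimeQualifier(const std::string &Spelling) {
      if (Spelling.empty())
        return std::string();
      return "U" + std::to_string(Spelling.size()) + Spelling;
    }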
void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
@@ -1489,26 +1596,59 @@ void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
Context.mangleObjCMethodName(MD, Out);
}
-void CXXNameMangler::mangleType(QualType nonCanon) {
- // Only operate on the canonical type!
- QualType canon = nonCanon.getCanonicalType();
-
- SplitQualType split = canon.split();
+void CXXNameMangler::mangleType(QualType T) {
+ // If our type is instantiation-dependent but not dependent, we mangle
+ // it as it was written in the source, removing any top-level sugar.
+ // Otherwise, use the canonical type.
+ //
+ // FIXME: This is an approximation of the instantiation-dependent name
+ // mangling rules, since we should really be using the type as written and
+ // augmented via semantic analysis (i.e., with implicit conversions and
+ // default template arguments) for any instantiation-dependent type.
+ // Unfortunately, that requires several changes to our AST:
+ // - Instantiation-dependent TemplateSpecializationTypes will need to be
+ // uniqued, so that we can handle substitutions properly
+ // - Default template arguments will need to be represented in the
+ // TemplateSpecializationType, since they need to be mangled even though
+ // they aren't written.
+ // - Conversions on non-type template arguments need to be expressed, since
+ // they can affect the mangling of sizeof/alignof.
+ if (!T->isInstantiationDependentType() || T->isDependentType())
+ T = T.getCanonicalType();
+ else {
+ // Desugar any types that are purely sugar.
+ do {
+ // Don't desugar through template specialization types that aren't
+ // type aliases. We need to mangle the template arguments as written.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(T))
+ if (!TST->isTypeAlias())
+ break;
+
+ QualType Desugared
+ = T.getSingleStepDesugaredType(Context.getASTContext());
+ if (Desugared == T)
+ break;
+
+ T = Desugared;
+ } while (true);
+ }
+ SplitQualType split = T.split();
Qualifiers quals = split.second;
const Type *ty = split.first;
- bool isSubstitutable = quals || !isa<BuiltinType>(ty);
- if (isSubstitutable && mangleSubstitution(canon))
+ bool isSubstitutable = quals || !isa<BuiltinType>(T);
+ if (isSubstitutable && mangleSubstitution(T))
return;
// If we're mangling a qualified array type, push the qualifiers to
// the element type.
- if (quals && isa<ArrayType>(ty)) {
- ty = Context.getASTContext().getAsArrayType(canon);
+ if (quals && isa<ArrayType>(T)) {
+ ty = Context.getASTContext().getAsArrayType(T);
quals = Qualifiers();
- // Note that we don't update canon: we want to add the
- // substitution at the canonical type.
+ // Note that we don't update T: we want to add the
+ // substitution at the original type.
}
if (quals) {
@@ -1533,7 +1673,7 @@ void CXXNameMangler::mangleType(QualType nonCanon) {
// Add the substitution.
if (isSubstitutable)
- addSubstitution(canon);
+ addSubstitution(T);
}
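The instantiation-dependent-but-not-dependent case this change targets is easiest to see with a small example (illustrative only). sizeof(sizeof(T)) mentions T, so instantiating it can still fail (for an incomplete T), but its type and value no longer depend on T, and canonicalization would fold it to a plain constant. The new path therefore keeps such types in their as-written form, stripping only pure sugar, so the fact that the bound was spelled in terms of T survives into the mangled name.

    // Instantiation-dependent but not dependent: the second array bound can
    // be evaluated without knowing T, yet it is still written in terms of T
    // and must be mangled that way.
    template <typename T> void f(char (*)[sizeof(T)]);          // value-dependent bound
    template <typename T> void f(char (*)[sizeof(sizeof(T))]);  // instantiation-dependent only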
void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
@@ -1647,7 +1787,7 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
ArgEnd = Proto->arg_type_end();
Arg != ArgEnd; ++Arg)
- mangleType(*Arg);
+ mangleType(Context.getASTContext().getSignatureParameterType(*Arg));
FunctionTypeDepth.pop(saved);
@@ -1737,7 +1877,11 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
// <type> ::= <template-param>
void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
- mangleTemplateParameter(T->getReplacedParameter()->getIndex());
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(T(*)(U) x...);
+ // };
+ Out << "_SUBSTPACK_";
}
// <type> ::= P <type> # pointer-to
@@ -2052,6 +2196,9 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// <expr-primary> ::= L <type> <value number> E # integer literal
  //              ::= L <type> <value float> E   # floating literal
// ::= L <mangled-name> E # external name
+ QualType ImplicitlyConvertedToType;
+
+recurse:
switch (E->getStmtClass()) {
case Expr::NoStmtClass:
#define ABSTRACT_STMT(Type)
@@ -2089,6 +2236,7 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::ObjCProtocolExprClass:
case Expr::ObjCSelectorExprClass:
case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::OffsetOfExprClass:
case Expr::PredefinedExprClass:
case Expr::ShuffleVectorExprClass:
@@ -2131,6 +2279,11 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
break;
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ Arity);
+ break;
+
case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
@@ -2209,6 +2362,10 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::UnresolvedLookupExprClass: {
const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
mangleUnresolvedName(ULE->getQualifier(), 0, ULE->getName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
if (ULE->hasExplicitTemplateArgs())
mangleTemplateArgs(ULE->getExplicitTemplateArgs());
break;
@@ -2241,6 +2398,23 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
case Expr::UnaryExprOrTypeTraitExprClass: {
const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
+
+ if (!SAE->isInstantiationDependent()) {
+ // Itanium C++ ABI:
+ // If the operand of a sizeof or alignof operator is not
+ // instantiation-dependent it is encoded as an integer literal
+ // reflecting the result of the operator.
+ //
+ // If the result of the operator is implicitly converted to a known
+ // integer type, that type is used for the literal; otherwise, the type
+ // of std::size_t or std::ptrdiff_t is used.
+ QualType T = (ImplicitlyConvertedToType.isNull() ||
+ !ImplicitlyConvertedToType->isIntegerType())? SAE->getType()
+ : ImplicitlyConvertedToType;
+ mangleIntegerLiteral(T, SAE->EvaluateAsInt(Context.getASTContext()));
+ break;
+ }
+
switch(SAE->getKind()) {
case UETT_SizeOf:
Out << 's';
@@ -2344,10 +2518,19 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
}
case Expr::ImplicitCastExprClass: {
- mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr(), Arity);
- break;
+ ImplicitlyConvertedToType = E->getType();
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto recurse;
}
-
+
+ case Expr::ObjCBridgedCastExprClass: {
+ // Mangle ownership casts as a vendor extended operator __bridge,
+ // __bridge_transfer, or __bridge_retain.
+ llvm::StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
+ Out << "v1U" << Kind.size() << Kind;
+ }
+ // Fall through to mangle the cast itself.
+
case Expr::CStyleCastExprClass:
case Expr::CXXStaticCastExprClass:
case Expr::CXXDynamicCastExprClass:
@@ -2408,35 +2591,22 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
}
case Expr::SubstNonTypeTemplateParmPackExprClass:
- mangleTemplateParameter(
- cast<SubstNonTypeTemplateParmPackExpr>(E)->getParameterPack()->getIndex());
+ // FIXME: not clear how to mangle this!
+ // template <unsigned N...> class A {
+ // template <class U...> void foo(U (&x)[N]...);
+ // };
+ Out << "_SUBSTPACK_";
break;
case Expr::DependentScopeDeclRefExprClass: {
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
- NestedNameSpecifier *NNS = DRE->getQualifier();
- const Type *QTy = NNS->getAsType();
-
- // When we're dealing with a nested-name-specifier that has just a
- // dependent identifier in it, mangle that as a typename. FIXME:
- // It isn't clear that we ever actually want to have such a
- // nested-name-specifier; why not just represent it as a typename type?
- if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
- QTy = getASTContext().getDependentNameType(ETK_Typename,
- NNS->getPrefix(),
- NNS->getAsIdentifier())
- .getTypePtr();
- }
- assert(QTy && "Qualifier was not type!");
+ mangleUnresolvedName(DRE->getQualifier(), 0, DRE->getDeclName(), Arity);
- // ::= sr <type> <unqualified-name> # dependent name
- // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
- Out << "sr";
- mangleType(QualType(QTy, 0));
- mangleUnqualifiedName(0, DRE->getDeclName(), Arity);
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
if (DRE->hasExplicitTemplateArgs())
mangleTemplateArgs(DRE->getExplicitTemplateArgs());
-
break;
}
@@ -2537,15 +2707,13 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
else if (const TemplateTemplateParmDecl *TempTP
= dyn_cast<TemplateTemplateParmDecl>(Pack))
mangleTemplateParameter(TempTP->getIndex());
- else {
- // Note: proposed by Mike Herrick on 11/30/10
- // <expression> ::= sZ <function-param> # size of function parameter pack
- Diagnostic &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
- "cannot mangle sizeof...(function parameter pack)");
- Diags.Report(DiagID);
- return;
- }
+ else
+ mangleFunctionParam(cast<ParmVarDecl>(Pack));
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
break;
}
}
@@ -2696,12 +2864,15 @@ void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
}
void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
- const TemplateArgument &A) {
+ TemplateArgument A) {
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
// ::= J <template-arg>* E # argument pack
- // ::= sp <expression> # pack expansion of (C++0x)
+ // ::= sp <expression> # pack expansion of (C++0x)
+ if (!A.isInstantiationDependent() || A.isDependent())
+ A = Context.getASTContext().getCanonicalTemplateArgument(A);
+
switch (A.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Cannot mangle NULL template argument");
@@ -2780,6 +2951,18 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
Out << 'T' << (Index - 1) << '_';
}
+void CXXNameMangler::mangleExistingSubstitution(QualType type) {
+ bool result = mangleSubstitution(type);
+ assert(result && "no existing substitution for type");
+ (void) result;
+}
+
+void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) {
+ bool result = mangleSubstitution(tname);
+ assert(result && "no existing substitution for template name");
+ (void) result;
+}
+
// <substitution> ::= S <seq-id> _
// ::= S_
bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 2878dff3edc4..f6d4f2513ce4 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -174,6 +174,28 @@ bool NestedNameSpecifier::isDependent() const {
return false;
}
+/// \brief Whether this nested name specifier is instantiation-dependent,
+/// i.e., whether it involves a template parameter.
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isInstantiationDependentType();
+ }
+
+ // Necessary to suppress a GCC warning.
+ return false;
+}
+
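A small illustration of the cases the new predicate distinguishes (illustrative code only, not part of the patch):

    namespace ns { struct S { typedef int type; }; }

    template <typename T>
    struct Demo {
      ns::S::type a;              // prefix "ns::S::": Namespace/TypeSpec kinds,
                                  // not instantiation-dependent
      typename T::inner::type b;  // prefix "T::inner::": TypeSpec over a dependent
                                  // type plus an Identifier, instantiation-dependent
    };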
bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
switch (getKind()) {
case Identifier:
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
index eca351aec8dd..b7b2005e9f84 100644
--- a/lib/AST/ParentMap.cpp
+++ b/lib/AST/ParentMap.cpp
@@ -66,6 +66,15 @@ Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
return S;
}
+Stmt *ParentMap::getOuterParenParent(Stmt *S) const {
+ Stmt *Paren = 0;
+ while (isa<ParenExpr>(S)) {
+ Paren = S;
+ S = getParent(S);
+  }
+ return Paren;
+}
+
bool ParentMap::isConsumedExpr(Expr* E) const {
Stmt *P = getParent(E);
Stmt *DirectChild = E;
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index de0b1d0ed9db..5636a6f0f64a 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -1242,12 +1242,11 @@ void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
}
InitializeLayout(D);
-
+ ObjCInterfaceDecl *OI = const_cast<ObjCInterfaceDecl*>(D);
// Layout each ivar sequentially.
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- Context.ShallowCollectObjCIvars(D, Ivars);
- for (unsigned i = 0, e = Ivars.size(); i != e; ++i)
- LayoutField(Ivars[i]);
+ for (ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ LayoutField(IVD);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 380ad94ca224..fd6f21d43b59 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
-#include <cstdio>
+#include "llvm/Support/raw_ostream.h"
using namespace clang;
static struct StmtClassNameTable {
@@ -54,23 +54,24 @@ void Stmt::PrintStats() {
getStmtInfoTableEntry(Stmt::NullStmtClass);
unsigned sum = 0;
- fprintf(stderr, "*** Stmt/Expr Stats:\n");
+ llvm::errs() << "\n*** Stmt/Expr Stats:\n";
for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == 0) continue;
sum += StmtClassInfo[i].Counter;
}
- fprintf(stderr, " %d stmts/exprs total.\n", sum);
+ llvm::errs() << " " << sum << " stmts/exprs total.\n";
sum = 0;
for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == 0) continue;
if (StmtClassInfo[i].Counter == 0) continue;
- fprintf(stderr, " %d %s, %d each (%d bytes)\n",
- StmtClassInfo[i].Counter, StmtClassInfo[i].Name,
- StmtClassInfo[i].Size,
- StmtClassInfo[i].Counter*StmtClassInfo[i].Size);
+ llvm::errs() << " " << StmtClassInfo[i].Counter << " "
+ << StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size
+ << " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size
+ << " bytes)\n";
sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
}
- fprintf(stderr, "Total bytes = %d\n", sum);
+
+ llvm::errs() << "Total bytes = " << sum << "\n";
}
void Stmt::addStmtClass(StmtClass s) {
@@ -84,6 +85,18 @@ bool Stmt::CollectingStats(bool Enable) {
return StatSwitch;
}
+Stmt *Stmt::IgnoreImplicit() {
+ Stmt *s = this;
+
+ if (ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(s))
+ s = ewc->getSubExpr();
+
+ while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(s))
+ s = ice->getSubExpr();
+
+ return s;
+}
+
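The new helper strips a single ExprWithCleanups wrapper and any chain of implicit casts; it does not look through parentheses or explicit casts. A hypothetical usage sketch (the helper name is ours; the clang calls are the ones shown in this patch and the surrounding sources):

    #include "clang/AST/ExprCXX.h"
    using namespace clang;

    // Look through the implicit wrapper nodes around an initializer before
    // checking whether it is (syntactically) a constructor call.
    static const CXXConstructExpr *getBareConstructExpr(Expr *Init) {
      return dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit());
    }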
namespace {
struct good {};
struct bad {};
@@ -365,6 +378,10 @@ unsigned AsmStmt::AnalyzeAsmString(llvm::SmallVectorImpl<AsmStringPiece>&Pieces,
// Handle %x4 and %x[foo] by capturing x as the modifier character.
char Modifier = '\0';
if (isalpha(EscapedChar)) {
+ if (CurPtr == StrEnd) { // Premature end.
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
Modifier = EscapedChar;
EscapedChar = *CurPtr++;
}
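The added bounds check fires for a constraint string that ends immediately after a would-be modifier letter, where the scanner previously advanced past the end of the string. A sketch of the kind of input that now gets err_asm_invalid_escape (illustrative only):

    void f(int x) {
      // '%a' is parsed as modifier 'a' that must be followed by an operand
      // ("%a0" or "%a[name]"); ending the string here is now diagnosed.
      __asm__("mov %a" : : "r"(x));
    }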
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 87588e451884..f705a84c7cf4 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -449,6 +449,12 @@ void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
OS << "\n";
}
+void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
+ Indent() << "@autoreleasepool";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
+ OS << "\n";
+}
+
void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
OS << "catch (";
if (Decl *ExDecl = Node->getExceptionDecl())
@@ -1407,6 +1413,15 @@ void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
OS << Node->getParameterPack()->getNameAsString();
}
+void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *Node) {
+ Visit(Node->getReplacement());
+}
+
+void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
+ PrintExpr(Node->GetTemporaryExpr());
+}
+
// Obj-C
void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
@@ -1464,6 +1479,17 @@ void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
OS << "]";
}
+void
+StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ OS << "(" << E->getBridgeKindName() << E->getType().getAsString(Policy)
+ << ")";
+ PrintExpr(E->getSubExpr());
+}
void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
BlockDecl *BD = Node->getBlockDecl();
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index b117cd9a525f..120c9e50a92a 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -23,7 +23,7 @@
using namespace clang;
namespace {
- class StmtProfiler : public StmtVisitor<StmtProfiler> {
+ class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
llvm::FoldingSetNodeID &ID;
const ASTContext &Context;
bool Canonical;
@@ -33,14 +33,14 @@ namespace {
bool Canonical)
: ID(ID), Context(Context), Canonical(Canonical) { }
- void VisitStmt(Stmt *S);
+ void VisitStmt(const Stmt *S);
-#define STMT(Node, Base) void Visit##Node(Node *S);
+#define STMT(Node, Base) void Visit##Node(const Node *S);
#include "clang/AST/StmtNodes.inc"
/// \brief Visit a declaration that is referenced within an expression
/// or statement.
- void VisitDecl(Decl *D);
+ void VisitDecl(const Decl *D);
/// \brief Visit a type that is referenced within an expression or
/// statement.
@@ -59,96 +59,97 @@ namespace {
/// \brief Visit template arguments that occur within an expression or
/// statement.
- void VisitTemplateArguments(const TemplateArgumentLoc *Args, unsigned NumArgs);
+ void VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs);
/// \brief Visit a single template argument.
void VisitTemplateArgument(const TemplateArgument &Arg);
};
}
-void StmtProfiler::VisitStmt(Stmt *S) {
+void StmtProfiler::VisitStmt(const Stmt *S) {
ID.AddInteger(S->getStmtClass());
- for (Stmt::child_range C = S->children(); C; ++C)
+ for (Stmt::const_child_range C = S->children(); C; ++C)
Visit(*C);
}
-void StmtProfiler::VisitDeclStmt(DeclStmt *S) {
+void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
VisitStmt(S);
- for (DeclStmt::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
D != DEnd; ++D)
VisitDecl(*D);
}
-void StmtProfiler::VisitNullStmt(NullStmt *S) {
+void StmtProfiler::VisitNullStmt(const NullStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitCompoundStmt(CompoundStmt *S) {
+void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitSwitchCase(SwitchCase *S) {
+void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitCaseStmt(CaseStmt *S) {
+void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitDefaultStmt(DefaultStmt *S) {
+void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitLabelStmt(LabelStmt *S) {
+void StmtProfiler::VisitLabelStmt(const LabelStmt *S) {
VisitStmt(S);
VisitDecl(S->getDecl());
}
-void StmtProfiler::VisitIfStmt(IfStmt *S) {
+void StmtProfiler::VisitIfStmt(const IfStmt *S) {
VisitStmt(S);
VisitDecl(S->getConditionVariable());
}
-void StmtProfiler::VisitSwitchStmt(SwitchStmt *S) {
+void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) {
VisitStmt(S);
VisitDecl(S->getConditionVariable());
}
-void StmtProfiler::VisitWhileStmt(WhileStmt *S) {
+void StmtProfiler::VisitWhileStmt(const WhileStmt *S) {
VisitStmt(S);
VisitDecl(S->getConditionVariable());
}
-void StmtProfiler::VisitDoStmt(DoStmt *S) {
+void StmtProfiler::VisitDoStmt(const DoStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitForStmt(ForStmt *S) {
+void StmtProfiler::VisitForStmt(const ForStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitGotoStmt(GotoStmt *S) {
+void StmtProfiler::VisitGotoStmt(const GotoStmt *S) {
VisitStmt(S);
VisitDecl(S->getLabel());
}
-void StmtProfiler::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitContinueStmt(ContinueStmt *S) {
+void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitBreakStmt(BreakStmt *S) {
+void StmtProfiler::VisitBreakStmt(const BreakStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitReturnStmt(ReturnStmt *S) {
+void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitAsmStmt(AsmStmt *S) {
+void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
VisitStmt(S);
ID.AddBoolean(S->isVolatile());
ID.AddBoolean(S->isSimple());
@@ -168,63 +169,69 @@ void StmtProfiler::VisitAsmStmt(AsmStmt *S) {
VisitStringLiteral(S->getClobber(I));
}
-void StmtProfiler::VisitCXXCatchStmt(CXXCatchStmt *S) {
+void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
VisitStmt(S);
VisitType(S->getCaughtType());
}
-void StmtProfiler::VisitCXXTryStmt(CXXTryStmt *S) {
+void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitSEHTryStmt(SEHTryStmt *S) {
+void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitSEHExceptStmt(SEHExceptStmt *S) {
+void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) {
VisitStmt(S);
ID.AddBoolean(S->hasEllipsis());
if (S->getCatchParamDecl())
VisitType(S->getCatchParamDecl()->getType());
}
-void StmtProfiler::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+void
+StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitExpr(Expr *S) {
+void
+StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitDeclRefExpr(DeclRefExpr *S) {
+void StmtProfiler::VisitExpr(const Expr *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
VisitExpr(S);
if (!Canonical)
VisitNestedNameSpecifier(S->getQualifier());
@@ -233,52 +240,52 @@ void StmtProfiler::VisitDeclRefExpr(DeclRefExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
-void StmtProfiler::VisitPredefinedExpr(PredefinedExpr *S) {
+void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getIdentType());
}
-void StmtProfiler::VisitIntegerLiteral(IntegerLiteral *S) {
+void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
VisitExpr(S);
S->getValue().Profile(ID);
}
-void StmtProfiler::VisitCharacterLiteral(CharacterLiteral *S) {
+void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
VisitExpr(S);
ID.AddBoolean(S->isWide());
ID.AddInteger(S->getValue());
}
-void StmtProfiler::VisitFloatingLiteral(FloatingLiteral *S) {
+void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
VisitExpr(S);
S->getValue().Profile(ID);
ID.AddBoolean(S->isExact());
}
-void StmtProfiler::VisitImaginaryLiteral(ImaginaryLiteral *S) {
+void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitStringLiteral(StringLiteral *S) {
+void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
VisitExpr(S);
ID.AddString(S->getString());
ID.AddBoolean(S->isWide());
}
-void StmtProfiler::VisitParenExpr(ParenExpr *S) {
+void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitParenListExpr(ParenListExpr *S) {
+void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitUnaryOperator(UnaryOperator *S) {
+void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) {
VisitExpr(S);
ID.AddInteger(S->getOpcode());
}
-void StmtProfiler::VisitOffsetOfExpr(OffsetOfExpr *S) {
+void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
VisitType(S->getTypeSourceInfo()->getType());
unsigned n = S->getNumComponents();
for (unsigned i = 0; i < n; ++i) {
@@ -306,22 +313,23 @@ void StmtProfiler::VisitOffsetOfExpr(OffsetOfExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *S) {
+void
+StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getKind());
if (S->isArgumentType())
VisitType(S->getArgumentType());
}
-void StmtProfiler::VisitArraySubscriptExpr(ArraySubscriptExpr *S) {
+void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCallExpr(CallExpr *S) {
+void StmtProfiler::VisitCallExpr(const CallExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitMemberExpr(MemberExpr *S) {
+void StmtProfiler::VisitMemberExpr(const MemberExpr *S) {
VisitExpr(S);
VisitDecl(S->getMemberDecl());
if (!Canonical)
@@ -329,72 +337,74 @@ void StmtProfiler::VisitMemberExpr(MemberExpr *S) {
ID.AddBoolean(S->isArrow());
}
-void StmtProfiler::VisitCompoundLiteralExpr(CompoundLiteralExpr *S) {
+void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isFileScope());
}
-void StmtProfiler::VisitCastExpr(CastExpr *S) {
+void StmtProfiler::VisitCastExpr(const CastExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitImplicitCastExpr(ImplicitCastExpr *S) {
+void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) {
VisitCastExpr(S);
ID.AddInteger(S->getValueKind());
}
-void StmtProfiler::VisitExplicitCastExpr(ExplicitCastExpr *S) {
+void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) {
VisitCastExpr(S);
VisitType(S->getTypeAsWritten());
}
-void StmtProfiler::VisitCStyleCastExpr(CStyleCastExpr *S) {
+void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) {
VisitExplicitCastExpr(S);
}
-void StmtProfiler::VisitBinaryOperator(BinaryOperator *S) {
+void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) {
VisitExpr(S);
ID.AddInteger(S->getOpcode());
}
-void StmtProfiler::VisitCompoundAssignOperator(CompoundAssignOperator *S) {
+void
+StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) {
VisitBinaryOperator(S);
}
-void StmtProfiler::VisitConditionalOperator(ConditionalOperator *S) {
+void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitBinaryConditionalOperator(BinaryConditionalOperator *S){
+void StmtProfiler::VisitBinaryConditionalOperator(
+ const BinaryConditionalOperator *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitAddrLabelExpr(AddrLabelExpr *S) {
+void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) {
VisitExpr(S);
VisitDecl(S->getLabel());
}
-void StmtProfiler::VisitStmtExpr(StmtExpr *S) {
+void StmtProfiler::VisitStmtExpr(const StmtExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitShuffleVectorExpr(ShuffleVectorExpr *S) {
+void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitChooseExpr(ChooseExpr *S) {
+void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitGNUNullExpr(GNUNullExpr *S) {
+void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitVAArgExpr(VAArgExpr *S) {
+void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitInitListExpr(InitListExpr *S) {
+void StmtProfiler::VisitInitListExpr(const InitListExpr *S) {
if (S->getSyntacticForm()) {
VisitInitListExpr(S->getSyntacticForm());
return;
@@ -403,11 +413,11 @@ void StmtProfiler::VisitInitListExpr(InitListExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitDesignatedInitExpr(DesignatedInitExpr *S) {
+void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->usesGNUSyntax());
- for (DesignatedInitExpr::designators_iterator D = S->designators_begin(),
- DEnd = S->designators_end();
+ for (DesignatedInitExpr::const_designators_iterator D =
+ S->designators_begin(), DEnd = S->designators_end();
D != DEnd; ++D) {
if (D->isFieldDesignator()) {
ID.AddInteger(0);
@@ -425,28 +435,28 @@ void StmtProfiler::VisitDesignatedInitExpr(DesignatedInitExpr *S) {
}
}
-void StmtProfiler::VisitImplicitValueInitExpr(ImplicitValueInitExpr *S) {
+void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitExtVectorElementExpr(ExtVectorElementExpr *S) {
+void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
VisitExpr(S);
VisitName(&S->getAccessor());
}
-void StmtProfiler::VisitBlockExpr(BlockExpr *S) {
+void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
VisitExpr(S);
VisitDecl(S->getBlockDecl());
}
-void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) {
+void StmtProfiler::VisitBlockDeclRefExpr(const BlockDeclRefExpr *S) {
VisitExpr(S);
VisitDecl(S->getDecl());
ID.AddBoolean(S->isByRef());
ID.AddBoolean(S->isConstQualAdded());
}
-void StmtProfiler::VisitGenericSelectionExpr(GenericSelectionExpr *S) {
+void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
VisitExpr(S);
for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
QualType T = S->getAssocType(i);
@@ -458,7 +468,7 @@ void StmtProfiler::VisitGenericSelectionExpr(GenericSelectionExpr *S) {
}
}
-static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
+static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
UnaryOperatorKind &UnaryOp,
BinaryOperatorKind &BinaryOp) {
switch (S->getOperator()) {
@@ -645,7 +655,7 @@ static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
}
-void StmtProfiler::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
if (S->isTypeDependent()) {
// Type-dependent operator calls are profiled like their underlying
// syntactic operator.
@@ -671,97 +681,100 @@ void StmtProfiler::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
ID.AddInteger(S->getOperator());
}
-void StmtProfiler::VisitCXXMemberCallExpr(CXXMemberCallExpr *S) {
+void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) {
VisitCallExpr(S);
}
-void StmtProfiler::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *S) {
+void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) {
VisitCallExpr(S);
}
-void StmtProfiler::VisitAsTypeExpr(AsTypeExpr *S) {
+void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCXXNamedCastExpr(CXXNamedCastExpr *S) {
+void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) {
VisitExplicitCastExpr(S);
}
-void StmtProfiler::VisitCXXStaticCastExpr(CXXStaticCastExpr *S) {
+void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
-void StmtProfiler::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *S) {
+void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
-void StmtProfiler::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *S) {
+void
+StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
-void StmtProfiler::VisitCXXConstCastExpr(CXXConstCastExpr *S) {
+void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
VisitCXXNamedCastExpr(S);
}
-void StmtProfiler::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *S) {
+void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->getValue());
}
-void StmtProfiler::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *S) {
+void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCXXTypeidExpr(CXXTypeidExpr *S) {
+void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) {
VisitExpr(S);
if (S->isTypeOperand())
VisitType(S->getTypeOperand());
}
-void StmtProfiler::VisitCXXUuidofExpr(CXXUuidofExpr *S) {
+void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) {
VisitExpr(S);
if (S->isTypeOperand())
VisitType(S->getTypeOperand());
}
-void StmtProfiler::VisitCXXThisExpr(CXXThisExpr *S) {
+void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCXXThrowExpr(CXXThrowExpr *S) {
+void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *S) {
+void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
VisitExpr(S);
VisitDecl(S->getParam());
}
-void StmtProfiler::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *S) {
+void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
VisitExpr(S);
VisitDecl(
const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor()));
}
-void StmtProfiler::VisitCXXConstructExpr(CXXConstructExpr *S) {
+void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) {
VisitExpr(S);
VisitDecl(S->getConstructor());
ID.AddBoolean(S->isElidable());
}
-void StmtProfiler::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *S) {
+void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
VisitExplicitCastExpr(S);
}
-void StmtProfiler::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *S) {
+void
+StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
VisitCXXConstructExpr(S);
}
-void StmtProfiler::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *S) {
+void
+StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitCXXDeleteExpr(CXXDeleteExpr *S) {
+void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isGlobalDelete());
ID.AddBoolean(S->isArrayForm());
@@ -769,7 +782,7 @@ void StmtProfiler::VisitCXXDeleteExpr(CXXDeleteExpr *S) {
}
-void StmtProfiler::VisitCXXNewExpr(CXXNewExpr *S) {
+void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
VisitExpr(S);
VisitType(S->getAllocatedType());
VisitDecl(S->getOperatorNew());
@@ -783,14 +796,15 @@ void StmtProfiler::VisitCXXNewExpr(CXXNewExpr *S) {
ID.AddInteger(S->getNumConstructorArgs());
}
-void StmtProfiler::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *S) {
+void
+StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isArrow());
VisitNestedNameSpecifier(S->getQualifier());
VisitType(S->getDestroyedType());
}
-void StmtProfiler::VisitOverloadExpr(OverloadExpr *S) {
+void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
VisitExpr(S);
VisitNestedNameSpecifier(S->getQualifier());
VisitName(S->getName());
@@ -801,37 +815,37 @@ void StmtProfiler::VisitOverloadExpr(OverloadExpr *S) {
}
void
-StmtProfiler::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *S) {
+StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) {
VisitOverloadExpr(S);
}
-void StmtProfiler::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *S) {
+void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
VisitType(S->getQueriedType());
}
-void StmtProfiler::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *S) {
+void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
VisitType(S->getLhsType());
VisitType(S->getRhsType());
}
-void StmtProfiler::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *S) {
+void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
VisitType(S->getQueriedType());
}
-void StmtProfiler::VisitExpressionTraitExpr(ExpressionTraitExpr *S) {
+void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getTrait());
VisitExpr(S->getQueriedExpression());
}
-void
-StmtProfiler::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *S) {
+void StmtProfiler::VisitDependentScopeDeclRefExpr(
+ const DependentScopeDeclRefExpr *S) {
VisitExpr(S);
VisitName(S->getDeclName());
VisitNestedNameSpecifier(S->getQualifier());
@@ -840,18 +854,18 @@ StmtProfiler::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
-void StmtProfiler::VisitExprWithCleanups(ExprWithCleanups *S) {
+void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) {
VisitExpr(S);
}
-void
-StmtProfiler::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *S) {
+void StmtProfiler::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *S) {
VisitExpr(S);
VisitType(S->getTypeAsWritten());
}
-void
-StmtProfiler::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *S) {
+void StmtProfiler::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *S) {
ID.AddBoolean(S->isImplicitAccess());
if (!S->isImplicitAccess()) {
VisitExpr(S);
@@ -864,7 +878,7 @@ StmtProfiler::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
-void StmtProfiler::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *S) {
+void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) {
ID.AddBoolean(S->isImplicitAccess());
if (!S->isImplicitAccess()) {
VisitExpr(S);
@@ -877,57 +891,68 @@ void StmtProfiler::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *S) {
VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
-void StmtProfiler::VisitCXXNoexceptExpr(CXXNoexceptExpr *S) {
+void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitPackExpansionExpr(PackExpansionExpr *S) {
+void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitSizeOfPackExpr(SizeOfPackExpr *S) {
+void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
VisitExpr(S);
VisitDecl(S->getPack());
}
void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
- SubstNonTypeTemplateParmPackExpr *S) {
+ const SubstNonTypeTemplateParmPackExpr *S) {
VisitExpr(S);
VisitDecl(S->getParameterPack());
VisitTemplateArgument(S->getArgumentPack());
}
-void StmtProfiler::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
+ const SubstNonTypeTemplateParmExpr *E) {
+ // Profile exactly as the replacement expression.
+ Visit(E->getReplacement());
+}
+
+void StmtProfiler::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
VisitExpr(E);
}
-void StmtProfiler::VisitObjCStringLiteral(ObjCStringLiteral *S) {
+void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
VisitExpr(S);
}
-void StmtProfiler::VisitObjCEncodeExpr(ObjCEncodeExpr *S) {
+void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
VisitExpr(S);
VisitType(S->getEncodedType());
}
-void StmtProfiler::VisitObjCSelectorExpr(ObjCSelectorExpr *S) {
+void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) {
VisitExpr(S);
VisitName(S->getSelector());
}
-void StmtProfiler::VisitObjCProtocolExpr(ObjCProtocolExpr *S) {
+void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) {
VisitExpr(S);
VisitDecl(S->getProtocol());
}
-void StmtProfiler::VisitObjCIvarRefExpr(ObjCIvarRefExpr *S) {
+void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) {
VisitExpr(S);
VisitDecl(S->getDecl());
ID.AddBoolean(S->isArrow());
ID.AddBoolean(S->isFreeIvar());
}
-void StmtProfiler::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *S) {
+void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
VisitExpr(S);
if (S->isImplicitProperty()) {
VisitDecl(S->getImplicitPropertyGetter());
@@ -941,22 +966,34 @@ void StmtProfiler::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *S) {
}
}
-void StmtProfiler::VisitObjCMessageExpr(ObjCMessageExpr *S) {
+void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
VisitExpr(S);
VisitName(S->getSelector());
VisitDecl(S->getMethodDecl());
}
-void StmtProfiler::VisitObjCIsaExpr(ObjCIsaExpr *S) {
+void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
VisitExpr(S);
ID.AddBoolean(S->isArrow());
}
-void StmtProfiler::VisitDecl(Decl *D) {
+void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
+ const ObjCIndirectCopyRestoreExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->shouldCopy());
+}
+
+void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+ ID.AddBoolean(S->getBridgeKind());
+}
+
+void StmtProfiler::VisitDecl(const Decl *D) {
ID.AddInteger(D? D->getKind() : 0);
if (Canonical && D) {
- if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(D)) {
ID.AddInteger(NTTP->getDepth());
ID.AddInteger(NTTP->getIndex());
ID.AddBoolean(NTTP->isParameterPack());
@@ -964,7 +1001,7 @@ void StmtProfiler::VisitDecl(Decl *D) {
return;
}
- if (ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
// The Itanium C++ ABI uses the type, scope depth, and scope
// index of a parameter when mangling expressions that involve
// function parameters, so we will use the parameter's type for
@@ -978,7 +1015,8 @@ void StmtProfiler::VisitDecl(Decl *D) {
return;
}
- if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
ID.AddInteger(TTP->getDepth());
ID.AddInteger(TTP->getIndex());
ID.AddBoolean(TTP->isParameterPack());
@@ -1058,7 +1096,7 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical) {
+ bool Canonical) const {
StmtProfiler Profiler(ID, Context, Canonical);
Profiler.Visit(this);
}
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index 6114a5a051be..56c6e7bc47c1 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -104,6 +104,45 @@ bool TemplateArgument::isDependent() const {
return false;
}
+bool TemplateArgument::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Null:
+ assert(false && "Should not have a NULL template argument");
+ return false;
+
+ case Type:
+ return getAsType()->isInstantiationDependentType();
+
+ case Template:
+ return getAsTemplate().isInstantiationDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
+ return DC->isDependentContext();
+ return getAsDecl()->getDeclContext()->isDependentContext();
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return getAsExpr()->isInstantiationDependent();
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isInstantiationDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
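This is the template-argument-level counterpart of the mangling change earlier in the patch: an argument can mention a template parameter without the enclosing specialization becoming dependent. Illustrative only:

    template <unsigned N> struct A {};

    // A<sizeof(sizeof(T))> denotes the same specialization for every complete
    // T (the argument evaluates to sizeof(size_t)), but the argument is still
    // instantiation-dependent because it is written in terms of T.
    template <typename T> void g(A<sizeof(sizeof(T))>);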
bool TemplateArgument::isPackExpansion() const {
switch (getKind()) {
case Null:
@@ -277,8 +316,10 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
break;
case Type: {
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressStrongLifetime = true;
std::string TypeStr;
- getAsType().getAsStringInternal(TypeStr, Policy);
+ getAsType().getAsStringInternal(TypeStr, SubPolicy);
Out << TypeStr;
break;
}
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
index ebd07f486783..1f7b19aae471 100644
--- a/lib/AST/TemplateName.cpp
+++ b/lib/AST/TemplateName.cpp
@@ -27,7 +27,19 @@ SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
return TemplateArgument(Arguments, size());
}
-void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID) {
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Parameter, Replacement);
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *parameter,
+ TemplateName replacement) {
+ ID.AddPointer(parameter);
+ ID.AddPointer(replacement.getAsVoidPointer());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context) {
Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
}
@@ -46,9 +58,14 @@ TemplateName::NameKind TemplateName::getKind() const {
return DependentTemplate;
if (Storage.is<QualifiedTemplateName *>())
return QualifiedTemplate;
-
- return getAsOverloadedTemplate()? OverloadedTemplate
- : SubstTemplateTemplateParmPack;
+
+ UncommonTemplateNameStorage *uncommon
+ = Storage.get<UncommonTemplateNameStorage*>();
+ if (uncommon->getAsOverloadedStorage())
+ return OverloadedTemplate;
+ if (uncommon->getAsSubstTemplateTemplateParm())
+ return SubstTemplateTemplateParm;
+ return SubstTemplateTemplateParmPack;
}
TemplateDecl *TemplateName::getAsTemplateDecl() const {
@@ -58,6 +75,9 @@ TemplateDecl *TemplateName::getAsTemplateDecl() const {
if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
return QTN->getTemplateDecl();
+ if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
+ return sub->getReplacement().getAsTemplateDecl();
+
return 0;
}
@@ -79,6 +99,15 @@ bool TemplateName::isDependent() const {
return true;
}
+bool TemplateName::isInstantiationDependent() const {
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (QTN->getQualifier()->isInstantiationDependent())
+ return true;
+ }
+
+ return isDependent();
+}
+
bool TemplateName::containsUnexpandedParameterPack() const {
if (TemplateDecl *Template = getAsTemplateDecl()) {
if (TemplateTemplateParmDecl *TTP
@@ -115,6 +144,9 @@ TemplateName::print(llvm::raw_ostream &OS, const PrintingPolicy &Policy,
OS << DTN->getIdentifier()->getName();
else
OS << "operator " << getOperatorSpelling(DTN->getOperator());
+ } else if (SubstTemplateTemplateParmStorage *subst
+ = getAsSubstTemplateTemplateParm()) {
+ subst->getReplacement().print(OS, Policy, SuppressNNS);
} else if (SubstTemplateTemplateParmPackStorage *SubstPack
= getAsSubstTemplateTemplateParmPack())
OS << SubstPack->getParameterPack()->getNameAsString();
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index d28755284477..08971eb03421 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -36,7 +36,10 @@ bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
(hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
// Address space superset.
((getAddressSpace() == Other.getAddressSpace()) ||
- (hasAddressSpace()&& !Other.hasAddressSpace()));
+ (hasAddressSpace()&& !Other.hasAddressSpace())) &&
+ // Lifetime qualifier superset.
+ ((getObjCLifetime() == Other.getObjCLifetime()) ||
+ (hasObjCLifetime() && !Other.hasObjCLifetime()));
}
bool QualType::isConstant(QualType T, ASTContext &Ctx) {
@@ -107,6 +110,7 @@ DependentSizedExtVectorType::DependentSizedExtVectorType(const
Expr *SizeExpr,
SourceLocation loc)
: Type(DependentSizedExtVector, can, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
ElementType->isVariablyModifiedType(),
(ElementType->containsUnexpandedParameterPack() ||
(SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
@@ -126,6 +130,7 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorKind vecKind)
: Type(Vector, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
vecType->isVariablyModifiedType(),
vecType->containsUnexpandedParameterPack()),
ElementType(vecType)
@@ -137,6 +142,7 @@ VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
QualType canonType, VectorKind vecKind)
: Type(tc, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
vecType->isVariablyModifiedType(),
vecType->containsUnexpandedParameterPack()),
ElementType(vecType)
@@ -174,6 +180,26 @@ QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
return Context.getQualifiedType(split.first, split.second);
}
+QualType QualType::getSingleStepDesugaredType(const ASTContext &Context) const {
+ QualifierCollector Qs;
+
+ const Type *CurTy = Qs.strip(*this);
+ switch (CurTy->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *Ty = cast<Class##Type>(CurTy); \
+ if (!Ty->isSugared()) \
+ return *this; \
+ return Context.getQualifiedType(Ty->desugar(), Qs); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ return *this;
+}
+
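In contrast to getDesugaredType(), the new helper peels exactly one layer of sugar per call, so callers can report intermediate names. Illustrative only:

    typedef int Inches;
    typedef Inches Distance;

    // For a QualType that prints as 'Distance':
    //   one call to getSingleStepDesugaredType(Ctx)  -> 'Inches'
    //   a second call on that result                 -> 'int'
    //   getDesugaredType(Ctx)                        -> 'int' in one step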
SplitQualType QualType::getSplitDesugaredType(QualType T) {
QualifierCollector Qs;
@@ -285,7 +311,6 @@ bool Type::isDerivedType() const {
return false;
}
}
-
bool Type::isClassType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->isClass();
@@ -385,7 +410,7 @@ const RecordType *Type::getAsUnionType() const {
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols)
- : Type(ObjCObject, Canonical, false, false, false),
+ : Type(ObjCObject, Canonical, false, false, false, false),
BaseType(Base)
{
ObjCObjectTypeBits.NumProtocols = NumProtocols;
@@ -866,39 +891,57 @@ bool Type::isIncompleteType() const {
}
}
-/// isPODType - Return true if this is a plain-old-data type (C++ 3.9p10)
-bool Type::isPODType() const {
+bool QualType::isPODType(ASTContext &Context) const {
// The compiler shouldn't query this for incomplete types, but the user might.
// We return false for that case. Except for incomplete arrays of PODs, which
// are PODs according to the standard.
- if (isIncompleteArrayType() &&
- cast<ArrayType>(CanonicalType)->getElementType()->isPODType())
- return true;
- if (isIncompleteType())
+ if (isNull())
+    return false;
+
+ if ((*this)->isIncompleteArrayType())
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ if ((*this)->isIncompleteType())
return false;
+ if (Context.getLangOptions().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
switch (CanonicalType->getTypeClass()) {
// Everything not explicitly mentioned is not POD.
default: return false;
- case VariableArray:
- case ConstantArray:
+ case Type::VariableArray:
+ case Type::ConstantArray:
// IncompleteArray is handled above.
- return cast<ArrayType>(CanonicalType)->getElementType()->isPODType();
-
- case Builtin:
- case Complex:
- case Pointer:
- case MemberPointer:
- case Vector:
- case ExtVector:
- case ObjCObjectPointer:
- case BlockPointer:
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ case Type::ObjCObjectPointer:
+ case Type::BlockPointer:
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
return true;
- case Enum:
+ case Type::Enum:
return true;
- case Record:
+ case Type::Record:
if (CXXRecordDecl *ClassDecl
= dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
return ClassDecl->isPOD();
@@ -908,6 +951,121 @@ bool Type::isPODType() const {
}
}
+bool QualType::isTrivialType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case. Except for incomplete arrays of PODs, which
+ // are PODs according to the standard.
+ if (isNull())
+ return 0;
+
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOptions().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, trivial class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++0x [class]p5:
+ // A trivial class is a class that has a trivial default constructor
+ if (!ClassDecl->hasTrivialDefaultConstructor()) return false;
+ // and is trivially copyable.
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ if (Context.getLangOptions().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++0x [basic.types]p9
+ // Scalar types, trivially copyable class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // Return false for incomplete types after skipping any incomplete array types
+ // which are expressly allowed by the standard and thus our API.
+ if (CanonicalType->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+
+
bool Type::isLiteralType() const {
if (isDependentType())
return false;
@@ -928,6 +1086,10 @@ bool Type::isLiteralType() const {
if (BaseTy->isIncompleteType())
return false;
+ // Objective-C lifetime types are not literal types.
+ if (BaseTy->isObjCRetainableType())
+ return false;
+
// C++0x [basic.types]p10:
// A type is a literal type if it is:
// -- a scalar type; or
@@ -961,68 +1123,6 @@ bool Type::isLiteralType() const {
return false;
}
-bool Type::isTrivialType() const {
- if (isDependentType())
- return false;
-
- // C++0x [basic.types]p9:
- // Scalar types, trivial class types, arrays of such types, and
- // cv-qualified versions of these types are collectively called trivial
- // types.
- const Type *BaseTy = getBaseElementTypeUnsafe();
- assert(BaseTy && "NULL element type");
-
- // Return false for incomplete types after skipping any incomplete array
- // types which are expressly allowed by the standard and thus our API.
- if (BaseTy->isIncompleteType())
- return false;
-
- // As an extension, Clang treats vector types as Scalar types.
- if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
- if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->isTrivial()) return false;
- }
-
- return true;
- }
-
- // No other types can match.
- return false;
-}
-
-bool Type::isTriviallyCopyableType() const {
- if (isDependentType())
- return false;
-
- // C++0x [basic.types]p9
- // Scalar types, trivially copyable class types, arrays of such types, and
- // cv-qualified versions of these types are collectively called trivial
- // types.
- const Type *BaseTy = getBaseElementTypeUnsafe();
- assert(BaseTy && "NULL element type");
-
- // Return false for incomplete types after skipping any incomplete array types
- // which are expressly allowed by the standard and thus our API.
- if (BaseTy->isIncompleteType())
- return false;
-
- // As an extension, Clang treats vector types as Scalar types.
- if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
- if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
- if (const CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->isTriviallyCopyable()) return false;
- }
-
- return true;
- }
-
- // No other types can match.
- return false;
-}
-
bool Type::isStandardLayoutType() const {
if (isDependentType())
return false;
@@ -1060,14 +1160,32 @@ bool Type::isStandardLayoutType() const {
// This is effectively the intersection of isTrivialType and
// isStandardLayoutType. We implement it directly to avoid redundant
// conversions from a type to a CXXRecordDecl.
-bool Type::isCXX11PODType() const {
- if (isDependentType())
+bool QualType::isCXX11PODType(ASTContext &Context) const {
+ const Type *ty = getTypePtr();
+ if (ty->isDependentType())
return false;
+ if (Context.getLangOptions().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if (ty->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
// C++11 [basic.types]p9:
// Scalar types, POD classes, arrays of such types, and cv-qualified
// versions of these types are collectively called trivial types.
- const Type *BaseTy = getBaseElementTypeUnsafe();
+ const Type *BaseTy = ty->getBaseElementTypeUnsafe();
assert(BaseTy && "NULL element type");
// Return false for incomplete types after skipping any incomplete array
@@ -1253,7 +1371,7 @@ DependentTemplateSpecializationType::DependentTemplateSpecializationType(
NestedNameSpecifier *NNS, const IdentifierInfo *Name,
unsigned NumArgs, const TemplateArgument *Args,
QualType Canon)
- : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true,
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
/*VariablyModified=*/false,
NNS && NNS->containsUnexpandedParameterPack()),
NNS(NNS), Name(Name), NumArgs(NumArgs) {
@@ -1388,18 +1506,22 @@ FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
: FunctionType(FunctionProto, result, epi.Variadic, epi.TypeQuals,
epi.RefQualifier, canonical,
result->isDependentType(),
+ result->isInstantiationDependentType(),
result->isVariablyModifiedType(),
result->containsUnexpandedParameterPack(),
epi.ExtInfo),
NumArgs(numArgs), NumExceptions(epi.NumExceptions),
- ExceptionSpecType(epi.ExceptionSpecType)
+ ExceptionSpecType(epi.ExceptionSpecType),
+ HasAnyConsumedArgs(epi.ConsumedArguments != 0)
{
// Fill in the trailing argument array.
QualType *argSlot = reinterpret_cast<QualType*>(this+1);
for (unsigned i = 0; i != numArgs; ++i) {
if (args[i]->isDependentType())
setDependent();
-
+ else if (args[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
if (args[i]->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
@@ -1412,7 +1534,9 @@ FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
if (epi.Exceptions[i]->isDependentType())
setDependent();
-
+ else if (epi.Exceptions[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
if (epi.Exceptions[i]->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
@@ -1422,6 +1546,20 @@ FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
// Store the noexcept expression and context.
Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + numArgs);
*noexSlot = epi.NoexceptExpr;
+
+ if (epi.NoexceptExpr) {
+ if (epi.NoexceptExpr->isValueDependent()
+ || epi.NoexceptExpr->isTypeDependent())
+ setDependent();
+ else if (epi.NoexceptExpr->isInstantiationDependent())
+ setInstantiationDependent();
+ }
+ }
+
+ if (epi.ConsumedArguments) {
+ bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer());
+ for (unsigned i = 0; i != numArgs; ++i)
+ consumedArgs[i] = epi.ConsumedArguments[i];
}
}
@@ -1461,18 +1599,48 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
const QualType *ArgTys, unsigned NumArgs,
const ExtProtoInfo &epi,
const ASTContext &Context) {
+
+ // We have to be careful not to get ambiguous profile encodings.
+ // Note that valid type pointers are never ambiguous with anything else.
+ //
+ // The encoding grammar begins:
+ // type type* bool int bool
+ // If that final bool is true, then there is a section for the EH spec:
+ // bool type*
+ // This is followed by an optional "consumed argument" section of the
+ // same length as the first type sequence:
+ // bool*
+ // Finally, we have the ext info:
+ // int
+ //
+ // There is no ambiguity between the consumed arguments and an empty EH
+ // spec because of the leading 'bool' which unambiguously indicates
+ // whether the following bool is the EH spec or part of the arguments.
+
ID.AddPointer(Result.getAsOpaquePtr());
for (unsigned i = 0; i != NumArgs; ++i)
ID.AddPointer(ArgTys[i].getAsOpaquePtr());
- ID.AddBoolean(epi.Variadic);
- ID.AddInteger(epi.TypeQuals);
- ID.AddInteger(epi.RefQualifier);
- ID.AddInteger(epi.ExceptionSpecType);
+ // This method is relatively performance sensitive, so as a performance
+ // shortcut, use one AddInteger call instead of four for the next four
+ // fields.
+ assert(!(unsigned(epi.Variadic) & ~1) &&
+ !(unsigned(epi.TypeQuals) & ~255) &&
+ !(unsigned(epi.RefQualifier) & ~3) &&
+ !(unsigned(epi.ExceptionSpecType) & ~7) &&
+ "Values larger than expected.");
+ ID.AddInteger(unsigned(epi.Variadic) +
+ (epi.TypeQuals << 1) +
+ (epi.RefQualifier << 9) +
+ (epi.ExceptionSpecType << 11));
if (epi.ExceptionSpecType == EST_Dynamic) {
for (unsigned i = 0; i != epi.NumExceptions; ++i)
ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
} else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){
- epi.NoexceptExpr->Profile(ID, Context, true);
+ epi.NoexceptExpr->Profile(ID, Context, false);
+ }
+ if (epi.ConsumedArguments) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddBoolean(epi.ConsumedArguments[i]);
}
epi.ExtInfo.Profile(ID);
}
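The packed encoding used in the Profile call above can be checked with a small standalone sketch (not part of Clang; helper names are hypothetical): Variadic occupies bit 0, TypeQuals bits 1-8, RefQualifier bits 9-10, and ExceptionSpecType bits 11-13, so the four fields share one AddInteger value without overlapping.

#include <cassert>

unsigned packProtoBits(bool Variadic, unsigned TypeQuals,
                       unsigned RefQualifier, unsigned ExceptionSpecType) {
  assert(TypeQuals < 256 && RefQualifier < 4 && ExceptionSpecType < 8);
  return unsigned(Variadic) | (TypeQuals << 1) | (RefQualifier << 9) |
         (ExceptionSpecType << 11);
}

void packProtoBitsExample() {
  unsigned Packed = packProtoBits(true, /*TypeQuals=*/5, /*RefQualifier=*/2,
                                  /*ExceptionSpecType=*/3);
  assert((Packed & 1) == 1);          // Variadic
  assert(((Packed >> 1) & 255) == 5); // TypeQuals
  assert(((Packed >> 9) & 3) == 2);   // RefQualifier
  assert(((Packed >> 11) & 7) == 3);  // ExceptionSpecType
}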
@@ -1489,13 +1657,21 @@ QualType TypedefType::desugar() const {
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
: Type(TypeOfExpr, can, E->isTypeDependent(),
+ E->isInstantiationDependent(),
E->getType()->isVariablyModifiedType(),
E->containsUnexpandedParameterPack()),
TOExpr(E) {
}
+bool TypeOfExprType::isSugared() const {
+ return !TOExpr->isTypeDependent();
+}
+
QualType TypeOfExprType::desugar() const {
- return getUnderlyingExpr()->getType();
+ if (isSugared())
+ return getUnderlyingExpr()->getType();
+
+ return QualType(this, 0);
}
void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
@@ -1505,12 +1681,22 @@ void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
: Type(Decltype, can, E->isTypeDependent(),
+ E->isInstantiationDependent(),
E->getType()->isVariablyModifiedType(),
E->containsUnexpandedParameterPack()),
E(E),
UnderlyingType(underlyingType) {
}
+bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
+
+QualType DecltypeType::desugar() const {
+ if (isSugared())
+ return getUnderlyingType();
+
+ return QualType(this, 0);
+}
+
DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
: DecltypeType(E, Context.DependentTy), Context(Context) { }
@@ -1520,7 +1706,9 @@ void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
}
TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
- : Type(TC, can, D->isDependentType(), /*VariablyModified=*/false,
+ : Type(TC, can, D->isDependentType(),
+ /*InstantiationDependent=*/D->isDependentType(),
+ /*VariablyModified=*/false,
/*ContainsUnexpandedParameterPack=*/false),
decl(const_cast<TagDecl*>(D)) {}
@@ -1540,6 +1728,7 @@ UnaryTransformType::UnaryTransformType(QualType BaseType,
UTTKind UKind,
QualType CanonicalType)
: Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(),
+ UnderlyingType->isInstantiationDependentType(),
UnderlyingType->isVariablyModifiedType(),
BaseType->containsUnexpandedParameterPack())
, BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind)
@@ -1573,7 +1762,8 @@ SubstTemplateTypeParmPackType::
SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
QualType Canon,
const TemplateArgument &ArgPack)
- : Type(SubstTemplateTypeParmPack, Canon, true, false, true), Replaced(Param),
+ : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
+ Replaced(Param),
Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
{
}
@@ -1598,23 +1788,39 @@ void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
}
bool TemplateSpecializationType::
-anyDependentTemplateArguments(const TemplateArgumentListInfo &Args) {
- return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size());
+anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
+ bool &InstantiationDependent) {
+ return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(),
+ InstantiationDependent);
}
bool TemplateSpecializationType::
-anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N) {
- for (unsigned i = 0; i != N; ++i)
- if (Args[i].getArgument().isDependent())
+anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].getArgument().isDependent()) {
+ InstantiationDependent = true;
return true;
+ }
+
+ if (Args[i].getArgument().isInstantiationDependent())
+ InstantiationDependent = true;
+ }
return false;
}
bool TemplateSpecializationType::
-anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N) {
- for (unsigned i = 0; i != N; ++i)
- if (Args[i].isDependent())
+anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].isDependent()) {
+ InstantiationDependent = true;
return true;
+ }
+
+ if (Args[i].isInstantiationDependent())
+ InstantiationDependent = true;
+ }
return false;
}
@@ -1625,12 +1831,22 @@ TemplateSpecializationType(TemplateName T,
: Type(TemplateSpecialization,
Canon.isNull()? QualType(this, 0) : Canon,
Canon.isNull()? T.isDependent() : Canon->isDependentType(),
+ Canon.isNull()? T.isDependent()
+ : Canon->isInstantiationDependentType(),
false, T.containsUnexpandedParameterPack()),
Template(T), NumArgs(NumArgs) {
assert(!T.getAsDependentTemplateName() &&
"Use DependentTemplateSpecializationType for dependent template-name");
+ assert((T.getKind() == TemplateName::Template ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParm ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ "Unexpected template name for TemplateSpecializationType");
+ bool InstantiationDependent;
+ (void)InstantiationDependent;
assert((!Canon.isNull() ||
- T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) &&
+ T.isDependent() ||
+ anyDependentTemplateArguments(Args, NumArgs,
+ InstantiationDependent)) &&
"No canonical type for non-dependent class template specialization");
TemplateArgument *TemplateArgs
@@ -1644,6 +1860,9 @@ TemplateSpecializationType(TemplateName T,
// U<T> is always non-dependent, irrespective of the type T.
if (Canon.isNull() && Args[Arg].isDependent())
setDependent();
+ else if (Args[Arg].isInstantiationDependent())
+ setInstantiationDependent();
+
if (Args[Arg].getKind() == TemplateArgument::Type &&
Args[Arg].getAsType()->isVariablyModifiedType())
setVariablyModified();
@@ -1795,8 +2014,8 @@ static CachedProperties computeCachedProperties(const Type *T) {
#define DEPENDENT_TYPE(Class,Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- // Treat dependent types as external.
- assert(T->isDependentType());
+ // Treat instantiation-dependent types as external.
+ assert(T->isInstantiationDependentType());
return CachedProperties(ExternalLinkage, DefaultVisibility, false);
case Type::Builtin:
@@ -1900,6 +2119,79 @@ void Type::ClearLinkageCache() {
CanonicalType->TypeBits.CacheValidAndVisibility = 0;
}
+Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
+ if (isObjCARCImplicitlyUnretainedType())
+ return Qualifiers::OCL_ExplicitNone;
+ return Qualifiers::OCL_Strong;
+}
+
+bool Type::isObjCARCImplicitlyUnretainedType() const {
+ assert(isObjCLifetimeType() &&
+ "cannot query implicit lifetime for non-inferrable type");
+
+ const Type *canon = getCanonicalTypeInternal().getTypePtr();
+
+ // Walk down to the base type. We don't care about qualifiers for this.
+ while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+ canon = array->getElementType().getTypePtr();
+
+ if (const ObjCObjectPointerType *opt
+ = dyn_cast<ObjCObjectPointerType>(canon)) {
+    // Class and Class<Protocol> don't require retention.
+ if (opt->getObjectType()->isObjCClass())
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isObjCNSObjectType() const {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+ return false;
+}
+bool Type::isObjCRetainableType() const {
+ return isObjCObjectPointerType() ||
+ isBlockPointerType() ||
+ isObjCNSObjectType();
+}
+bool Type::isObjCIndirectLifetimeType() const {
+ if (isObjCLifetimeType())
+ return true;
+ if (const PointerType *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const ReferenceType *Ref = getAs<ReferenceType>())
+ return Ref->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+ return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
+ return false;
+}
+
+/// Returns true if objects of this type have lifetime semantics under
+/// ARC.
+bool Type::isObjCLifetimeType() const {
+ const Type *type = this;
+ while (const ArrayType *array = type->getAsArrayTypeUnsafe())
+ type = array->getElementType().getTypePtr();
+ return type->isObjCRetainableType();
+}
+
+/// \brief Determine whether the given type T is a "bridgable" Objective-C type,
+/// which is either an Objective-C object pointer type or a block pointer type.
+bool Type::isObjCARCBridgableType() const {
+ return isObjCObjectPointerType() || isBlockPointerType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" C type.
+bool Type::isCARCBridgableType() const {
+ const PointerType *Pointer = getAs<PointerType>();
+ if (!Pointer)
+ return false;
+
+ QualType Pointee = Pointer->getPointeeType();
+ return Pointee->isVoidType() || Pointee->isRecordType();
+}
+
bool Type::hasSizedVLAType() const {
if (!isVariablyModifiedType()) return false;
@@ -1919,6 +2211,18 @@ bool Type::hasSizedVLAType() const {
}
QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ return DK_objc_strong_lifetime;
+ case Qualifiers::OCL_Weak:
+ return DK_objc_weak_lifetime;
+ }
+
/// Currently, the only destruction kind we recognize is C++ objects
/// with non-trivial destructors.
const CXXRecordDecl *record =
@@ -1928,3 +2232,24 @@ QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
return DK_none;
}
+
+bool QualType::hasTrivialCopyAssignment(ASTContext &Context) const {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return !Context.getLangOptions().ObjCAutoRefCount;
+ }
+
+ if (const CXXRecordDecl *Record
+ = getTypePtr()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
+ return Record->hasTrivialCopyAssignment();
+
+ return true;
+}
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 4519606f6e82..b89d2aa31676 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -24,6 +24,23 @@
using namespace clang;
namespace {
+ /// \brief RAII object that enables printing of the ARC __strong lifetime
+ /// qualifier.
+ class IncludeStrongLifetimeRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ Policy.SuppressStrongLifetime = false;
+ }
+
+ ~IncludeStrongLifetimeRAII() {
+ Policy.SuppressStrongLifetime = Old;
+ }
+ };
+
class TypePrinter {
PrintingPolicy Policy;
@@ -78,7 +95,7 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
// "int * const", printing "const int *" is different. Only do this when the
// type expands to a simple string.
bool CanPrefixQualifiers = false;
-
+ bool NeedARCStrongQualifier = false;
Type::TypeClass TC = T->getTypeClass();
if (const AutoType *AT = dyn_cast<AutoType>(T))
TC = AT->desugar()->getTypeClass();
@@ -114,15 +131,18 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
break;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ NeedARCStrongQualifier = true;
+ // Fall through
+
case Type::Pointer:
case Type::BlockPointer:
case Type::LValueReference:
case Type::RValueReference:
case Type::MemberPointer:
- case Type::ConstantArray:
- case Type::IncompleteArray:
- case Type::VariableArray:
- case Type::DependentSizedArray:
case Type::DependentSizedExtVector:
case Type::Vector:
case Type::ExtVector:
@@ -139,13 +159,20 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
if (!CanPrefixQualifiers && !Quals.empty()) {
std::string qualsBuffer;
- Quals.getAsStringInternal(qualsBuffer, Policy);
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
- if (!buffer.empty()) {
- qualsBuffer += ' ';
- qualsBuffer += buffer;
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
}
- std::swap(buffer, qualsBuffer);
}
switch (T->getTypeClass()) {
@@ -159,13 +186,20 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
// If we're adding the qualifiers as a prefix, do it now.
if (CanPrefixQualifiers && !Quals.empty()) {
std::string qualsBuffer;
- Quals.getAsStringInternal(qualsBuffer, Policy);
-
- if (!buffer.empty()) {
- qualsBuffer += ' ';
- qualsBuffer += buffer;
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
+
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
}
- std::swap(buffer, qualsBuffer);
}
}
@@ -192,6 +226,7 @@ void TypePrinter::printPointer(const PointerType *T, std::string &S) {
if (isa<ArrayType>(T->getPointeeType()))
S = '(' + S + ')';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getPointeeType(), S);
}
@@ -209,6 +244,7 @@ void TypePrinter::printLValueReference(const LValueReferenceType *T,
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
S = '(' + S + ')';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getPointeeTypeAsWritten(), S);
}
@@ -221,6 +257,7 @@ void TypePrinter::printRValueReference(const RValueReferenceType *T,
if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
S = '(' + S + ')';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getPointeeTypeAsWritten(), S);
}
@@ -236,6 +273,7 @@ void TypePrinter::printMemberPointer(const MemberPointerType *T,
if (isa<ArrayType>(T->getPointeeType()))
S = '(' + S + ')';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getPointeeType(), S);
}
@@ -245,12 +283,14 @@ void TypePrinter::printConstantArray(const ConstantArrayType *T,
S += llvm::utostr(T->getSize().getZExtValue());
S += ']';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getElementType(), S);
}
void TypePrinter::printIncompleteArray(const IncompleteArrayType *T,
std::string &S) {
S += "[]";
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getElementType(), S);
}
@@ -276,6 +316,7 @@ void TypePrinter::printVariableArray(const VariableArrayType *T,
}
S += ']';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getElementType(), S);
}
@@ -291,6 +332,7 @@ void TypePrinter::printDependentSizedArray(const DependentSizedArrayType *T,
}
S += ']';
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getElementType(), S);
}
@@ -518,6 +560,7 @@ void TypePrinter::printUnaryTransform(const UnaryTransformType *T,
if (!S.empty())
S = ' ' + S;
std::string Str;
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getBaseType(), Str);
switch (T->getUTTKind()) {
@@ -552,6 +595,7 @@ void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
Buffer += "<anonymous>";
} else if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ IncludeStrongLifetimeRAII Strong(Policy);
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
std::string TemplateArgsStr
= TemplateSpecializationType::PrintTemplateArgumentList(
@@ -642,6 +686,7 @@ void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
Args = TemplateArgs.data();
NumArgs = TemplateArgs.size();
}
+ IncludeStrongLifetimeRAII Strong(Policy);
Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
NumArgs,
Policy);
@@ -677,18 +722,21 @@ void TypePrinter::printTemplateTypeParm(const TemplateTypeParmType *T,
void TypePrinter::printSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
print(T->getReplacementType(), S);
}
void TypePrinter::printSubstTemplateTypeParmPack(
const SubstTemplateTypeParmPackType *T,
std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
printTemplateTypeParm(T->getReplacedParameter(), S);
}
void TypePrinter::printTemplateSpecialization(
const TemplateSpecializationType *T,
std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
std::string SpecString;
{
@@ -765,6 +813,7 @@ void TypePrinter::printDependentName(const DependentNameType *T, std::string &S)
void TypePrinter::printDependentTemplateSpecialization(
const DependentTemplateSpecializationType *T, std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
std::string MyString;
{
llvm::raw_string_ostream OS(MyString);
@@ -796,8 +845,9 @@ void TypePrinter::printPackExpansion(const PackExpansionType *T,
void TypePrinter::printAttributed(const AttributedType *T,
std::string &S) {
- // Prefer the macro forms of the GC qualifiers.
- if (T->getAttrKind() == AttributedType::attr_objc_gc)
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
return print(T->getEquivalentType(), S);
print(T->getModifiedType(), S);
@@ -866,6 +916,18 @@ void TypePrinter::printAttributed(const AttributedType *T,
break;
}
+ case AttributedType::attr_objc_ownership:
+ S += "objc_ownership(";
+ switch (T->getEquivalentType().getObjCLifetime()) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); break;
+ case Qualifiers::OCL_ExplicitNone: S += "none"; break;
+ case Qualifiers::OCL_Strong: S += "strong"; break;
+ case Qualifiers::OCL_Weak: S += "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "autoreleasing"; break;
+ }
+ S += ")";
+ break;
+
case AttributedType::attr_noreturn: S += "noreturn"; break;
case AttributedType::attr_cdecl: S += "cdecl"; break;
case AttributedType::attr_fastcall: S += "fastcall"; break;
@@ -1080,7 +1142,7 @@ std::string Qualifiers::getAsString() const {
// prefix a space if the string is non-empty. Will not append a final
// space.
void Qualifiers::getAsStringInternal(std::string &S,
- const PrintingPolicy&) const {
+ const PrintingPolicy& Policy) const {
AppendTypeQualList(S, getCVRQualifiers());
if (unsigned addrspace = getAddressSpace()) {
if (!S.empty()) S += ' ';
@@ -1095,6 +1157,23 @@ void Qualifiers::getAsStringInternal(std::string &S,
else
S += "__strong";
}
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
+ if (!S.empty() &&
+ !(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+ S += ' ';
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("none but true");
+ case Qualifiers::OCL_ExplicitNone: S += "__unsafe_unretained"; break;
+ case Qualifiers::OCL_Strong:
+ if (!Policy.SuppressStrongLifetime)
+ S += "__strong";
+ break;
+
+ case Qualifiers::OCL_Weak: S += "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "__autoreleasing"; break;
+ }
+ }
}
std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 3e540203eaa0..f231c147f11e 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -776,7 +776,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl* VD,
QT = RT->getPointeeType();
if (!QT.isConstQualified())
return Scope;
- if (!VD->getInit() || !VD->getInit()->Classify(*Context).isRValue())
+ if (!VD->extendsLifetimeOfTemporary())
return Scope;
}
@@ -2763,6 +2763,10 @@ tryAgain:
case Stmt::ParenExprClass:
E = cast<ParenExpr>(E)->getSubExpr();
goto tryAgain;
+
+ case Stmt::MaterializeTemporaryExprClass:
+ E = cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr();
+ goto tryAgain;
}
}
diff --git a/lib/Analysis/CocoaConventions.cpp b/lib/Analysis/CocoaConventions.cpp
index 946c38c875d5..90f7092f90ee 100644
--- a/lib/Analysis/CocoaConventions.cpp
+++ b/lib/Analysis/CocoaConventions.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines
+// This file implements cocoa naming convention analysis.
//
//===----------------------------------------------------------------------===//
@@ -36,8 +36,10 @@ using llvm::StringRef;
// not release it."
//
-cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S) {
- switch (S.getMethodFamily()) {
+cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S,
+ const ObjCMethodDecl *MD) {
+ switch (MD && MD->hasAttr<ObjCMethodFamilyAttr>()? MD->getMethodFamily()
+ : S.getMethodFamily()) {
case OMF_None:
case OMF_autorelease:
case OMF_dealloc:
@@ -45,6 +47,7 @@ cocoa::NamingConvention cocoa::deriveNamingConvention(Selector S) {
case OMF_retain:
case OMF_retainCount:
case OMF_self:
+ case OMF_performSelector:
return NoConvention;
case OMF_init:
@@ -83,12 +86,12 @@ bool cocoa::isRefType(QualType RetTy, llvm::StringRef Prefix,
return Name.startswith(Prefix);
}
-bool cocoa::isCFObjectRef(QualType T) {
- return isRefType(T, "CF") || // Core Foundation.
- isRefType(T, "CG") || // Core Graphics.
- isRefType(T, "DADisk") || // Disk Arbitration API.
- isRefType(T, "DADissenter") ||
- isRefType(T, "DASessionRef");
+bool coreFoundation::isCFObjectRef(QualType T) {
+ return cocoa::isRefType(T, "CF") || // Core Foundation.
+ cocoa::isRefType(T, "CG") || // Core Graphics.
+ cocoa::isRefType(T, "DADisk") || // Disk Arbitration API.
+ cocoa::isRefType(T, "DADissenter") ||
+ cocoa::isRefType(T, "DASessionRef");
}
@@ -123,3 +126,47 @@ bool cocoa::isCocoaObjectRef(QualType Ty) {
return false;
}
+
+bool coreFoundation::followsCreateRule(llvm::StringRef functionName) {
+ llvm::StringRef::iterator it = functionName.begin();
+ llvm::StringRef::iterator start = it;
+ llvm::StringRef::iterator endI = functionName.end();
+
+ while (true) {
+ // Scan for the start of 'create' or 'copy'.
+ for ( ; it != endI ; ++it) {
+ // Search for the first character. It can either be 'C' or 'c'.
+ char ch = *it;
+ if (ch == 'C' || ch == 'c') {
+ ++it;
+ break;
+ }
+ }
+
+ // Did we hit the end of the string? If so, we didn't find a match.
+ if (it == endI)
+ return false;
+
+ // Scan for *lowercase* 'reate' or 'opy', followed by no lowercase
+ // character.
+ llvm::StringRef suffix = functionName.substr(it - start);
+ if (suffix.startswith("reate")) {
+ it += 5;
+ }
+ else if (suffix.startswith("opy")) {
+ it += 3;
+ }
+ else {
+ // Keep scanning.
+ continue;
+ }
+
+ if (it == endI || !islower(*it))
+ return true;
+
+ // If we matched a lowercase character, it isn't the end of the
+ // word. Keep scanning.
+ }
+
+ return false;
+}
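A few illustrative inputs for the scanner above, as a usage sketch (the assertions assume followsCreateRule is visible from this file; the helper itself is not part of the Clang sources):

#include <cassert>

static void followsCreateRuleExamples() {
  // "Create"/"Copy" starting a new word follows the create rule.
  assert(coreFoundation::followsCreateRule("CFArrayCreateMutable"));
  assert(coreFoundation::followsCreateRule("CFStringCreateCopy"));
  // No "create" or "copy" word at all.
  assert(!coreFoundation::followsCreateRule("MakeCollectable"));
  // "copy" followed by a lowercase letter is part of a longer word.
  assert(!coreFoundation::followsCreateRule("copymachine"));
}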
diff --git a/lib/Analysis/FormatString.cpp b/lib/Analysis/FormatString.cpp
index c1b5ea8a652a..5f3cd4c61549 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/Analysis/FormatString.cpp
@@ -219,6 +219,7 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
if (T == argTy)
return true;
+ // Check for "compatible types".
if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
switch (BT->getKind()) {
default:
@@ -227,7 +228,7 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
case BuiltinType::SChar:
return T == C.UnsignedCharTy;
case BuiltinType::Char_U:
- case BuiltinType::UChar:
+ case BuiltinType::UChar:
return T == C.SignedCharTy;
case BuiltinType::Short:
return T == C.UnsignedShortTy;
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 9ac456f53a67..c5b17fc77bb2 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/SmallVector.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/CFG.h"
@@ -108,6 +109,11 @@ static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1,
case Stmt::CXXTryStmtClass: {
return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
}
+ case Expr::ObjCBridgedCastExprClass: {
+ const ObjCBridgedCastExpr *CSC = cast<ObjCBridgedCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
default: ;
}
R1 = S->getSourceRange();
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index e80e282813af..1d6959d81b16 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -288,28 +288,28 @@ class DataflowWorklist {
public:
DataflowWorklist(const CFG &cfg) : enqueuedBlocks(cfg.getNumBlockIDs()) {}
- void enqueue(const CFGBlock *block);
void enqueueSuccessors(const CFGBlock *block);
const CFGBlock *dequeue();
-
};
}
-void DataflowWorklist::enqueue(const CFGBlock *block) {
- if (!block)
- return;
- unsigned idx = block->getBlockID();
- if (enqueuedBlocks[idx])
- return;
- worklist.push_back(block);
- enqueuedBlocks[idx] = true;
-}
-
void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+ unsigned OldWorklistSize = worklist.size();
for (CFGBlock::const_succ_iterator I = block->succ_begin(),
E = block->succ_end(); I != E; ++I) {
- enqueue(*I);
+ const CFGBlock *Successor = *I;
+ if (!Successor || enqueuedBlocks[Successor->getBlockID()])
+ continue;
+ worklist.push_back(Successor);
+ enqueuedBlocks[Successor->getBlockID()] = true;
}
+ if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
+ return;
+
+ // Rotate the newly added blocks to the start of the worklist so that it forms
+ // a proper queue when we pop off the end of the worklist.
+ std::rotate(worklist.begin(), worklist.begin() + OldWorklistSize,
+ worklist.end());
}
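The rotate above is what makes the vector behave as a queue even though dequeue() pops from the back; a small standalone sketch (not Clang code) of the same trick:

#include <algorithm>
#include <cassert>
#include <vector>

void rotateExample() {
  // 'A' and 'B' were already enqueued; 'C' and 'D' were just appended.
  std::vector<char> worklist = {'A', 'B', 'C', 'D'};
  std::rotate(worklist.begin(), worklist.begin() + 2, worklist.end());
  // Now {C, D, A, B}: popping from the back yields B and A before D and C,
  // so blocks enqueued earlier are still processed first.
  assert(worklist.back() == 'B');
}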
const CFGBlock *DataflowWorklist::dequeue() {
@@ -654,15 +654,19 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
return vals.updateValueVectorWithScratch(block);
}
-void clang::runUninitializedVariablesAnalysis(const DeclContext &dc,
- const CFG &cfg,
- AnalysisContext &ac,
- UninitVariablesHandler &handler) {
+void clang::runUninitializedVariablesAnalysis(
+ const DeclContext &dc,
+ const CFG &cfg,
+ AnalysisContext &ac,
+ UninitVariablesHandler &handler,
+ UninitVariablesAnalysisStats &stats) {
CFGBlockValues vals(cfg);
vals.computeSetOfDeclarations(dc);
if (vals.hasNoDeclarations())
return;
+ stats.NumVariablesAnalyzed = vals.getNumEntries();
+
// Mark all variables uninitialized at the entry.
const CFGBlock &entry = cfg.getEntry();
for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
@@ -684,7 +688,8 @@ void clang::runUninitializedVariablesAnalysis(const DeclContext &dc,
while (const CFGBlock *block = worklist.dequeue()) {
// Did the block change?
- bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
+ bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
+ ++stats.NumBlockVisits;
if (changed || !previouslyVisited[block->getBlockID()])
worklist.enqueueSuccessors(block);
previouslyVisited[block->getBlockID()] = true;
@@ -692,11 +697,12 @@ void clang::runUninitializedVariablesAnalysis(const DeclContext &dc,
// Run through the blocks one more time, and report uninitialized variables.
for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
- if (wasAnalyzed[(*BI)->getBlockID()])
+ if (wasAnalyzed[(*BI)->getBlockID()]) {
runOnBlock(*BI, cfg, ac, vals, wasAnalyzed, &handler,
/* flagBlockUses */ true);
+ ++stats.NumBlockVisits;
+ }
}
}
UninitVariablesHandler::~UninitVariablesHandler() {}
-
diff --git a/lib/Basic/Builtins.cpp b/lib/Basic/Builtins.cpp
index 845ae81517da..7df24a03b03a 100644
--- a/lib/Basic/Builtins.cpp
+++ b/lib/Basic/Builtins.cpp
@@ -18,10 +18,10 @@
using namespace clang;
static const Builtin::Info BuiltinInfo[] = {
- { "not a builtin function", 0, 0, 0, ALL_LANGUAGES, false },
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+ { "not a builtin function", 0, 0, 0, ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) { #ID, TYPE, ATTRS, HEADER,\
- BUILTIN_LANG, false },
+ BUILTIN_LANG },
#include "clang/Basic/Builtins.def"
};
@@ -46,8 +46,7 @@ void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
const LangOptions& LangOpts) {
// Step #1: mark all target-independent builtins with their ID's.
for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
- if (!BuiltinInfo[i].Suppressed &&
- (!LangOpts.NoBuiltin || !strchr(BuiltinInfo[i].Attributes, 'f'))) {
+ if (!LangOpts.NoBuiltin || !strchr(BuiltinInfo[i].Attributes, 'f')) {
if (LangOpts.ObjC1 ||
BuiltinInfo[i].builtin_lang != clang::OBJC_LANG)
Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
@@ -55,10 +54,7 @@ void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
// Step #2: Register target-specific builtins.
for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
- if (!TSRecords[i].Suppressed &&
- (!LangOpts.NoBuiltin ||
- (TSRecords[i].Attributes &&
- !strchr(TSRecords[i].Attributes, 'f'))))
+ if (!LangOpts.NoBuiltin || !strchr(TSRecords[i].Attributes, 'f'))
Table.get(TSRecords[i].Name).setBuiltinID(i+Builtin::FirstTSBuiltin);
}
@@ -67,16 +63,12 @@ Builtin::Context::GetBuiltinNames(llvm::SmallVectorImpl<const char *> &Names,
bool NoBuiltins) {
// Find all target-independent names
for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
- if (!BuiltinInfo[i].Suppressed &&
- (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f')))
+ if (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f'))
Names.push_back(BuiltinInfo[i].Name);
// Find target-specific names.
for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
- if (!TSRecords[i].Suppressed &&
- (!NoBuiltins ||
- (TSRecords[i].Attributes &&
- !strchr(TSRecords[i].Attributes, 'f'))))
+ if (!NoBuiltins || !strchr(TSRecords[i].Attributes, 'f'))
Names.push_back(TSRecords[i].Name);
}
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 11887ab0fe95..ae363a0df0d0 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -26,7 +26,8 @@ static void DummyArgToStringFn(Diagnostic::ArgumentKind AK, intptr_t QT,
const Diagnostic::ArgumentValue *PrevArgs,
unsigned NumPrevArgs,
llvm::SmallVectorImpl<char> &Output,
- void *Cookie) {
+ void *Cookie,
+ llvm::SmallVectorImpl<intptr_t> &QualTypeVals) {
const char *Str = "<can't format argument>";
Output.append(Str, Str+strlen(Str));
}
@@ -86,10 +87,12 @@ bool Diagnostic::popMappings(SourceLocation Loc) {
void Diagnostic::Reset() {
ErrorOccurred = false;
FatalErrorOccurred = false;
+ UnrecoverableErrorOccurred = false;
NumWarnings = 0;
NumErrors = 0;
NumErrorsSuppressed = 0;
+
CurDiagID = ~0U;
// Set LastDiagLevel to an "unset" state. If we set it to 'Ignored', notes
// using a Diagnostic associated to a translation unit that follow
@@ -542,7 +545,14 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
/// ConvertArgToString, allowing the implementation to avoid redundancies in
/// obvious cases.
llvm::SmallVector<Diagnostic::ArgumentValue, 8> FormattedArgs;
-
+
+ /// QualTypeVals - Pass a vector of arrays so that QualType names can be
+ /// compared to see if more information is needed to be printed.
+ llvm::SmallVector<intptr_t, 2> QualTypeVals;
+ for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
+ if (getArgKind(i) == Diagnostic::ak_qualtype)
+ QualTypeVals.push_back(getRawArg(i));
+
while (DiagStr != DiagEnd) {
if (DiagStr[0] != '%') {
// Append non-%0 substrings to Str if we have one.
@@ -673,7 +683,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
Modifier, ModifierLen,
Argument, ArgumentLen,
FormattedArgs.data(), FormattedArgs.size(),
- OutStr);
+ OutStr, QualTypeVals);
break;
}
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 6d7e3204cba3..147ba7e99e74 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/Analysis/AnalysisDiagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Frontend/FrontendDiagnostic.h"
@@ -135,7 +136,7 @@ static const unsigned StaticDiagInfoSize =
sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
/// To be sorted before first use (since it's split among multiple files)
-static StaticDiagNameIndexRec StaticDiagNameIndex[] = {
+static const StaticDiagNameIndexRec StaticDiagNameIndex[] = {
#define DIAG_NAME_INDEX(ENUM) { #ENUM, diag::ENUM, STR_SIZE(#ENUM, uint8_t) },
#include "clang/Basic/DiagnosticIndexName.inc"
#undef DIAG_NAME_INDEX
@@ -199,19 +200,21 @@ unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) {
return 0;
}
-// The diagnostic category names.
-struct StaticDiagCategoryRec {
- const char *NameStr;
- uint8_t NameLen;
+namespace {
+ // The diagnostic category names.
+ struct StaticDiagCategoryRec {
+ const char *NameStr;
+ uint8_t NameLen;
- llvm::StringRef getName() const {
- return llvm::StringRef(NameStr, NameLen);
- }
-};
+ llvm::StringRef getName() const {
+ return llvm::StringRef(NameStr, NameLen);
+ }
+ };
+}
-static StaticDiagCategoryRec CategoryNameTable[] = {
+static const StaticDiagCategoryRec CategoryNameTable[] = {
#define GET_CATEGORY_TABLE
-#define CATEGORY(X) { X, STR_SIZE(X, uint8_t) },
+#define CATEGORY(X, ENUM) { X, STR_SIZE(X, uint8_t) },
#include "clang/Basic/DiagnosticGroups.inc"
#undef GET_CATEGORY_TABLE
{ 0, 0 }
@@ -261,7 +264,7 @@ llvm::StringRef DiagnosticIDs::getName(unsigned DiagID) {
/// getIdFromName - Given a diagnostic name, return its ID, or 0
unsigned DiagnosticIDs::getIdFromName(llvm::StringRef Name) {
- StaticDiagNameIndexRec *StaticDiagNameIndexEnd =
+ const StaticDiagNameIndexRec *StaticDiagNameIndexEnd =
StaticDiagNameIndex + StaticDiagNameIndexSize;
if (Name.empty()) { return diag::DIAG_UPPER_LIMIT; }
@@ -543,17 +546,21 @@ DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass,
return Result;
}
-struct WarningOption {
- // Be safe with the size of 'NameLen' because we don't statically check if the
- // size will fit in the field; the struct size won't decrease with a shorter
- // type anyway.
- size_t NameLen;
- const char *NameStr;
- const short *Members;
- const short *SubGroups;
-
- llvm::StringRef getName() const { return llvm::StringRef(NameStr, NameLen); }
-};
+namespace {
+ struct WarningOption {
+ // Be safe with the size of 'NameLen' because we don't statically check if
+ // the size will fit in the field; the struct size won't decrease with a
+ // shorter type anyway.
+ size_t NameLen;
+ const char *NameStr;
+ const short *Members;
+ const short *SubGroups;
+
+ llvm::StringRef getName() const {
+ return llvm::StringRef(NameStr, NameLen);
+ }
+ };
+}
#define GET_DIAG_ARRAYS
#include "clang/Basic/DiagnosticGroups.inc"
@@ -678,6 +685,12 @@ bool DiagnosticIDs::ProcessDiag(Diagnostic &Diag) const {
return false;
if (DiagLevel >= DiagnosticIDs::Error) {
+ Diag.TrapErrorOccurred = true;
+ if (isUnrecoverable(DiagID)) {
+ Diag.TrapUnrecoverableErrorOccurred = true;
+ Diag.UnrecoverableErrorOccurred = true;
+ }
+
if (Diag.Client->IncludeInDiagnosticCounts()) {
Diag.ErrorOccurred = true;
++Diag.NumErrors;
@@ -714,3 +727,25 @@ bool DiagnosticIDs::ProcessDiag(Diagnostic &Diag) const {
return true;
}
+
+bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ // Custom diagnostics.
+ return CustomDiagInfo->getLevel(DiagID) >= DiagnosticIDs::Error;
+ }
+
+ // Only errors may be unrecoverable.
+ if (getBuiltinDiagClass(DiagID) < CLASS_ERROR)
+ return false;
+
+ if (DiagID == diag::err_unavailable ||
+ DiagID == diag::err_unavailable_message)
+ return false;
+
+ // Currently we consider all ARC errors as recoverable.
+ if (getCategoryNumberForDiag(DiagID) ==
+ diag::DiagCat_Automatic_Reference_Counting_Issue)
+ return false;
+
+ return true;
+}
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index 4711faa1a519..188e2d46f587 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -92,7 +93,8 @@ namespace {
KEYBORLAND = 0x100,
KEYOPENCL = 0x200,
KEYC1X = 0x400,
- KEYALL = 0x7ff
+ KEYARC = 0x800,
+ KEYALL = 0x0fff
};
}
@@ -120,7 +122,8 @@ static void AddKeyword(llvm::StringRef Keyword,
else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
else if (LangOpts.C1X && (Flags & KEYC1X)) AddResult = 2;
-
+ else if (LangOpts.ObjCAutoRefCount && (Flags & KEYARC)) AddResult = 2;
+
// Don't add this keyword if disabled in this language.
if (AddResult == 0) return;
@@ -394,6 +397,8 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
if (name == "retainCount") return OMF_retainCount;
if (name == "self") return OMF_self;
}
+
+ if (name == "performSelector") return OMF_performSelector;
// The other method families may begin with a prefix of underscores.
while (!name.empty() && name.front() == '_')
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 2de8ab7528ab..45922c15527f 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -169,11 +169,11 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(Diagnostic &Diag,
return Buffer.getPointer();
}
-unsigned LineTableInfo::getLineTableFilenameID(const char *Ptr, unsigned Len) {
+unsigned LineTableInfo::getLineTableFilenameID(llvm::StringRef Name) {
// Look up the filename in the string table, returning the pre-existing value
// if it exists.
llvm::StringMapEntry<unsigned> &Entry =
- FilenameIDs.GetOrCreateValue(Ptr, Ptr+Len, ~0U);
+ FilenameIDs.GetOrCreateValue(Name, ~0U);
if (Entry.getValue() != ~0U)
return Entry.getValue();
@@ -277,10 +277,10 @@ void LineTableInfo::AddEntry(unsigned FID,
/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
///
-unsigned SourceManager::getLineTableFilenameID(const char *Ptr, unsigned Len) {
+unsigned SourceManager::getLineTableFilenameID(llvm::StringRef Name) {
if (LineTable == 0)
LineTable = new LineTableInfo();
- return LineTable->getLineTableFilenameID(Ptr, Len);
+ return LineTable->getLineTableFilenameID(Name);
}
@@ -531,16 +531,31 @@ FileID SourceManager::createFileID(const ContentCache *File,
return LastFileIDLookup = FID;
}
-/// createInstantiationLoc - Return a new SourceLocation that encodes the fact
-/// that a token from SpellingLoc should actually be referenced from
-/// InstantiationLoc.
+SourceLocation
+SourceManager::createMacroArgInstantiationLoc(SourceLocation SpellingLoc,
+ SourceLocation ILoc,
+ unsigned TokLength) {
+ InstantiationInfo II =
+ InstantiationInfo::createForMacroArg(SpellingLoc, ILoc);
+ return createInstantiationLocImpl(II, TokLength);
+}
+
SourceLocation SourceManager::createInstantiationLoc(SourceLocation SpellingLoc,
SourceLocation ILocStart,
SourceLocation ILocEnd,
unsigned TokLength,
unsigned PreallocatedID,
unsigned Offset) {
- InstantiationInfo II = InstantiationInfo::get(ILocStart,ILocEnd, SpellingLoc);
+ InstantiationInfo II =
+ InstantiationInfo::create(SpellingLoc, ILocStart, ILocEnd);
+ return createInstantiationLocImpl(II, TokLength, PreallocatedID, Offset);
+}
+
+SourceLocation
+SourceManager::createInstantiationLocImpl(const InstantiationInfo &II,
+ unsigned TokLength,
+ unsigned PreallocatedID,
+ unsigned Offset) {
if (PreallocatedID) {
// If we're filling in a preallocated ID, just load in the
// instantiation entry and return.
@@ -749,18 +764,19 @@ SourceLocation SourceManager::getSpellingLocSlowCase(SourceLocation Loc) const {
std::pair<FileID, unsigned>
-SourceManager::getDecomposedInstantiationLocSlowCase(const SrcMgr::SLocEntry *E,
- unsigned Offset) const {
+SourceManager::getDecomposedInstantiationLocSlowCase(
+ const SrcMgr::SLocEntry *E) const {
// If this is an instantiation record, walk through all the instantiation
// points.
FileID FID;
SourceLocation Loc;
+ unsigned Offset;
do {
Loc = E->getInstantiation().getInstantiationLocStart();
FID = getFileID(Loc);
E = &getSLocEntry(FID);
- Offset += Loc.getOffset()-E->getOffset();
+ Offset = Loc.getOffset()-E->getOffset();
} while (!Loc.isFileID());
return std::make_pair(FID, Offset);
@@ -823,6 +839,14 @@ SourceManager::getInstantiationRange(SourceLocation Loc) const {
return Res;
}
+bool SourceManager::isMacroArgInstantiation(SourceLocation Loc) const {
+ if (!Loc.isMacroID()) return false;
+
+ FileID FID = getFileID(Loc);
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID);
+ const SrcMgr::InstantiationInfo &II = E->getInstantiation();
+ return II.isMacroArgInstantiation();
+}
//===----------------------------------------------------------------------===//
@@ -917,7 +941,7 @@ static void ComputeLineNumbers(Diagnostic &Diag, ContentCache *FI,
// Find the file offsets of all of the *physical* source lines. This does
// not look at trigraphs, escaped newlines, or anything else tricky.
- std::vector<unsigned> LineOffsets;
+ llvm::SmallVector<unsigned, 256> LineOffsets;
// Line #1 starts at char 0.
LineOffsets.push_back(0);
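What this loop produces, in reduced form (a sketch that ignores the '\r' handling in the real code and is not part of Clang): for the buffer "ab\ncd\n\nef" the recorded physical line starts are offsets 0, 3, 6 and 7.

#include <string>
#include <vector>

std::vector<unsigned> computeLineOffsetsSketch(const std::string &Buf) {
  std::vector<unsigned> Offsets;
  Offsets.push_back(0);            // Line #1 starts at char 0.
  for (unsigned i = 0, e = (unsigned)Buf.size(); i != e; ++i)
    if (Buf[i] == '\n')
      Offsets.push_back(i + 1);    // The next line starts after the newline.
  return Offsets;
}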
@@ -1213,73 +1237,6 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
}
-/// \brief Returns true if the given MacroID location points at the first
-/// token of the macro instantiation.
-bool SourceManager::isAtStartOfMacroInstantiation(SourceLocation loc) const {
- assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
-
- unsigned FID = getFileID(loc).ID;
- assert(FID > 1);
- std::pair<SourceLocation, SourceLocation>
- instRange = getImmediateInstantiationRange(loc);
-
- bool invalid = false;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(FID-1, &invalid);
- if (invalid)
- return false;
-
- // If the FileID immediately before it is a file then this is the first token
- // in the macro.
- if (Entry.isFile())
- return true;
-
- // If the FileID immediately before it (which is a macro token) is the
- // immediate instantiated macro, check this macro token's location.
- if (getFileID(instRange.second).ID == FID-1)
- return isAtStartOfMacroInstantiation(instRange.first);
-
- // If the FileID immediately before it (which is a macro token) came from a
- // different instantiation, then this is the first token in the macro.
- if (getInstantiationLoc(Entry.getInstantiation().getInstantiationLocStart())
- != getInstantiationLoc(loc))
- return true;
-
- // It is inside the macro or the last token in the macro.
- return false;
-}
-
-/// \brief Returns true if the given MacroID location points at the last
-/// token of the macro instantiation.
-bool SourceManager::isAtEndOfMacroInstantiation(SourceLocation loc) const {
- assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
-
- unsigned FID = getFileID(loc).ID;
- assert(FID > 1);
- std::pair<SourceLocation, SourceLocation>
- instRange = getInstantiationRange(loc);
-
- // If there's no FileID after it, it is the last token in the macro.
- if (FID+1 == sloc_entry_size())
- return true;
-
- bool invalid = false;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(FID+1, &invalid);
- if (invalid)
- return false;
-
- // If the FileID immediately after it is a file or a macro token which
- // came from a different instantiation, then this is the last token in the
- // macro.
- if (Entry.isFile())
- return true;
- if (getInstantiationLoc(Entry.getInstantiation().getInstantiationLocStart())
- != instRange.first)
- return true;
-
- // It is inside the macro or the first token in the macro.
- return false;
-}
-
//===----------------------------------------------------------------------===//
// Other miscellaneous methods.
//===----------------------------------------------------------------------===//
@@ -1474,8 +1431,6 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
// reflect the order that the tokens, pointed to by these locations, were
// instantiated (during parsing each token that is instantiated by a macro,
// expands the SLocEntries).
- if (LHS.isMacroID() && RHS.isMacroID())
- return LHS.getOffset() < RHS.getOffset();
std::pair<FileID, unsigned> LOffs = getDecomposedLoc(LHS);
std::pair<FileID, unsigned> ROffs = getDecomposedLoc(RHS);
@@ -1562,7 +1517,9 @@ void SourceManager::PrintStats() const {
llvm::errs() << "\n*** Source Manager Stats:\n";
llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
<< " mem buffers mapped.\n";
- llvm::errs() << SLocEntryTable.size() << " SLocEntry's allocated, "
+ llvm::errs() << SLocEntryTable.size() << " SLocEntry's allocated ("
+ << SLocEntryTable.capacity()*sizeof(SrcMgr::SLocEntry)
+ << " bytes of capacity), "
<< NextOffset << "B of Sloc address space used.\n";
unsigned NumLineNumsComputed = 0;
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index dcf0cb4237a9..30a9bdb31774 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -181,6 +181,14 @@ static llvm::StringRef removeGCCRegisterPrefix(llvm::StringRef Name) {
return Name;
}
+/// isValidClobber - Returns whether the passed in string is
+/// a valid clobber in an inline asm statement. This is used by
+/// Sema.
+bool TargetInfo::isValidClobber(llvm::StringRef Name) const {
+ return (isValidGCCRegisterName(Name) ||
+ Name == "memory" || Name == "cc");
+}
+
/// isValidGCCRegisterName - Returns whether the passed in string
/// is a valid register name according to GCC. This is used by Sema for
/// inline asm statements.
@@ -194,9 +202,6 @@ bool TargetInfo::isValidGCCRegisterName(llvm::StringRef Name) const {
// Get rid of any register prefix.
Name = removeGCCRegisterPrefix(Name);
- if (Name == "memory" || Name == "cc")
- return true;
-
getGCCRegNames(Names, NumNames);
// If we have a number it maps to an entry in the register name array.
@@ -212,6 +217,20 @@ bool TargetInfo::isValidGCCRegisterName(llvm::StringRef Name) const {
return true;
}
+ // Check any additional names that we have.
+ const AddlRegName *AddlNames;
+ unsigned NumAddlNames;
+ getGCCAddlRegNames(AddlNames, NumAddlNames);
+ for (unsigned i = 0; i < NumAddlNames; i++)
+ for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
+ if (!AddlNames[i].Names[j])
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
+ return true;
+ }
+
// Now check aliases.
const GCCRegAlias *Aliases;
unsigned NumAliases;
@@ -251,6 +270,20 @@ TargetInfo::getNormalizedGCCRegisterName(llvm::StringRef Name) const {
}
}
+ // Check any additional names that we have.
+ const AddlRegName *AddlNames;
+ unsigned NumAddlNames;
+ getGCCAddlRegNames(AddlNames, NumAddlNames);
+ for (unsigned i = 0; i < NumAddlNames; i++)
+ for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
+ if (!AddlNames[i].Names[j])
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
+ return Name;
+ }
+
// Now check aliases.
const GCCRegAlias *Aliases;
unsigned NumAliases;
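The two hunks above split the "memory"/"cc" special cases into TargetInfo::isValidClobber() and route extra register spellings through getGCCAddlRegNames(). The following is not clang's code, just a self-contained model of that check with made-up tables, to show the bounds check on RegNum against the primary name table.

#include <cstring>

namespace {
const char *const RegNames[] = { "ax", "bx", "cx", "dx" };
const unsigned NumRegNames = sizeof(RegNames) / sizeof(RegNames[0]);

struct AddlRegName { const char *Names[4]; unsigned RegNum; };
const AddlRegName AddlRegNames[] = {
  { { "eax", "rax", 0, 0 }, 0 },
  { { "ebx", "rbx", 0, 0 }, 1 },
};
const unsigned NumAddlNames = sizeof(AddlRegNames) / sizeof(AddlRegNames[0]);
}

bool isValidClobberModel(const char *Name) {
  if (!std::strcmp(Name, "memory") || !std::strcmp(Name, "cc"))
    return true;                                   // special clobbers
  for (unsigned i = 0; i != NumRegNames; ++i)
    if (!std::strcmp(Name, RegNames[i]))
      return true;                                 // primary register names
  for (unsigned i = 0; i != NumAddlNames; ++i)
    for (unsigned j = 0; j != 4 && AddlRegNames[i].Names[j]; ++j)
      if (!std::strcmp(Name, AddlRegNames[i].Names[j]) &&
          AddlRegNames[i].RegNum < NumRegNames)
        return true;                               // additional names, bounds-checked
  return false;
}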
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index dd167dca47b2..3518ea6f7986 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -84,14 +84,28 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("__MACH__");
Builder.defineMacro("OBJC_NEW_PROPERTIES");
- // __weak is always defined, for use in blocks and with objc pointers.
- Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+ if (!Opts.ObjCAutoRefCount) {
+ // __weak is always defined, for use in blocks and with objc pointers.
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
- // Darwin defines __strong even in C mode (just to nothing).
- if (!Opts.ObjC1 || Opts.getGCMode() == LangOptions::NonGC)
- Builder.defineMacro("__strong", "");
- else
- Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
+ // Darwin defines __strong even in C mode (just to nothing).
+ if (Opts.getGCMode() != LangOptions::NonGC)
+ Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
+ else
+ Builder.defineMacro("__strong", "");
+
+ // __unsafe_unretained is defined to nothing in non-ARC mode. We even
+ // allow this in C, since one might have block pointers in structs that
+ // are used in pure C code and in Objective-C ARC.
+ Builder.defineMacro("__unsafe_unretained", "");
+
+ // The Objective-C bridged cast keywords are defined to nothing in non-ARC
+ // mode; then they become normal, C-style casts.
+ Builder.defineMacro("__bridge", "");
+ Builder.defineMacro("__bridge_transfer", "");
+ Builder.defineMacro("__bridge_retained", "");
+ Builder.defineMacro("__bridge_retain", "");
+ }
if (Opts.Static)
Builder.defineMacro("__STATIC__");
@@ -168,14 +182,15 @@ class DarwinTargetInfo : public OSTargetInfo<Target> {
protected:
virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const {
- getDarwinDefines(Builder, Opts, Triple, this->PlatformName,
+ getDarwinDefines(Builder, Opts, Triple, this->PlatformName,
this->PlatformMinVersion);
}
public:
DarwinTargetInfo(const std::string& triple) :
OSTargetInfo<Target>(triple) {
- this->TLSSupported = llvm::Triple(triple).getDarwinMajorNumber() > 10;
+ llvm::Triple T = llvm::Triple(triple);
+ this->TLSSupported = T.isMacOSX() && !T.isMacOSXVersionLT(10,7);
this->MCountName = "\01mcount";
}
@@ -512,7 +527,6 @@ class PPCTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char * const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
-
public:
PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {}
@@ -633,9 +647,9 @@ public:
};
const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES, false },
+ ALL_LANGUAGES },
#include "clang/Basic/BuiltinsPPC.def"
};
@@ -660,7 +674,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Target properties.
- Builder.defineMacro("_BIG_ENDIAN");
+ if (getTriple().getOS() != llvm::Triple::NetBSD)
+ Builder.defineMacro("_BIG_ENDIAN");
Builder.defineMacro("__BIG_ENDIAN__");
// Subtarget options.
@@ -788,8 +803,14 @@ public:
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
- if (getTriple().getOS() == llvm::Triple::FreeBSD)
- SizeType = UnsignedInt;
+ switch (getTriple().getOS()) {
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ SizeType = UnsignedInt;
+ break;
+ default:
+ break;
+ }
}
virtual const char *getVAListDeclaration() const {
@@ -890,9 +911,9 @@ namespace {
};
const Builtin::Info PTXTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES, false },
+ ALL_LANGUAGES },
#include "clang/Basic/BuiltinsPTX.def"
};
@@ -1058,49 +1079,49 @@ void MBlazeTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
namespace {
// Namespace for x86 abstract base class
const Builtin::Info BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES, false },
+ ALL_LANGUAGES },
#include "clang/Basic/BuiltinsX86.def"
};
static const char* const GCCRegNames[] = {
"ax", "dx", "cx", "bx", "si", "di", "bp", "sp",
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
- "argp", "flags", "fspr", "dirflag", "frame",
+ "argp", "flags", "fpcr", "fpsr", "dirflag", "frame",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
};
-const TargetInfo::GCCRegAlias GCCRegAliases[] = {
- { { "al", "ah", "eax", "rax" }, "ax" },
- { { "bl", "bh", "ebx", "rbx" }, "bx" },
- { { "cl", "ch", "ecx", "rcx" }, "cx" },
- { { "dl", "dh", "edx", "rdx" }, "dx" },
- { { "esi", "rsi" }, "si" },
- { { "edi", "rdi" }, "di" },
- { { "esp", "rsp" }, "sp" },
- { { "ebp", "rbp" }, "bp" },
+const TargetInfo::AddlRegName AddlRegNames[] = {
+ { { "al", "ah", "eax", "rax" }, 0 },
+ { { "bl", "bh", "ebx", "rbx" }, 3 },
+ { { "cl", "ch", "ecx", "rcx" }, 2 },
+ { { "dl", "dh", "edx", "rdx" }, 1 },
+ { { "esi", "rsi" }, 4 },
+ { { "edi", "rdi" }, 5 },
+ { { "esp", "rsp" }, 7 },
+ { { "ebp", "rbp" }, 6 },
};
// X86 target abstract base class; x86-32 and x86-64 are very close, so
// most of the implementation can be shared.
class X86TargetInfo : public TargetInfo {
enum X86SSEEnum {
- NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42
+ NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42
} SSELevel;
- enum AMD3DNowEnum {
- NoAMD3DNow, AMD3DNow, AMD3DNowAthlon
- } AMD3DNowLevel;
+ enum MMX3DNowEnum {
+ NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon
+ } MMX3DNowLevel;
bool HasAES;
bool HasAVX;
public:
X86TargetInfo(const std::string& triple)
- : TargetInfo(triple), SSELevel(NoMMXSSE), AMD3DNowLevel(NoAMD3DNow),
+ : TargetInfo(triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
HasAES(false), HasAVX(false) {
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
}
@@ -1116,8 +1137,13 @@ public:
}
virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
unsigned &NumAliases) const {
- Aliases = GCCRegAliases;
- NumAliases = llvm::array_lengthof(GCCRegAliases);
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual void getGCCAddlRegNames(const AddlRegName *&Names,
+ unsigned &NumNames) const {
+ Names = AddlRegNames;
+ NumNames = llvm::array_lengthof(AddlRegNames);
}
virtual bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const;
@@ -1133,6 +1159,9 @@ public:
virtual void getDefaultFeatures(const std::string &CPU,
llvm::StringMap<bool> &Features) const;
virtual void HandleTargetFeatures(std::vector<std::string> &Features);
+ virtual const char* getABI() const {
+ return MMX3DNowLevel == NoMMX3DNow ? "no-mmx" : "";
+ }
};
void X86TargetInfo::getDefaultFeatures(const std::string &CPU,
@@ -1164,31 +1193,38 @@ void X86TargetInfo::getDefaultFeatures(const std::string &CPU,
;
else if (CPU == "pentium-mmx" || CPU == "pentium2")
setFeatureEnabled(Features, "mmx", true);
- else if (CPU == "pentium3")
+ else if (CPU == "pentium3") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse", true);
- else if (CPU == "pentium-m" || CPU == "pentium4" || CPU == "x86-64")
+ } else if (CPU == "pentium-m" || CPU == "pentium4" || CPU == "x86-64") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse2", true);
- else if (CPU == "yonah" || CPU == "prescott" || CPU == "nocona")
+ } else if (CPU == "yonah" || CPU == "prescott" || CPU == "nocona") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse3", true);
- else if (CPU == "core2")
+ } else if (CPU == "core2") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "ssse3", true);
- else if (CPU == "penryn") {
+ } else if (CPU == "penryn") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse4", true);
Features["sse42"] = false;
- } else if (CPU == "atom")
+ } else if (CPU == "atom") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse3", true);
- else if (CPU == "corei7") {
+ } else if (CPU == "corei7") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse4", true);
setFeatureEnabled(Features, "aes", true);
} else if (CPU == "corei7-avx") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse4", true);
setFeatureEnabled(Features, "aes", true);
-// setFeatureEnabled(Features, "avx", true);
+ //setFeatureEnabled(Features, "avx", true);
} else if (CPU == "k6" || CPU == "winchip-c6")
setFeatureEnabled(Features, "mmx", true);
else if (CPU == "k6-2" || CPU == "k6-3" || CPU == "athlon" ||
CPU == "athlon-tbird" || CPU == "winchip2" || CPU == "c3") {
- setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "3dnow", true);
} else if (CPU == "athlon-4" || CPU == "athlon-xp" || CPU == "athlon-mp") {
setFeatureEnabled(Features, "sse", true);
@@ -1200,8 +1236,10 @@ void X86TargetInfo::getDefaultFeatures(const std::string &CPU,
} else if (CPU == "k8-sse3") {
setFeatureEnabled(Features, "sse3", true);
setFeatureEnabled(Features, "3dnowa", true);
- } else if (CPU == "c3-2")
+ } else if (CPU == "c3-2") {
+ setFeatureEnabled(Features, "mmx", true);
setFeatureEnabled(Features, "sse", true);
+ }
}
bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
@@ -1217,34 +1255,31 @@ bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
if (Name == "mmx")
Features["mmx"] = true;
else if (Name == "sse")
- Features["mmx"] = Features["sse"] = true;
+ Features["sse"] = true;
else if (Name == "sse2")
- Features["mmx"] = Features["sse"] = Features["sse2"] = true;
+ Features["sse"] = Features["sse2"] = true;
else if (Name == "sse3")
- Features["mmx"] = Features["sse"] = Features["sse2"] =
- Features["sse3"] = true;
+ Features["sse"] = Features["sse2"] = Features["sse3"] = true;
else if (Name == "ssse3")
- Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = true;
else if (Name == "sse4" || Name == "sse4.2")
- Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] = true;
else if (Name == "sse4.1")
- Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = true;
else if (Name == "3dnow")
- Features["3dnowa"] = true;
+ Features["mmx"] = Features["3dnow"] = true;
else if (Name == "3dnowa")
- Features["3dnow"] = Features["3dnowa"] = true;
+ Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = true;
else if (Name == "aes")
Features["aes"] = true;
else if (Name == "avx")
Features["avx"] = true;
} else {
if (Name == "mmx")
- Features["mmx"] = Features["3dnow"] = Features["3dnowa"] =
- Features["sse"] = Features["sse2"] = Features["sse3"] =
- Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = false;
else if (Name == "sse")
Features["sse"] = Features["sse2"] = Features["sse3"] =
Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
@@ -1302,18 +1337,25 @@ void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
.Case("sse3", SSE3)
.Case("sse2", SSE2)
.Case("sse", SSE1)
- .Case("mmx", MMX)
- .Default(NoMMXSSE);
+ .Default(NoSSE);
SSELevel = std::max(SSELevel, Level);
- AMD3DNowEnum ThreeDNowLevel =
- llvm::StringSwitch<AMD3DNowEnum>(Features[i].substr(1))
+ MMX3DNowEnum ThreeDNowLevel =
+ llvm::StringSwitch<MMX3DNowEnum>(Features[i].substr(1))
.Case("3dnowa", AMD3DNowAthlon)
.Case("3dnow", AMD3DNow)
- .Default(NoAMD3DNow);
+ .Case("mmx", MMX)
+ .Default(NoMMX3DNow);
- AMD3DNowLevel = std::max(AMD3DNowLevel, ThreeDNowLevel);
+ MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel);
}
+
+ // Don't tell the backend if we're turning off mmx; it will end up disabling
+ // SSE, which we don't want.
+ std::vector<std::string>::iterator it;
+ it = std::find(Features.begin(), Features.end(), "-mmx");
+ if (it != Features.end())
+ Features.erase(it);
}
/// X86TargetInfo::getTargetDefines - Return a set of the X86-specific #defines
@@ -1368,9 +1410,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case SSE1:
Builder.defineMacro("__SSE__");
Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied.
- case MMX:
- Builder.defineMacro("__MMX__");
- case NoMMXSSE:
+ case NoSSE:
break;
}
@@ -1392,12 +1432,14 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Each case falls through to the previous one here.
- switch (AMD3DNowLevel) {
+ switch (MMX3DNowLevel) {
case AMD3DNowAthlon:
Builder.defineMacro("__3dNOW_A__");
case AMD3DNow:
Builder.defineMacro("__3dNOW__");
- case NoAMD3DNow:
+ case MMX:
+ Builder.defineMacro("__MMX__");
+ case NoMMX3DNow:
break;
}
}
@@ -1647,6 +1689,65 @@ public:
};
} // end anonymous namespace
+// RTEMS Target
+template<typename Target>
+class RTEMSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // RTEMS defines; list based off of gcc output
+
+ // FIXME: Move version number handling to llvm::Triple.
+ llvm::StringRef Release = Triple.getOSName().substr(strlen("rtems"), 1);
+
+ Builder.defineMacro("__rtems__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ RTEMSTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ // this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ // this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ // this->MCountName = "__mcount";
+ break;
+ }
+
+ }
+};
+
+namespace {
+// x86-32 RTEMS target
+class RTEMSX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ RTEMSX86_32TargetInfo(const std::string& triple)
+ : X86_32TargetInfo(triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ this->UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__INTEL__");
+ Builder.defineMacro("__rtems__");
+ }
+};
+} // end anonymous namespace
+
namespace {
// x86-64 generic target
class X86_64TargetInfo : public X86TargetInfo {
@@ -1861,7 +1962,7 @@ public:
// Thumb1 add sp, #imm requires the immediate value be multiple of 4,
// so set preferred for small types to 32.
DescriptionString = ("e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
- "i64:32:32-f32:32:32-f64:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-"
"v64:32:64-v128:32:128-a0:0:32-n32");
} else {
DescriptionString = ("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
@@ -1882,11 +1983,6 @@ public:
void getDefaultFeatures(const std::string &CPU,
llvm::StringMap<bool> &Features) const {
- // FIXME: This should not be here.
- Features["vfp2"] = false;
- Features["vfp3"] = false;
- Features["neon"] = false;
-
if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
Features["vfp2"] = true;
else if (CPU == "cortex-a8" || CPU == "cortex-a9")
@@ -1896,12 +1992,8 @@ public:
virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
const std::string &Name,
bool Enabled) const {
- if (Name == "soft-float" || Name == "soft-float-abi") {
- Features[Name] = Enabled;
- } else if (Name == "vfp2" || Name == "vfp3" || Name == "neon") {
- // These effectively are a single option, reset them when any is enabled.
- if (Enabled)
- Features["vfp2"] = Features["vfp3"] = Features["neon"] = false;
+ if (Name == "soft-float" || Name == "soft-float-abi" ||
+ Name == "vfp2" || Name == "vfp3" || Name == "neon") {
Features[Name] = Enabled;
} else
return false;
@@ -2042,6 +2134,12 @@ public:
case 'q': // ...ARMV4 ldrsb
case 'v': // ...VFP load/store (reg+constant offset)
case 'y': // ...iWMMXt load/store
+ case 't': // address valid for load/store opaque types wider
+ // than 128-bits
+ case 'n': // valid address for Neon doubleword vector load/store
+ case 'm': // valid address for Neon element and structure load/store
+ case 's': // valid address for non-offset loads/stores of quad-word
+ // values in four ARM registers
Info.setAllowsMemory();
Name++;
return true;
@@ -2049,14 +2147,16 @@ public:
}
return false;
}
- std::string
- virtual convertConstraint(const char *&Constraint) const {
+ virtual std::string convertConstraint(const char *&Constraint) const {
std::string R;
switch (*Constraint) {
case 'U': // Two-character constraint; add "^" hint for later parsing.
R = std::string("^") + std::string(Constraint, 2);
Constraint++;
break;
+ case 'p': // 'p' should be translated to 'r' by default.
+ R = std::string("r");
+ break;
default:
return std::string(1, *Constraint);
}
@@ -2124,9 +2224,9 @@ void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
}
const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
- ALL_LANGUAGES, false },
+ ALL_LANGUAGES },
#include "clang/Basic/BuiltinsARM.def"
};
} // end anonymous namespace.
@@ -2727,45 +2827,81 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new LinuxTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<ARMTargetInfo>(T);
default:
return new ARMTargetInfo(T);
}
case llvm::Triple::bfin:
+ if ( os == llvm::Triple::RTEMS )
+ return new RTEMSTargetInfo<BlackfinTargetInfo>(T);
return new BlackfinTargetInfo(T);
case llvm::Triple::msp430:
return new MSP430TargetInfo(T);
case llvm::Triple::mips:
- if (os == llvm::Triple::Psp)
+ switch (os) {
+ case llvm::Triple::Psp:
return new PSPTargetInfo<MipsTargetInfo>(T);
- if (os == llvm::Triple::Linux)
+ case llvm::Triple::Linux:
return new LinuxTargetInfo<MipsTargetInfo>(T);
- return new MipsTargetInfo(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<MipsTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<MipsTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<MipsTargetInfo>(T);
+ default:
+ return new MipsTargetInfo(T);
+ }
case llvm::Triple::mipsel:
- if (os == llvm::Triple::Psp)
+ switch (os) {
+ case llvm::Triple::Psp:
return new PSPTargetInfo<MipselTargetInfo>(T);
- if (os == llvm::Triple::Linux)
+ case llvm::Triple::Linux:
return new LinuxTargetInfo<MipselTargetInfo>(T);
- return new MipselTargetInfo(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<MipselTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<MipselTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<MipselTargetInfo>(T);
+ default:
+ return new MipsTargetInfo(T);
+ }
case llvm::Triple::ppc:
if (Triple.isOSDarwin())
return new DarwinPPC32TargetInfo(T);
- else if (os == llvm::Triple::FreeBSD)
+ switch (os) {
+ case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
- return new PPC32TargetInfo(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC32TargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<PPC32TargetInfo>(T);
+ default:
+ return new PPC32TargetInfo(T);
+ }
case llvm::Triple::ppc64:
if (Triple.isOSDarwin())
return new DarwinPPC64TargetInfo(T);
- else if (os == llvm::Triple::Lv2)
+ switch (os) {
+ case llvm::Triple::Lv2:
return new PS3PPUTargetInfo<PPC64TargetInfo>(T);
- else if (os == llvm::Triple::FreeBSD)
+ case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<PPC64TargetInfo>(T);
- return new PPC64TargetInfo(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC64TargetInfo>(T);
+ default:
+ return new PPC64TargetInfo(T);
+ }
case llvm::Triple::ptx32:
return new PTX32TargetInfo(T);
@@ -2776,11 +2912,18 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new MBlazeTargetInfo(T);
case llvm::Triple::sparc:
- if (os == llvm::Triple::AuroraUX)
+ switch (os) {
+ case llvm::Triple::AuroraUX:
return new AuroraUXSparcV8TargetInfo(T);
- if (os == llvm::Triple::Solaris)
+ case llvm::Triple::Solaris:
return new SolarisSparcV8TargetInfo(T);
- return new SparcV8TargetInfo(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<SparcV8TargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<SparcV8TargetInfo>(T);
+ default:
+ return new SparcV8TargetInfo(T);
+ }
// FIXME: Need a real SPU target.
case llvm::Triple::cellspu:
@@ -2821,6 +2964,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new VisualStudioWindowsX86_32TargetInfo(T);
case llvm::Triple::Haiku:
return new HaikuX86_32TargetInfo(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSX86_32TargetInfo(T);
default:
return new X86_32TargetInfo(T);
}
@@ -2910,7 +3055,8 @@ TargetInfo *TargetInfo::CreateTargetInfo(Diagnostic &Diags,
Opts.Features.clear();
for (llvm::StringMap<bool>::const_iterator it = Features.begin(),
ie = Features.end(); it != ie; ++it)
- Opts.Features.push_back(std::string(it->second ? "+" : "-") + it->first());
+ Opts.Features.push_back(std::string(it->second ? "+" : "-") +
+ it->first().str());
Target->HandleTargetFeatures(Opts.Features);
return Target.take();
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index b4574344bc5f..7b46c6040a9c 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -7,6 +7,7 @@ add_subdirectory(Sema)
add_subdirectory(CodeGen)
add_subdirectory(Analysis)
add_subdirectory(Rewrite)
+add_subdirectory(ARCMigrate)
add_subdirectory(Driver)
add_subdirectory(Serialization)
add_subdirectory(Frontend)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index ce1039849b7c..138123816c3e 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -68,22 +68,22 @@ namespace clang {
private:
Kind TheKind;
- llvm::PATypeHolder TypeData;
+ llvm::Type *TypeData;
unsigned UIntData;
bool BoolData0;
bool BoolData1;
- ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ ABIArgInfo(Kind K, llvm::Type *TD=0,
unsigned UI=0, bool B0 = false, bool B1 = false)
: TheKind(K), TypeData(TD), UIntData(UI), BoolData0(B0), BoolData1(B1) {}
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0) {
return ABIArgInfo(Direct, T, Offset);
}
- static ABIArgInfo getExtend(const llvm::Type *T = 0) {
+ static ABIArgInfo getExtend(llvm::Type *T = 0) {
return ABIArgInfo(Extend, T, 0);
}
static ABIArgInfo getIgnore() {
@@ -113,12 +113,12 @@ namespace clang {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
return UIntData;
}
- const llvm::Type *getCoerceToType() const {
+ llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
- void setCoerceToType(const llvm::Type *T) {
+ void setCoerceToType(llvm::Type *T) {
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 01d15ff7f7fd..85f42db81f59 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -10,6 +10,7 @@
#include "clang/CodeGen/BackendUtil.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Module.h"
@@ -18,13 +19,13 @@
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/PassManagerBuilder.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/SubtargetFeature.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -39,6 +40,7 @@ class EmitAssemblyHelper {
Diagnostic &Diags;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
Module *TheModule;
Timer CodeGenerationTime;
@@ -82,8 +84,9 @@ private:
public:
EmitAssemblyHelper(Diagnostic &_Diags,
const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ const LangOptions &LOpts,
Module *M)
- : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
TheModule(M), CodeGenerationTime("Code Generation Time"),
CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
@@ -98,6 +101,16 @@ public:
}
+static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCExpandPass());
+}
+
+static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCOptPass());
+}
+
void EmitAssemblyHelper::CreatePasses() {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
@@ -116,6 +129,14 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+
+ // In ObjC ARC mode, add the main ARC optimization passes.
+ if (LangOpts.ObjCAutoRefCount) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addObjCARCOptPass);
+ }
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
@@ -258,16 +279,16 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
const_cast<char **>(&BackendArgs[0]));
std::string FeaturesStr;
- if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ if (TargetOpts.Features.size()) {
SubtargetFeatures Features;
- Features.setCPU(TargetOpts.CPU);
for (std::vector<std::string>::const_iterator
it = TargetOpts.Features.begin(),
ie = TargetOpts.Features.end(); it != ie; ++it)
Features.AddFeature(*it);
FeaturesStr = Features.getString();
}
- TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
+ FeaturesStr);
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
@@ -275,6 +296,8 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TM->setMCSaveTempLabels(true);
if (CodeGenOpts.NoDwarf2CFIAsm)
TM->setMCUseCFI(false);
+ if (CodeGenOpts.NoExecStack)
+ TM->setMCNoExecStack(true);
// Create the code generator passes.
PassManager *PM = getCodeGenPasses();
@@ -295,6 +318,13 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
CGFT = TargetMachine::CGFT_Null;
else
assert(Action == Backend_EmitAssembly && "Invalid action!");
+
+ // Add ObjC ARC final-cleanup optimizations. This is done as part of the
+ // "codegen" passes so that it isn't run multiple times when there is
+ // inlining happening.
+ if (LangOpts.ObjCAutoRefCount)
+ PM->add(createObjCARCContractPass());
+
if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
/*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
@@ -357,9 +387,11 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
}
void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
- const TargetOptions &TOpts, Module *M,
+ const TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M,
BackendAction Action, raw_ostream *OS) {
- EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
AsmHelper.EmitAssembly(Action, OS);
}
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index e5da703a61b2..9815d1d4ef4d 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -95,9 +95,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
else
elements.push_back(llvm::Constant::getNullValue(i8p));
- llvm::Constant *init =
- llvm::ConstantStruct::get(CGM.getLLVMContext(), elements.data(),
- elements.size(), false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
llvm::GlobalVariable *global =
new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
@@ -166,11 +164,11 @@ namespace {
CharUnits Alignment;
CharUnits Size;
const BlockDecl::Capture *Capture; // null for 'this'
- const llvm::Type *Type;
+ llvm::Type *Type;
BlockLayoutChunk(CharUnits align, CharUnits size,
const BlockDecl::Capture *capture,
- const llvm::Type *type)
+ llvm::Type *type)
: Alignment(align), Size(size), Capture(capture), Type(type) {}
/// Tell the block info that this chunk has the given field index.
@@ -245,7 +243,7 @@ static CharUnits getLowBit(CharUnits v) {
}
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
- llvm::SmallVectorImpl<const llvm::Type*> &elementTypes) {
+ llvm::SmallVectorImpl<llvm::Type*> &elementTypes) {
ASTContext &C = CGM.getContext();
// The header is basically a 'struct { void *; int; int; void *; void *; }'.
@@ -265,8 +263,8 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
info.BlockSize = headerSize;
assert(elementTypes.empty());
- const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
- const llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
elementTypes.push_back(i8p);
elementTypes.push_back(intTy);
elementTypes.push_back(intTy);
@@ -282,7 +280,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
- llvm::SmallVector<const llvm::Type*, 8> elementTypes;
+ llvm::SmallVector<llvm::Type*, 8> elementTypes;
initializeForBlockHeader(CGM, info, elementTypes);
if (!block->hasCaptures()) {
@@ -310,7 +308,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
else
thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
- const llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(thisType);
maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
@@ -330,7 +328,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// Just use void* instead of a pointer to the byref type.
QualType byRefPtrTy = C.VoidPtrTy;
- const llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
std::pair<CharUnits,CharUnits> tinfo
= CGM.getContext().getTypeInfoInChars(byRefPtrTy);
maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
@@ -347,13 +345,23 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
continue;
}
- // Block pointers require copy/dispose.
- if (variable->getType()->isBlockPointerType()) {
- info.NeedsCopyDispose = true;
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ if (Qualifiers::ObjCLifetime lifetime
+ = variable->getType().getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
- // So do Objective-C pointers.
- } else if (variable->getType()->isObjCObjectPointerType() ||
- C.isObjCNSObjectType(variable->getType())) {
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
info.NeedsCopyDispose = true;
// So do types that require non-trivial copy construction.
@@ -376,7 +384,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
CharUnits align = C.getDeclAlign(variable);
maxFieldAlign = std::max(maxFieldAlign, align);
- const llvm::Type *llvmType =
+ llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(variable->getType());
layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
@@ -591,6 +599,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Otherwise, fake up a POD copy into the block field.
} else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(/*DC*/ 0, SourceLocation(),
+ /*name*/ 0, type);
+
// We use one of these or the other depending on whether the
// reference is nested.
DeclRefExpr notNested(const_cast<VarDecl*>(variable), type, VK_LValue,
@@ -603,15 +616,37 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
declRef, VK_RValue);
- EmitExprAsInit(&l2r, variable, blockField,
- getContext().getDeclAlign(variable),
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar,
+ LValue::MakeAddr(blockField, type,
+ getContext().getDeclAlign(variable)
+ .getQuantity(),
+ getContext()),
/*captured by init*/ false);
}
// Push a destructor if necessary. The semantics for when this
// actually gets run are really obscure.
- if (!ci->isByRef() && CGM.getLangOptions().CPlusPlus)
- PushDestructorCleanup(type, blockField);
+ if (!ci->isByRef()) {
+ switch (QualType::DestructionKind dtorKind = type.isDestructedType()) {
+ case QualType::DK_none:
+ break;
+
+ // Block captures count as local values and have imprecise semantics.
+ // They also can't be arrays, so need to worry about that.
+ case QualType::DK_objc_strong_lifetime: {
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = &destroyARCStrongImprecise;
+ pushDestroy(getCleanupKind(dtorKind), blockField, type,
+ *destroyer, /*useEHCleanupForArray*/ false);
+ break;
+ }
+
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_cxx_destructor:
+ pushDestroy(dtorKind, blockField, type);
+ break;
+ }
+ }
}
// Cast to the converted block-pointer type, which happens (somewhat
@@ -624,11 +659,11 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
}
-const llvm::Type *CodeGenModule::getBlockDescriptorType() {
+llvm::Type *CodeGenModule::getBlockDescriptorType() {
if (BlockDescriptorType)
return BlockDescriptorType;
- const llvm::Type *UnsignedLongTy =
+ llvm::Type *UnsignedLongTy =
getTypes().ConvertType(getContext().UnsignedLongTy);
// struct __block_descriptor {
@@ -645,24 +680,20 @@ const llvm::Type *CodeGenModule::getBlockDescriptorType() {
// const char *signature; // the block signature
// const char *layout; // reserved
// };
- BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
- UnsignedLongTy,
- UnsignedLongTy,
- NULL);
-
- getModule().addTypeName("struct.__block_descriptor",
- BlockDescriptorType);
+ BlockDescriptorType =
+ llvm::StructType::createNamed("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, NULL);
// Now form a pointer to that.
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
return BlockDescriptorType;
}
-const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
if (GenericBlockLiteralType)
return GenericBlockLiteralType;
- const llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
+ llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
// struct __block_literal_generic {
// void *__isa;
@@ -671,16 +702,14 @@ const llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
- GenericBlockLiteralType = llvm::StructType::get(getLLVMContext(),
- VoidPtrTy,
- IntTy,
- IntTy,
- VoidPtrTy,
- BlockDescPtrTy,
- NULL);
-
- getModule().addTypeName("struct.__block_literal_generic",
- GenericBlockLiteralType);
+ GenericBlockLiteralType =
+ llvm::StructType::createNamed("struct.__block_literal_generic",
+ VoidPtrTy,
+ IntTy,
+ IntTy,
+ VoidPtrTy,
+ BlockDescPtrTy,
+ NULL);
return GenericBlockLiteralType;
}
@@ -822,9 +851,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Descriptor
fields[4] = buildBlockDescriptor(CGM, blockInfo);
- llvm::Constant *init =
- llvm::ConstantStruct::get(CGM.getLLVMContext(), fields, BlockHeaderSize,
- /*packed*/ false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
llvm::GlobalVariable *literal =
new llvm::GlobalVariable(CGM.getModule(),
@@ -1023,8 +1050,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
-
-
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
ASTContext &C = getContext();
@@ -1084,21 +1109,40 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
if (capture.isConstant()) continue;
const Expr *copyExpr = ci->getCopyExpr();
- unsigned flags = 0;
+ BlockFieldFlags flags;
+
+ bool isARCWeakCapture = false;
if (copyExpr) {
assert(!ci->isByRef());
// don't bother computing flags
+
} else if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ } else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- }
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
- if (!copyExpr && !flags) continue;
+ // Don't generate special copy logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1107,12 +1151,14 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// If there's an explicit copy expression, we do that.
if (copyExpr) {
EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (isARCWeakCapture) {
+ EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
- llvm::ConstantInt::get(Int32Ty, flags));
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
}
}
@@ -1176,20 +1222,37 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
BlockFieldFlags flags;
const CXXDestructorDecl *dtor = 0;
+ bool isARCWeakCapture = false;
+
if (ci->isByRef()) {
flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- flags = BLOCK_FIELD_IS_BLOCK;
- } else if (type->isObjCObjectPointerType() || C.isObjCNSObjectType(type)) {
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
flags = BLOCK_FIELD_IS_OBJECT;
- } else if (C.getLangOptions().CPlusPlus) {
- if (const CXXRecordDecl *record = type->getAsCXXRecordDecl())
- if (!record->hasTrivialDestructor())
- dtor = record->getDestructor();
- }
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
- if (!dtor && flags.empty()) continue;
+ // Special rules for ARC captures.
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special dispose logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
unsigned index = capture.getIndex();
llvm::Value *srcField = Builder.CreateStructGEP(src, index);
@@ -1198,6 +1261,10 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
if (dtor) {
PushDestructorCleanup(dtor, srcField);
+ // If this is a __weak capture, emit the release directly.
+ } else if (isARCWeakCapture) {
+ EmitARCDestroyWeak(srcField);
+
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
@@ -1251,6 +1318,55 @@ public:
}
};
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::Value *value = CGF.Builder.CreateLoad(srcField);
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+ CGF.Builder.CreateStore(value, destField);
+ CGF.Builder.CreateStore(null, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
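// Illustration, not part of the patch: the "move" that ARCStrongByrefHelpers::emitCopy
// generates above boils down to copying the strong pointer and nulling the old slot,
// so exactly one reference remains to be released later. Toy version, no ObjC runtime:
void moveStrongSlot(void **destField, void **srcField) {
  void *value = *srcField;   // load the retained pointer from the stack copy
  *destField = value;        // the heap byref copy takes over ownership
  *srcField = 0;             // old slot is zeroed; no extra retain/release emitted
}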
+
/// Emits the copy/dispose helpers for a __block variable with a
/// nontrivial copy constructor or destructor.
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
@@ -1318,6 +1434,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_Static,
SC_None,
false, true);
+
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1449,6 +1566,52 @@ CodeGenFunction::buildByrefHelpers(const llvm::StructType &byrefType,
return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
}
+ // Otherwise, if we don't have a retainable type, there's nothing to do.
+ // that the runtime does extra copies.
+ if (!type->isObjCRetainableType()) return 0;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ assert(getLangOptions().ObjCAutoRefCount);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return 0;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak: {
+ ARCWeakByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block-pointers need to be _Block_copy'ed, so we let the
+ // runtime be in charge. But we can't use the code below
+ // because we don't want to set BYREF_CALLER, which will
+ // just make the runtime ignore us.
+ if (type->isBlockPointerType()) {
+ BlockFieldFlags flags = BLOCK_FIELD_IS_BLOCK;
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ ARCStrongByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
BlockFieldFlags flags;
if (type->isBlockPointerType()) {
flags |= BLOCK_FIELD_IS_BLOCK;
@@ -1502,15 +1665,17 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
QualType Ty = D->getType();
- llvm::SmallVector<const llvm::Type *, 8> types;
+ llvm::SmallVector<llvm::Type *, 8> types;
- llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());
+ llvm::StructType *ByRefType =
+ llvm::StructType::createNamed(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
// void *__isa;
types.push_back(Int8PtrTy);
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+ types.push_back(llvm::PointerType::getUnqual(ByRefType));
// int32_t __flags;
types.push_back(Int32Ty);
@@ -1545,7 +1710,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
if (NumPaddingBytes > 0) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
// FIXME: We need a sema error for alignment larger than the minimum of
// the maximal stack alignment and the alignment of malloc on the system.
if (NumPaddingBytes > 1)
@@ -1561,13 +1726,9 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
// T x;
types.push_back(ConvertTypeForMem(Ty));
- const llvm::Type *T = llvm::StructType::get(getLLVMContext(), types, Packed);
-
- cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
- CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
- ByRefTypeHolder.get());
+ ByRefType->setBody(types, Packed);
- Info.first = ByRefTypeHolder.get();
+ Info.first = ByRefType;
Info.second = types.size() - 1;
@@ -1638,7 +1799,8 @@ namespace {
llvm::Value *Addr;
CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Should we be passing FIELD_IS_WEAK here?
CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
}
};
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 9bd18e5fde74..4d8dead2be8c 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -17,7 +17,6 @@
#include "CodeGenTypes.h"
#include "clang/AST/Type.h"
#include "llvm/Module.h"
-#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
@@ -89,7 +88,7 @@ enum BlockFieldFlag_t {
variable */
BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
helpers */
-
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
BLOCK_BYREF_CURRENT_MAX = 256
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 14bebaf3c1cb..1566bd9e6697 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -22,6 +22,7 @@
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
+
using namespace clang;
using namespace CodeGen;
using namespace llvm;
@@ -37,8 +38,7 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
StoreLoad ? True : False,
StoreStore ? True : False,
Device ? True : False };
- CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
- C, C + 5);
+ CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
}
/// Emit the conversions required to turn the given value into an
@@ -68,14 +68,14 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
- Value **ArgBegin, Value **ArgEnd) {
+ ArrayRef<Value *> Args) {
// FIXME: We need a target hook for whether this applies to device memory or
// not.
bool Device = true;
// Create barriers both before and after the call.
EmitMemoryBarrier(CGF, true, true, true, true, Device);
- Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
+ Value *Result = CGF.Builder.CreateCall(Fn, Args);
EmitMemoryBarrier(CGF, true, true, true, true, Device);
return Result;
}
@@ -94,13 +94,13 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -108,7 +108,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
const llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
@@ -129,13 +129,13 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
@@ -143,7 +143,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
+ llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
@@ -164,7 +164,8 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
}
// The prototype is something that takes and returns whatever V's type is.
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
+ llvm::Type *ArgTys[] = { V->getType() };
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), ArgTys,
false);
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
@@ -232,8 +233,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -247,8 +248,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -263,8 +264,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// ffs(x) -> x ? cttz(x) + 1 : 0
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
@@ -283,8 +284,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
@@ -300,8 +301,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
@@ -311,25 +312,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_expect: {
- // FIXME: pass expect through to LLVM
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- if (E->getArg(1)->HasSideEffects(getContext()))
- (void)EmitScalarExpr(E->getArg(1));
- return RValue::get(ArgValue);
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+
+ Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
+ "expval");
+ return RValue::get(Result);
}
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgType = ArgValue->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
}
case Builtin::BI__builtin_object_size: {
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
- const llvm::Type *ResType[] = {
- ConvertType(E->getType())
- };
+ llvm::Type *ResType = ConvertType(E->getType());
// LLVM only supports 0 and 2, make sure that we pass along that
// as a boolean.
@@ -339,7 +342,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
uint64_t val = CI->getZExtValue();
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
return RValue::get(Builder.CreateCall2(F,
EmitScalarExpr(E->getArg(0)),
CI));
@@ -351,11 +354,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
- Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
- return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
}
case Builtin::BI__builtin_trap: {
- Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::trap);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
@@ -375,8 +379,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_powil: {
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ArgType = Base->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
@@ -630,20 +634,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
- Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
- Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
- Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
@@ -681,8 +685,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
? Intrinsic::eh_return_i32
- : Intrinsic::eh_return_i64,
- 0, 0);
+ : Intrinsic::eh_return_i64);
Builder.CreateCall2(F, Int, Ptr);
Builder.CreateUnreachable();
@@ -692,7 +695,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
case Builtin::BI__builtin_unwind_init: {
- Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
@@ -860,13 +863,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes, 2);
+ IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -875,7 +878,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
Result = EmitFromInt(*this, Result, T, ValueType);
return RValue::get(Result);
}
@@ -890,13 +893,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned AddrSpace =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::IntegerType *IntType =
+ llvm::IntegerType *IntType =
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
- const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes, 2);
+ IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -904,7 +907,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
Value *OldVal = Args[1];
- Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
+ Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
@@ -953,7 +956,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
EmitScalarExpr(E->getArg(3)),
EmitScalarExpr(E->getArg(4))
};
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
return RValue::get(0);
}
@@ -977,11 +980,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ArgType = Base->getType();
- Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal:
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal: {
+ // Rewrite fma to intrinsic.
+ Value *FirstArg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = FirstArg->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
+ return RValue::get(Builder.CreateCall3(F, FirstArg,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ "tmp"));
+ }
+
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl: {
@@ -1055,7 +1074,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args.push_back(ArgValue);
}
- Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
@@ -1099,8 +1118,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
-static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
- bool q) {
+static llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
switch (type) {
default: break;
case 0:
@@ -1133,7 +1151,7 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+ return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
@@ -1181,8 +1199,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
llvm::StringRef Name = FD->getName();
- return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
- Ops.begin(), Ops.end());
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
@@ -1203,8 +1220,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == ARM::BI__builtin_arm_strexd) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
- llvm::Type *STy = llvm::StructType::get(getLLVMContext(), Int32Ty, Int32Ty,
- NULL);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
@@ -1232,7 +1248,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
- const llvm::Type *Ty;
+ llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
Ty = llvm::Type::getFloatTy(getLLVMContext());
else
@@ -1243,8 +1259,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
- return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
@@ -1255,8 +1271,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
(void)poly; // Only used in assert()s.
bool rightShift = false;
- const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
- const llvm::Type *Ty = VTy;
+ llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
+ llvm::Type *Ty = VTy;
if (!Ty)
return 0;
@@ -1266,13 +1282,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case ARM::BI__builtin_neon_vabs_v:
case ARM::BI__builtin_neon_vabsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
Ops, "vabs");
case ARM::BI__builtin_neon_vaddhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
Ops, "vaddhn");
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
@@ -1300,17 +1316,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vcls_v:
case ARM::BI__builtin_neon_vclsq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
return EmitNeonCall(F, Ops, "vcls");
}
case ARM::BI__builtin_neon_vclz_v:
case ARM::BI__builtin_neon_vclzq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
return EmitNeonCall(F, Ops, "vclz");
}
case ARM::BI__builtin_neon_vcnt_v:
case ARM::BI__builtin_neon_vcntq_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
return EmitNeonCall(F, Ops, "vcnt");
}
case ARM::BI__builtin_neon_vcvt_f16_v: {
@@ -1340,18 +1356,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vcvt_n_f32_v:
case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
- const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
+ llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
- Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case ARM::BI__builtin_neon_vcvt_n_s32_v:
case ARM::BI__builtin_neon_vcvt_n_u32_v:
case ARM::BI__builtin_neon_vcvtq_n_s32_v:
case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
- const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
+ llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
- Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case ARM::BI__builtin_neon_vext_v:
@@ -1381,15 +1397,15 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vhadd_v:
case ARM::BI__builtin_neon_vhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
case ARM::BI__builtin_neon_vhsub_v:
case ARM::BI__builtin_neon_vhsubq_v:
Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1_lane_v:
case ARM::BI__builtin_neon_vld1q_lane_v:
@@ -1410,7 +1426,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1419,7 +1435,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1428,7 +1444,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1437,36 +1453,42 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
case ARM::BI__builtin_neon_vld2_lane_v:
case ARM::BI__builtin_neon_vld2q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case ARM::BI__builtin_neon_vld3_lane_v:
case ARM::BI__builtin_neon_vld3q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case ARM::BI__builtin_neon_vld4_lane_v:
case ARM::BI__builtin_neon_vld4q_lane_v: {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F,
+ ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
+ "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1488,7 +1510,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: assert(0 && "unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Int, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
@@ -1507,7 +1529,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
break;
default: assert(0 && "unknown vld_dup intrinsic?");
}
- Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ Function *F = CGM.getIntrinsic(Int, Ty);
const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
@@ -1518,7 +1540,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Args.push_back(CI);
Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// splat lane 0 to all elts in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Value *Val = Builder.CreateExtractValue(Ops[1], i);
@@ -1534,11 +1556,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vmax_v:
case ARM::BI__builtin_neon_vmaxq_v:
Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case ARM::BI__builtin_neon_vmin_v:
case ARM::BI__builtin_neon_vminq_v:
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case ARM::BI__builtin_neon_vmovl_v: {
const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
@@ -1554,12 +1576,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vmul_v:
case ARM::BI__builtin_neon_vmulq_v:
assert(poly && "vmul builtin only supported for polynomial types");
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmull");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v: {
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
@@ -1567,13 +1589,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
const llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- const llvm::Type *NarrowTy =
+ llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- const llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal");
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
}
case ARM::BI__builtin_neon_vpadd_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
Ops, "vpadd");
case ARM::BI__builtin_neon_vpaddl_v:
case ARM::BI__builtin_neon_vpaddlq_v: {
@@ -1581,120 +1603,120 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
- const llvm::Type *NarrowTy =
+ llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
- const llvm::Type *Tys[2] = { Ty, NarrowTy };
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl");
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case ARM::BI__builtin_neon_vpmax_v:
Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case ARM::BI__builtin_neon_vpmin_v:
Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case ARM::BI__builtin_neon_vqabs_v:
case ARM::BI__builtin_neon_vqabsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
Ops, "vqabs");
case ARM::BI__builtin_neon_vqadd_v:
case ARM::BI__builtin_neon_vqaddq_v:
Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
case ARM::BI__builtin_neon_vqdmlal_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
Ops, "vqdmlal");
case ARM::BI__builtin_neon_vqdmlsl_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
Ops, "vqdmlsl");
case ARM::BI__builtin_neon_vqdmulh_v:
case ARM::BI__builtin_neon_vqdmulhq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
Ops, "vqdmulh");
case ARM::BI__builtin_neon_vqdmull_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqmovn_v:
Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
case ARM::BI__builtin_neon_vqmovun_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
Ops, "vqdmull");
case ARM::BI__builtin_neon_vqneg_v:
case ARM::BI__builtin_neon_vqnegq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
Ops, "vqneg");
case ARM::BI__builtin_neon_vqrdmulh_v:
case ARM::BI__builtin_neon_vqrdmulhq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
Ops, "vqrdmulh");
case ARM::BI__builtin_neon_vqrshl_v:
case ARM::BI__builtin_neon_vqrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
case ARM::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case ARM::BI__builtin_neon_vqrshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case ARM::BI__builtin_neon_vqshl_v:
case ARM::BI__builtin_neon_vqshlq_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
case ARM::BI__builtin_neon_vqshl_n_v:
case ARM::BI__builtin_neon_vqshlq_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
case ARM::BI__builtin_neon_vqshlu_n_v:
case ARM::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
Ops, "vqshlu", 1, false);
case ARM::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n",
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
1, true);
case ARM::BI__builtin_neon_vqshrun_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case ARM::BI__builtin_neon_vqsub_v:
case ARM::BI__builtin_neon_vqsubq_v:
Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
case ARM::BI__builtin_neon_vraddhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
Ops, "vraddhn");
case ARM::BI__builtin_neon_vrecpe_v:
case ARM::BI__builtin_neon_vrecpeq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
Ops, "vrecpe");
case ARM::BI__builtin_neon_vrecps_v:
case ARM::BI__builtin_neon_vrecpsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
Ops, "vrecps");
case ARM::BI__builtin_neon_vrhadd_v:
case ARM::BI__builtin_neon_vrhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
case ARM::BI__builtin_neon_vrshl_v:
case ARM::BI__builtin_neon_vrshlq_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
case ARM::BI__builtin_neon_vrshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
case ARM::BI__builtin_neon_vrshr_n_v:
case ARM::BI__builtin_neon_vrshrq_n_v:
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
case ARM::BI__builtin_neon_vrsqrte_v:
case ARM::BI__builtin_neon_vrsqrteq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
Ops, "vrsqrte");
case ARM::BI__builtin_neon_vrsqrts_v:
case ARM::BI__builtin_neon_vrsqrtsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
Ops, "vrsqrts");
case ARM::BI__builtin_neon_vrsra_n_v:
case ARM::BI__builtin_neon_vrsraq_n_v:
@@ -1702,10 +1724,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case ARM::BI__builtin_neon_vrsubhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
Ops, "vrsubhn");
case ARM::BI__builtin_neon_vset_lane_i8:
case ARM::BI__builtin_neon_vset_lane_i16:
@@ -1722,16 +1744,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vshl_v:
case ARM::BI__builtin_neon_vshlq_v:
Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
case ARM::BI__builtin_neon_vshll_n_v:
Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1);
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
case ARM::BI__builtin_neon_vshl_n_v:
case ARM::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
case ARM::BI__builtin_neon_vshrn_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
Ops, "vshrn_n", 1, true);
case ARM::BI__builtin_neon_vshr_n_v:
case ARM::BI__builtin_neon_vshrq_n_v:
@@ -1747,7 +1769,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vsli_n_v:
case ARM::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
Ops, "vsli_n");
case ARM::BI__builtin_neon_vsra_n_v:
case ARM::BI__builtin_neon_vsraq_n_v:
@@ -1762,7 +1784,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst1_lane_v:
case ARM::BI__builtin_neon_vst1q_lane_v:
@@ -1773,35 +1795,35 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
Ops, "vsubhn");
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
@@ -2005,7 +2027,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
@@ -2065,15 +2087,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
case X86::BI__builtin_ia32_cmpps: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ return Builder.CreateCall(F, Ops, "cmpps");
}
case X86::BI__builtin_ia32_cmpss: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
const llvm::Type *PtrTy = Int8PtrTy;
@@ -2093,11 +2115,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_cmppd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ return Builder.CreateCall(F, Ops, "cmppd");
}
case X86::BI__builtin_ia32_cmpsd: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ return Builder.CreateCall(F, Ops, "cmpsd");
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
@@ -2141,7 +2163,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -2171,7 +2193,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+ return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -2323,7 +2345,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, Ops, name);
}
}
}
@@ -2379,7 +2401,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
// vec_st
@@ -2412,7 +2434,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ return Builder.CreateCall(F, Ops, "");
}
}
return 0;
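(Illustrative sketch, not part of the patch.) The CGBuiltin.cpp hunks above all perform the same mechanical migration: overloaded-intrinsic lookups now take an ArrayRef of overload types instead of a Type** pointer plus a count, and IRBuilder::CreateCall takes its operands as an ArrayRef instead of a begin/end pair; __builtin_expect additionally starts lowering to the llvm.expect intrinsic rather than being discarded. Below is a minimal standalone program exercising those same entry points. Everything in it (the function f, the module name, main itself) is made up for illustration only, and the header paths are the ones used by the LLVM tree of this era (they later moved under llvm/IR/).

#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Function.h"
#include "llvm/BasicBlock.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("expect-sketch", Ctx);
  IRBuilder<> B(Ctx);

  // i32 f(i32): returns llvm.expect.i32(%arg, 1).
  Type *I32 = B.getInt32Ty();
  Type *ArgTys[] = { I32 };
  Function *F = Function::Create(FunctionType::get(I32, ArgTys, false),
                                 Function::ExternalLinkage, "f", &M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  // Overload types are passed as an ArrayRef (here reusing ArgTys),
  // not as a Type** plus an element count.
  Function *Expect = Intrinsic::getDeclaration(&M, Intrinsic::expect, ArgTys);

  // Call operands are likewise passed as an ArrayRef<Value*>.
  Value *Args[] = { &*F->arg_begin(), B.getInt32(1) };
  Value *R = B.CreateCall(Expect, Args, "expval");
  B.CreateRet(R);

  M.print(outs(), 0);
  return 0;
}

The printed module should contain a call to @llvm.expect.i32, which is the same call the __builtin_expect hunk now emits in place of the old "FIXME: pass expect through to LLVM" behavior.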
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 92f1c63a3829..dcc28b45cfc3 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -34,7 +34,7 @@ static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
-const llvm::Type *
+llvm::Type *
CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index de4df3dcbe84..29f299a43e37 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -82,7 +82,7 @@ public:
/// Find the LLVM type used to represent the given member pointer
/// type.
- virtual const llvm::Type *
+ virtual llvm::Type *
ConvertMemberPointerType(const MemberPointerType *MPT);
/// Load a member function from an object and a member function
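(Illustrative sketch, not part of the patch.) Dropping const from llvm::Type in these ABI interfaces, replacing llvm::OpaqueType::get with an empty llvm::StructType, and deleting the IsRecursive/late-pointer-resolution plumbing in the CGCall.cpp hunks that follow are all fallout from LLVM's mid-2011 type-system rewrite: types are no longer immutable, refinable objects, and a recursive type is built by creating an identified struct first and filling in its body afterwards. A minimal sketch of that pattern follows, with made-up names and era-specific header paths (later moved under llvm/IR/); StructType::create/setBody are the spellings from LLVM 3.0 onward, and the in-flight revision this import tracks may still spell the former createNamed.

#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;

  // struct node { i32 value; node *next; }: the identified struct is
  // created empty so the self-referential pointer can refer to it, and
  // the body is filled in afterwards.  No OpaqueType or refinement step.
  StructType *Node = StructType::create(Ctx, "node");
  Type *Fields[] = { Type::getInt32Ty(Ctx), PointerType::getUnqual(Node) };
  Node->setBody(Fields);

  Node->print(outs());
  outs() << "\n";
  return 0;
}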
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 712ae89a4888..f8783ad08d00 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1,4 +1,4 @@
-//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -25,6 +25,8 @@
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;
@@ -65,31 +67,28 @@ static CanQualType GetReturnType(QualType RetTy) {
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
- bool IsRecursive) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
llvm::SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo(), IsRecursive);
+ FTNP->getExtInfo());
}
/// \param Args - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CanQual<FunctionProtoType> FTP,
- bool IsRecursive = false) {
+ CanQual<FunctionProtoType> FTP) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
+ return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
- bool IsRecursive) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
llvm::SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
+ return ::getFunctionInfo(*this, ArgTys, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -189,13 +188,15 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
e = MD->param_end(); i != e; ++i) {
ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
- return getFunctionInfo(GetReturnType(MD->getResultType()),
- ArgTys,
- FunctionType::ExtInfo(
- /*NoReturn*/ false,
- /*HasRegParm*/ false,
- /*RegParm*/ 0,
- getCallingConventionForDecl(MD)));
+
+ FunctionType::ExtInfo einfo;
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
@@ -240,8 +241,7 @@ const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info,
- bool IsRecursive) {
+ const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
for (llvm::SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
@@ -252,8 +252,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, Info, ResTy,
- ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());
void *InsertPos = 0;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
@@ -261,10 +260,14 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(), Info.getRegParm(), ResTy,
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
+ Info.getHasRegParm(), Info.getRegParm(), ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
+ bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
// Compute ABI information.
getABIInfo().computeInfo(*FI);
@@ -273,30 +276,29 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
// default now.
ABIArgInfo &RetInfo = FI->getReturnInfo();
if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
- RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+ RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
I != E; ++I)
if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
- I->info.setCoerceToType(ConvertTypeRecursive(I->type));
-
- // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
- // types, resolve them now. These pointers may point to this function, which
- // we *just* filled in the FunctionInfo for.
- if (!IsRecursive && !PointersToResolve.empty())
- HandleLateResolvedPointers();
+ I->info.setCoerceToType(ConvertType(I->type));
+ bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
return *FI;
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn, bool _HasRegParm, unsigned _RegParm,
+ bool _NoReturn, bool returnsRetained,
+ bool _HasRegParm, unsigned _RegParm,
CanQualType ResTy,
const CanQualType *ArgTys,
unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
+ NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
+ HasRegParm(_HasRegParm), RegParm(_RegParm)
{
NumArgs = NumArgTys;
@@ -310,8 +312,7 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
/***/
void CodeGenTypes::GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes,
- bool isRecursive) {
+ llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) {
const RecordType *RT = type->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
@@ -326,9 +327,9 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
QualType fieldType = FD->getType();
if (fieldType->isRecordType())
- GetExpandedTypes(fieldType, expandedTypes, isRecursive);
+ GetExpandedTypes(fieldType, expandedTypes);
else
- expandedTypes.push_back(ConvertType(fieldType, isRecursive));
+ expandedTypes.push_back(ConvertType(fieldType));
}
}
@@ -352,7 +353,7 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
AI = ExpandTypeFromArgs(FT, LV, AI);
} else {
- EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ EmitStoreThroughLValue(RValue::get(AI), LV);
++AI;
}
}
@@ -360,33 +361,6 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
return AI;
}
-void
-CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- llvm::SmallVector<llvm::Value*, 16> &Args) {
- const RecordType *RT = Ty->getAsStructureType();
- assert(RT && "Can only expand structure types.");
-
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- llvm::Value *Addr = RV.getAggregateAddr();
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- FieldDecl *FD = *i;
- QualType FT = FD->getType();
-
- // FIXME: What are the right qualifiers here?
- LValue LV = EmitLValueForField(Addr, FD, 0);
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
- ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
- } else {
- RValue RV = EmitLoadOfLValue(LV, FT);
- assert(RV.isScalar() &&
- "Unexpected non-scalar rvalue during struct expansion.");
- Args.push_back(RV.getScalarVal());
- }
- }
-}
-
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
@@ -622,7 +596,7 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
return false;
}
-const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
// For definition purposes, don't consider a K&R function variadic.
@@ -631,13 +605,16 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
Variadic = FPT->isVariadic();
- return GetFunctionType(FI, Variadic, false);
+ return GetFunctionType(FI, Variadic);
}
-const llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
- bool isRecursive) {
- llvm::SmallVector<const llvm::Type*, 8> argTypes;
+llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
+ llvm::SmallVector<llvm::Type*, 8> argTypes;
const llvm::Type *resultType = 0;
const ABIArgInfo &retAI = FI.getReturnInfo();
@@ -655,7 +632,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
resultType = llvm::Type::getVoidTy(getLLVMContext());
QualType ret = FI.getReturnType();
- const llvm::Type *ty = ConvertType(ret, isRecursive);
+ const llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = Context.getTargetAddressSpace(ret);
argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
break;
@@ -676,7 +653,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive);
+ const llvm::Type *LTy = ConvertTypeForMem(it->type);
argTypes.push_back(LTy->getPointerTo());
break;
}
@@ -686,7 +663,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
- const llvm::Type *argType = argAI.getCoerceToType();
+ llvm::Type *argType = argAI.getCoerceToType();
if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
argTypes.push_back(st->getElementType(i));
@@ -697,11 +674,14 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, argTypes, isRecursive);
+ GetExpandedTypes(it->type, argTypes);
break;
}
}
+ bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
@@ -709,16 +689,15 @@ const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!VerifyFuncTypeComplete(FPT)) {
- const CGFunctionInfo *Info;
- if (isa<CXXDestructorDecl>(MD))
- Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
- else
- Info = &getFunctionInfo(MD);
- return GetFunctionType(*Info, FPT->isVariadic(), false);
- }
-
- return llvm::OpaqueType::get(getLLVMContext());
+ if (!isFuncTypeConvertible(FPT))
+ return llvm::StructType::get(getLLVMContext());
+
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &getFunctionInfo(MD);
+ return GetFunctionType(*Info, FPT->isVariadic());
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
@@ -845,11 +824,11 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Expand: {
- llvm::SmallVector<const llvm::Type*, 8> types;
+ llvm::SmallVector<llvm::Type*, 8> types;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, types, false);
+ getTypes().GetExpandedTypes(ParamType, types);
Index += types.size();
continue;
}
@@ -1067,6 +1046,95 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // We must be immediately following the cast.
+ llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
+ if (BB->empty()) return 0;
+ if (&BB->back() != result) return 0;
+
+ const llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ llvm::SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return 0;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return 0;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // Look for an inline asm immediately preceding the call and kill it, too.
+ llvm::Instruction *prev = call->getPrevNode();
+ if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
+ if (asmCall->getCalledValue()
+ == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
+ insnsToKill.push_back(prev);
+ } else {
+ return 0;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
+
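For readers unfamiliar with the peephole in tryEmitFusedAutoreleaseOfResult above, here is a minimal standalone sketch (plain C++, not clang/LLVM API) of the same idea over a toy instruction list; the marker-asm handling and single-use checks are omitted, and the names Instr, Kind and fuseAutorelease are invented for illustration.

#include <cassert>
#include <vector>

enum class Kind { Retain, RetainAutoreleasedRV, Bitcast, Other };

struct Instr {
  Kind kind;
  int operand;  // index of the instruction this one consumes, or -1
  bool dead;    // marked for deletion by the peephole
};

// Returns the index of the value to retain+autorelease as one fused call,
// or -1 if the pattern does not match and a plain autorelease is needed.
int fuseAutorelease(std::vector<Instr> &block) {
  if (block.empty()) return -1;
  int cur = static_cast<int>(block.size()) - 1;  // result must be last in BB

  std::vector<int> toKill;
  // Strip the chain of bitcasts sitting between the retain and the return.
  while (block[cur].kind == Kind::Bitcast) {
    if (cur == 0 || block[cur].operand != cur - 1)
      return -1;                                 // must be immediately adjacent
    toKill.push_back(cur);
    cur = block[cur].operand;
  }

  // The value must have been produced by a retain of some earlier value.
  if (block[cur].kind != Kind::Retain &&
      block[cur].kind != Kind::RetainAutoreleasedRV)
    return -1;
  toKill.push_back(cur);

  for (int i : toKill) block[i].dead = true;     // delete the dead chain
  return block[cur].operand;                     // fuse retain+autorelease here
}

int main() {
  // %0 = ..., %1 = objc_retain(%0), %2 = bitcast %1 ... return %2
  std::vector<Instr> bb = {{Kind::Other, -1, false},
                           {Kind::Retain, 0, false},
                           {Kind::Bitcast, 1, false}};
  assert(fuseAutorelease(bb) == 0);  // autorelease %0, fused with the retain
  assert(bb[1].dead && bb[2].dead);  // the retain and the bitcast go away
  return 0;
}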
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
if (ReturnValue == 0) {
@@ -1134,6 +1202,16 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
break;
case ABIArgInfo::Ignore:
@@ -1183,13 +1261,157 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(value), type);
}
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ llvm::Value *srcAddr = writeback.Address;
+ assert(!isProvablyNull(srcAddr) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value,
+ cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+ QualType srcAddrType = writeback.AddressType;
+ CGF.EmitStoreThroughLValue(RValue::get(value),
+ CGF.MakeAddrLValue(srcAddr, srcAddrType));
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (CallArgList::writeback_iterator
+ i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
+ emitWriteback(CGF, *i);
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of a temporary that is either
+/// copy-initialized from the original argument or zero-initialized,
+/// and whose value is written back to the original address after the call.
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ const llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr)) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ // Create the temporary.
+ llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
+ "icr.temp");
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the address is *not* known to be non-null, we have to choose the
+ // argument dynamically.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (provablyNonNull) {
+ finalArgument = temp;
+ } else {
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp, "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ }
+ }
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull)
+ CGF.EmitBlock(contBB);
+
+ args.addWriteback(srcAddr, srcAddrType, temp);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
+
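As a rough source-level illustration of the call-by-writeback shape emitted above, here is a hand-written C++ equivalent under simplified assumptions: no ObjC types, shouldCopy stands in for CRE->shouldCopy(), and the dynamic null-check/select for addresses that are neither provably null nor provably non-null is folded into a plain if. The names callee and callWithWriteback are invented.

#include <cassert>

// What the callee sees: it may overwrite *out, the writeback parameter.
void callee(int *out) { if (out) *out = 42; }

// Hand-written equivalent of the emitted sequence: a provably null source is
// passed straight through; otherwise a temporary is passed (copy-initialized
// or zero-initialized) and its value is written back afterwards.
void callWithWriteback(int *src, bool shouldCopy) {
  if (src == nullptr) {          // the isProvablyNull() fast path
    callee(nullptr);
    return;
  }
  int temp = shouldCopy ? *src : 0;
  callee(&temp);
  *src = temp;                   // the writeback
}

int main() {
  int x = 7;
  callWithWriteback(&x, /*shouldCopy=*/true);
  assert(x == 42);
  callWithWriteback(nullptr, /*shouldCopy=*/true);  // no temp, no writeback
  return 0;
}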
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getContext().getLangOptions().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
if (type->isReferenceType())
return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
- if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
+ if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
+ isa<ImplicitCastExpr>(E) &&
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
@@ -1205,20 +1427,71 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- llvm::Value * const *ArgBegin,
- llvm::Value * const *ArgEnd,
+ llvm::ArrayRef<llvm::Value *> Args,
const llvm::Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
if (!InvokeDest)
- return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+ return Builder.CreateCall(Callee, Args, Name);
llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
- ArgBegin, ArgEnd, Name);
+ Args, Name);
EmitBlock(ContBB);
return Invoke;
}
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ const llvm::Twine &Name) {
+ return EmitCallOrInvoke(Callee, llvm::ArrayRef<llvm::Value *>(), Name);
+}
+
+static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
+ llvm::FunctionType *FTy) {
+ if (ArgNo < FTy->getNumParams())
+ assert(Elt->getType() == FTy->getParamType(ArgNo));
+ else
+ assert(FTy->isVarArg());
+ ++ArgNo;
+}
+
+void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*,16> &Args,
+ llvm::FunctionType *IRFuncTy) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()),
+ Args, IRFuncTy);
+ continue;
+ }
+
+ RValue RV = EmitLoadOfLValue(LV);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (Args.size() < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(Args.size()))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
+
+ Args.push_back(V);
+ }
+}
+
+
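A small standalone sketch of the recursion in ExpandTypeToArgs, using a toy type model in place of QualType/RValue: aggregate fields are flattened depth-first into a flat argument list, one entry per scalar leaf. All names here are illustrative only.

#include <cassert>
#include <vector>

struct Type {
  std::vector<Type> fields;                 // empty => scalar
  bool isAggregate() const { return !fields.empty(); }
};

// Append one flat argument per scalar leaf, in declaration order.
void expandTypeToArgs(const Type &ty, std::vector<int> &args, int &nextId) {
  if (!ty.isAggregate()) {
    args.push_back(nextId++);               // scalar: one IR argument
    return;
  }
  for (const Type &field : ty.fields)       // aggregate: recurse per field
    expandTypeToArgs(field, args, nextId);
}

int main() {
  // struct { int a; struct { int b; int c; } inner; } expands to 3 arguments.
  Type inner{{Type{}, Type{}}};
  Type outer{{Type{}, inner}};
  std::vector<int> args;
  int nextId = 0;
  expandTypeToArgs(outer, args, nextId);
  assert(args.size() == 3);
  return 0;
}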
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
@@ -1233,6 +1506,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+ // IRArgNo - Keep track of the argument number in the callee we're looking at.
+ unsigned IRArgNo = 0;
+ llvm::FunctionType *IRFuncTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(Callee->getType())->getElementType());
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
@@ -1241,6 +1519,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!Value)
Value = CreateMemTemp(RetTy);
Args.push_back(Value);
+ checkArgMatches(Value, IRArgNo, IRFuncTy);
}
assert(CallInfo.arg_size() == CallArgs.size() &&
@@ -1251,24 +1530,54 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- unsigned Alignment =
+ unsigned TypeAlign =
getContext().getTypeAlignInChars(I->Ty).getQuantity();
switch (ArgInfo.getKind()) {
case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
- Args.push_back(CreateMemTemp(I->Ty));
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (ArgInfo.getIndirectAlign() > AI->getAlignment())
+ AI->setAlignment(ArgInfo.getIndirectAlign());
+ Args.push_back(AI);
+
if (RV.isScalar())
EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
- Alignment, I->Ty);
+ TypeAlign, I->Ty);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
- } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
- Args.push_back(CreateMemTemp(I->Ty));
- EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
- RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
- Args.push_back(RV.getAggregateAddr());
+ // We want to avoid creating an unnecessary temporary+copy here;
+ // however, we need one in two cases:
+ // 1. If the argument is not byval, and we are required to copy the
+ // source. (This case doesn't occur on any common architecture.)
+ // 2. If the argument is byval, RV is not sufficiently aligned, and
+ // we cannot force it to be sufficiently aligned.
+ llvm::Value *Addr = RV.getAggregateAddr();
+ unsigned Align = ArgInfo.getIndirectAlign();
+ const llvm::TargetData *TD = &CGM.getTargetData();
+ if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+ (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
+ // Create an aligned temporary, and copy to it.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (Align > AI->getAlignment())
+ AI->setAlignment(Align);
+ Args.push_back(AI);
+ EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // Skip the extra memcpy call.
+ Args.push_back(Addr);
+
+ // Validate argument match.
+ checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ }
}
break;
}
@@ -1281,10 +1590,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
+ llvm::Value *V;
if (RV.isScalar())
- Args.push_back(RV.getScalarVal());
+ V = RV.getScalarVal();
else
- Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ V = Builder.CreateLoad(RV.getAggregateAddr());
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ if (IRArgNo < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRArgNo))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
+ Args.push_back(V);
+
+ checkArgMatches(V, IRArgNo, IRFuncTy);
break;
}
@@ -1292,7 +1611,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *SrcPtr;
if (RV.isScalar()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
} else if (RV.isComplex()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
@@ -1321,18 +1640,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// We don't know what we're loading from.
LI->setAlignment(1);
Args.push_back(LI);
+
+ // Validate argument match.
+ checkArgMatches(LI, IRArgNo, IRFuncTy);
}
} else {
// In the simple case, just pass the coerced loaded value.
Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
+
+ // Validate argument match.
+ checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
}
break;
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->Ty, RV, Args);
+ ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
+ IRArgNo = Args.size();
break;
}
}
@@ -1367,7 +1693,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
-
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
@@ -1380,11 +1705,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ CS = Builder.CreateCall(Callee, Args);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
- Args.data(), Args.data()+Args.size());
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
EmitBlock(Cont);
}
if (callOrInvoke)
@@ -1413,6 +1737,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
CI->setName("call");
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
switch (RetAI.getKind()) {
case ABIArgInfo::Indirect: {
unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
@@ -1430,8 +1759,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
- RetAI.getDirectOffset() == 0) {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
if (RetTy->isAnyComplexType()) {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
@@ -1448,7 +1777,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
return RValue::getAggregate(DestPtr);
}
- return RValue::get(CI);
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
}
llvm::Value *DestPtr = ReturnValue.getValue();
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 160a62eab367..343b944bf6c9 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -58,9 +58,44 @@ namespace CodeGen {
class CallArgList :
public llvm::SmallVector<CallArg, 16> {
public:
+ struct Writeback {
+ /// The original argument.
+ llvm::Value *Address;
+
+ /// The pointee type of the original argument.
+ QualType AddressType;
+
+ /// The temporary alloca.
+ llvm::Value *Temporary;
+ };
+
void add(RValue rvalue, QualType type, bool needscopy = false) {
push_back(CallArg(rvalue, type, needscopy));
}
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(llvm::Value *address, QualType addressType,
+ llvm::Value *temporary) {
+ Writeback writeback;
+ writeback.Address = address;
+ writeback.AddressType = addressType;
+ writeback.Temporary = temporary;
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef llvm::SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ writeback_iterator writeback_begin() const { return Writebacks.begin(); }
+ writeback_iterator writeback_end() const { return Writebacks.end(); }
+
+ private:
+ llvm::SmallVector<Writeback, 1> Writebacks;
};
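An illustrative miniature (not the clang class) of how the new Writeback list rides along with the arguments, including the addFrom() merge; the container and member names below are simplified stand-ins for CallArg, QualType, and llvm::Value*.

#include <cassert>
#include <vector>

struct Writeback { int *address; int *temporary; };

struct MiniCallArgList {
  std::vector<int> args;                    // stands in for the CallArg vector
  std::vector<Writeback> writebacks;

  void add(int rvalue) { args.push_back(rvalue); }
  void addWriteback(int *address, int *temporary) {
    writebacks.push_back({address, temporary});
  }
  // Merging another list must carry its pending writebacks along too.
  void addFrom(const MiniCallArgList &other) {
    args.insert(args.end(), other.args.begin(), other.args.end());
    writebacks.insert(writebacks.end(), other.writebacks.begin(),
                      other.writebacks.end());
  }
  bool hasWritebacks() const { return !writebacks.empty(); }
};

int main() {
  int slot = 0, temp = 5;
  MiniCallArgList inner, outer;
  inner.add(1);
  inner.addWriteback(&slot, &temp);
  outer.addFrom(inner);
  assert(outer.hasWritebacks() && outer.args.size() == 1);
  return 0;
}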
/// FunctionArgList - Type for representing both the decl and type
@@ -88,6 +123,9 @@ namespace CodeGen {
/// Whether this function is noreturn.
bool NoReturn;
+ /// Whether this function is returns-retained.
+ bool ReturnsRetained;
+
unsigned NumArgs;
ArgInfo *Args;
@@ -100,7 +138,8 @@ namespace CodeGen {
typedef ArgInfo *arg_iterator;
CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
- bool HasRegParm, unsigned RegParm, CanQualType ResTy,
+ bool ReturnsRetained, bool HasRegParm, unsigned RegParm,
+ CanQualType ResTy,
const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -113,6 +152,10 @@ namespace CodeGen {
bool isNoReturn() const { return NoReturn; }
+ /// In ARC, whether this function retains its return value. This
+ /// is not always reliable for call sites.
+ bool isReturnsRetained() const { return ReturnsRetained; }
+
/// getCallingConvention - Return the user specified calling
/// convention.
unsigned getCallingConvention() const { return CallingConvention; }
@@ -137,6 +180,7 @@ namespace CodeGen {
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getCallingConvention());
ID.AddBoolean(NoReturn);
+ ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(HasRegParm);
ID.AddInteger(RegParm);
getReturnType().Profile(ID);
@@ -151,6 +195,7 @@ namespace CodeGen {
Iterator end) {
ID.AddInteger(Info.getCC());
ID.AddBoolean(Info.getNoReturn());
+ ID.AddBoolean(Info.getProducesResult());
ID.AddBoolean(Info.getHasRegParm());
ID.AddInteger(Info.getRegParm());
ResTy.Profile(ID);
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 5725d80b7d03..7dbaaf85299f 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -329,7 +329,7 @@ namespace {
CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
: BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const CXXRecordDecl *DerivedClass =
cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
@@ -398,7 +398,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
- AggValueSlot AggSlot = AggValueSlot::forAddr(V, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot = AggValueSlot::forAddr(V, Qualifiers(),
+ /*Lifetime*/ true);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
@@ -428,10 +429,18 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.Builder.CreateStore(Next, ArrayIndexVar);
}
- AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.isVolatileQualified(),
- /*Lifetime*/ true);
-
- CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ if (!CGF.hasAggregateLLVMType(T)) {
+ LValue lvalue = CGF.MakeAddrLValue(Dest, T);
+ CGF.EmitScalarInit(MemberInit->getInit(), /*decl*/ 0, lvalue, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.getQuals(),
+ /*Lifetime*/ true);
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Slot);
+ }
return;
}
@@ -502,7 +511,7 @@ namespace {
CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
: Field(Field), Dtor(Dtor) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// FIXME: Is this OK for C++0x delegating constructors?
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
@@ -540,15 +549,15 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// FIXME: If there's no initializer and the CXXCtorInitializer
// was implicitly generated, we shouldn't be zeroing memory.
- RValue RHS;
- if (FieldType->isReferenceType()) {
- RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
- } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
+ if (FieldType->isArrayType() && !MemberInit->getInit()) {
CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
- RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
- CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
+ if (LHS.isSimple()) {
+ CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
+ } else {
+ RValue RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
+ CGF.EmitStoreThroughLValue(RHS, LHS);
+ }
} else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
LHS.isVolatileQualified());
@@ -576,11 +585,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
CGF.Builder.CreateStore(Zero, ArrayIndexVar);
- // If we are copying an array of scalars or classes with trivial copy
+ // If we are copying an array of PODs or classes with trivial copy
// constructors, perform a single aggregate copy.
- const RecordType *Record = BaseElementTy->getAs<RecordType>();
- if (!Record ||
- cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && Record->hasTrivialCopyConstructor())) {
 // Find the source pointer. We know it's the last argument because
// we know we're in a copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
@@ -912,7 +921,7 @@ namespace {
struct CallDtorDelete : EHScopeStack::Cleanup {
CallDtorDelete() {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
@@ -920,48 +929,25 @@ namespace {
}
};
- struct CallArrayFieldDtor : EHScopeStack::Cleanup {
- const FieldDecl *Field;
- CallArrayFieldDtor(const FieldDecl *Field) : Field(Field) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
- CGF.getContext().getAsConstantArrayType(FieldType);
+ class DestroyField : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+
+ public:
+ DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : field(field), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Find the address of the field.
+ llvm::Value *thisValue = CGF.LoadCXXThis();
+ LValue LV = CGF.EmitLValueForField(thisValue, field, /*CVRQualifiers=*/0);
+ assert(LV.isSimple());
- QualType BaseType =
- CGF.getContext().getBaseElementType(Array->getElementType());
- const CXXRecordDecl *FieldClassDecl = BaseType->getAsCXXRecordDecl();
-
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
- // FIXME: Qualifiers?
- /*CVRQualifiers=*/0);
-
- const llvm::Type *BasePtr = CGF.ConvertType(BaseType)->getPointerTo();
- llvm::Value *BaseAddrPtr =
- CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- CGF.EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
- Array, BaseAddrPtr);
- }
- };
-
- struct CallFieldDtor : EHScopeStack::Cleanup {
- const FieldDecl *Field;
- CallFieldDtor(const FieldDecl *Field) : Field(Field) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- const CXXRecordDecl *FieldClassDecl =
- Field->getType()->getAsCXXRecordDecl();
-
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
- // FIXME: Qualifiers?
- /*CVRQualifiers=*/0);
-
- CGF.EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
}
@@ -1035,96 +1021,103 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
E = ClassDecl->field_end(); I != E; ++I) {
- const FieldDecl *Field = *I;
-
- QualType FieldType = getContext().getCanonicalType(Field->getType());
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(Array->getElementType());
-
- const RecordType *RT = FieldType->getAs<RecordType>();
- if (!RT)
- continue;
-
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (FieldClassDecl->hasTrivialDestructor())
- continue;
-
- if (Array)
- EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
- else
- EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
+ const FieldDecl *field = *I;
+ QualType type = field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ EHStack.pushCleanup<DestroyField>(cleanupKind, field,
+ getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
}
}
-/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
-/// for-loop to call the default constructor on individual members of the
-/// array.
-/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
-/// array type and 'ArrayPtr' points to the beginning fo the array.
-/// It is assumed that all relevant checks have been made by the caller.
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
///
-/// \param ZeroInitialization True if each element should be zero-initialized
-/// before it is constructed.
+/// \param ctor the constructor to call for each element
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- bool ZeroInitialization) {
-
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
- llvm::Value * NumElements =
- llvm::ConstantInt::get(SizeTy,
- getContext().getConstantArrayElementCount(ArrayTy));
-
- EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd,
- ZeroInitialization);
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+ QualType elementType;
+ llvm::Value *numElements =
+ emitArrayLength(arrayType, elementType, arrayBegin);
+
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
+ argBegin, argEnd, zeroInitialize);
}
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayBegin a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- llvm::Value *NumElements,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- bool ZeroInitialization) {
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- Builder.CreateStore(Zero, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index < number-of-elements fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+ llvm::BranchInst *zeroCheckBranch = 0;
+
+ // Optimize for a constant count.
+ llvm::ConstantInt *constantCount
+ = dyn_cast<llvm::ConstantInt>(numElements);
+ if (constantCount) {
+ // Just skip out if the constant count is zero.
+ if (constantCount->isZero()) return;
+
+ // Otherwise, emit the check.
+ } else {
+ llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
+ llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
+ zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
+ EmitBlock(loopBB);
+ }
+
+ // Find the end of the array.
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
+ "arrayctor.end");
+
+ // Enter the loop, setting up a phi for the current location to initialize.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
+ EmitBlock(loopBB);
+ llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
+ "arrayctor.cur");
+ cur->addIncoming(arrayBegin, entryBB);
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
- "arrayidx");
+
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
// Zero initialize the storage, if requested.
- if (ZeroInitialization)
- EmitNullInitialization(Address,
- getContext().getTypeDeclType(D->getParent()));
+ if (zeroInitialize)
+ EmitNullInitialization(cur, type);
// C++ [class.temporary]p4:
// There are two contexts in which temporaries are destroyed at a different
@@ -1134,99 +1127,47 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
// every temporary created in a default argument expression is sequenced
// before the construction of the next array element, if any.
- // Keep track of the current number of live temporaries.
{
RunCleanupsScope Scope(*this);
- EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
- ArgBeg, ArgEnd);
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOptions().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ Destroyer *destroyer = destroyCXXObject;
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ }
+
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
+ cur, argBegin, argEnd);
}
- EmitBlock(ContinueBlock);
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
+ "arrayctor.next");
+ cur->addIncoming(next, Builder.GetInsertBlock());
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
+ // Check whether that's the end of the loop.
+ llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
+ llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
+ Builder.CreateCondBr(done, contBB, loopBB);
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
+ // Patch the earlier check to skip over the loop.
+ if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
- assert(CA && "Do we support VLA for destruction ?");
- uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
-
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
- EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+ EmitBlock(contBB);
}
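The rewritten loop above replaces an index-based for-loop with a cursor pointer that is bumped from arrayBegin to arrayEnd, guarded by a zero-count check. A standalone C++ model of that shape, with invented names, looks like this; the do/while form mirrors the emitted IR, where the body is entered once the zero check has passed and the exit test compares the bumped pointer against the precomputed end.

#include <cassert>
#include <cstddef>
#include <new>

struct Widget { int value = 1; };

// Construct numElements Widgets in place, bumping a cursor pointer instead of
// maintaining a separate index, like the emitted arrayctor loop.
void constructArray(Widget *arrayBegin, std::size_t numElements) {
  if (numElements == 0) return;                 // the zero-count fast path
  Widget *cur = arrayBegin;                     // "arrayctor.cur"
  Widget *end = arrayBegin + numElements;       // "arrayctor.end"
  do {
    ::new (static_cast<void *>(cur)) Widget();  // construct one element
    ++cur;                                      // "arrayctor.next"
  } while (cur != end);                         // "arrayctor.done"
}

int main() {
  void *raw = ::operator new(4 * sizeof(Widget));
  Widget *array = static_cast<Widget *>(raw);
  constructArray(array, 4);
  assert(array[3].value == 1);
  constructArray(array, 0);                     // loop body never runs
  ::operator delete(raw);
  return 0;
}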
-/// EmitCXXAggrDestructorCall - calls the default destructor on array
-/// elements in reverse order of construction.
-void
-CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- llvm::Value *UpperCount,
- llvm::Value *This) {
- const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
- llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
-
- // Create a temporary for the loop index and initialize it with count of
- // array elements.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");
-
- // Store the number of elements in the index pointer.
- Builder.CreateStore(UpperCount, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index != 0 fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value* zeroConstant =
- llvm::Constant::getNullValue(SizeLTy);
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
- "isne");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsNE, ForBody, AfterFor);
-
- EmitBlock(ForBody);
-
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One);
- llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Address);
-
- EmitBlock(ContinueBlock);
-
- // Emit the decrement of the loop counter.
- Counter = Builder.CreateLoad(IndexPtr);
- Counter = Builder.CreateSub(Counter, One, "dec");
- Builder.CreateStore(Counter, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
+void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ assert(!dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+ addr);
}
void
@@ -1370,7 +1311,7 @@ namespace {
CXXDtorType Type)
: Dtor(D), Addr(Addr), Type(Type) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
Addr);
}
@@ -1384,7 +1325,8 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
- AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, false, /*Lifetime*/ true);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(), /*Lifetime*/ true);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
@@ -1424,7 +1366,7 @@ namespace {
CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
: Dtor(D), Addr(Addr) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Addr);
}
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 41ecd8111790..9c5dd1f23721 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -49,8 +49,7 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
const llvm::Type *ComplexTy =
- llvm::StructType::get(CGF.getLLVMContext(),
- V.first->getType(), V.second->getType(),
+ llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) 0);
llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
@@ -257,9 +256,7 @@ void CodeGenFunction::initFullExprCleanup() {
if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}
-EHScopeStack::Cleanup::~Cleanup() {
- llvm_unreachable("Cleanup is indestructable");
-}
+void EHScopeStack::Cleanup::anchor() {}
/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
@@ -421,13 +418,13 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
// Kill the branch.
Br->eraseFromParent();
- // Merge the blocks.
- Pred->getInstList().splice(Pred->end(), Entry->getInstList());
-
// Replace all uses of the entry with the predecessor, in case there
// are phis in the cleanup.
Entry->replaceAllUsesWith(Pred);
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
// Kill the entry block.
Entry->eraseFromParent();
@@ -439,10 +436,10 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
static void EmitCleanup(CodeGenFunction &CGF,
EHScopeStack::Cleanup *Fn,
- bool ForEH,
+ EHScopeStack::Cleanup::Flags flags,
llvm::Value *ActiveFlag) {
// EH cleanups always occur within a terminate scope.
- if (ForEH) CGF.EHStack.pushTerminate();
+ if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();
// If there's an active flag, load it and skip the cleanup if it's
// false.
@@ -457,7 +454,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
}
// Ask the cleanup to emit itself.
- Fn->Emit(CGF, ForEH);
+ Fn->Emit(CGF, flags);
assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
// Emit the continuation block if there was an active flag.
@@ -465,7 +462,7 @@ static void EmitCleanup(CodeGenFunction &CGF,
CGF.EmitBlock(ContBB);
// Leave the terminate scope.
- if (ForEH) CGF.EHStack.popTerminate();
+ if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
}
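The Flags parameter threaded through EmitCleanup above distinguishes the cleanup's kind (normal, EH, or both) from the path currently being emitted. A guessed reconstruction of that bitset, for illustration only and not the actual clang header, might look like:

#include <cassert>

class CleanupFlags {
  enum { ForEH = 1, NormalKind = 2, EHKind = 4 };
  unsigned bits;
public:
  CleanupFlags() : bits(0) {}

  void setIsForEHCleanup()      { bits |= ForEH; }
  void setIsNormalCleanupKind() { bits |= NormalKind; }
  void setIsEHCleanupKind()     { bits |= EHKind; }

  bool isForEHCleanup() const      { return (bits & ForEH) != 0; }
  bool isForNormalCleanup() const  { return !isForEHCleanup(); }
  bool isNormalCleanupKind() const { return (bits & NormalKind) != 0; }
  bool isEHCleanupKind() const     { return (bits & EHKind) != 0; }
};

int main() {
  CleanupFlags flags;
  flags.setIsNormalCleanupKind();
  assert(flags.isForNormalCleanup());            // normal path by default
  flags.setIsForEHCleanup();                     // flipped before the EH emit
  assert(flags.isForEHCleanup() && !flags.isForNormalCleanup());
  return 0;
}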
static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
@@ -540,6 +537,12 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
RequiresNormalCleanup = true;
}
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
+
// Even if we don't need the normal cleanup, we might still have
// prebranched fallthrough to worry about.
if (Scope.isNormalCleanup() && !RequiresNormalCleanup &&
@@ -663,7 +666,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHStack.popCleanup();
- EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
// Otherwise, the best approach is to thread everything through
// the cleanup block and then try to clean up after ourselves.
@@ -774,7 +777,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHStack.popCleanup();
assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
- EmitCleanup(*this, Fn, /*ForEH*/ false, NormalActiveFlag);
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
// Append the prepared cleanup prologue from above.
llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
@@ -857,7 +860,9 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
EmitBlock(EHEntry);
- EmitCleanup(*this, Fn, /*ForEH*/ true, EHActiveFlag);
+
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
// Append the prepared cleanup prologue from above.
llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 98d30db647d5..4c1244591743 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -314,8 +314,9 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm::SmallVector<llvm::Value *, 16> EltTys;
llvm::DIType FieldTy =
- DBuilder.createMemberType("isa", getOrCreateMainFile(),
- 0,Size, 0, 0, 0, ISATy);
+ DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ getOrCreateMainFile(), 0, Size,
+ 0, 0, 0, ISATy);
EltTys.push_back(FieldTy);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
@@ -389,6 +390,7 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
// Ignore these qualifiers for now.
Qc.removeObjCGCAttr();
Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
// We will create one Derived type for one qualifier and recurse to handle any
// additional ones.
@@ -528,7 +530,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DBuilder.createMemberType("__descriptor", Unit,
+ FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
LineNo, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
@@ -591,7 +593,8 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
SourceLocation loc,
AccessSpecifier AS,
uint64_t offsetInBits,
- llvm::DIFile tunit) {
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope) {
llvm::DIType debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -613,15 +616,16 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
else if (AS == clang::AS_protected)
flags |= llvm::DIDescriptor::FlagProtected;
- return DBuilder.createMemberType(name, file, line, sizeInBits, alignInBits,
- offsetInBits, flags, debugType);
+ return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
+ alignInBits, offsetInBits, flags, debugType);
}
/// CollectRecordFields - A helper function to collect debug info for
/// record fields. This is used while creating debug info entry for a Record.
void CGDebugInfo::
CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
- llvm::SmallVectorImpl<llvm::Value *> &elements) {
+ llvm::SmallVectorImpl<llvm::Value *> &elements,
+ llvm::DIType RecordTy) {
unsigned fieldNo = 0;
const FieldDecl *LastFD = 0;
bool IsMsStruct = record->hasAttr<MsStructAttr>();
@@ -652,7 +656,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
llvm::DIType fieldType
= createFieldType(name, type, field->getBitWidth(),
field->getLocation(), field->getAccess(),
- layout.getFieldOffset(fieldNo), tunit);
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
elements.push_back(fieldType);
}
@@ -960,7 +964,7 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
- = DBuilder.createMemberType(getVTableName(RD), Unit,
+ = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
0, Size, 0, 0, 0,
getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
@@ -1048,7 +1052,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
}
}
- CollectRecordFields(RD, Unit, EltTys);
+ CollectRecordFields(RD, Unit, EltTys, FwdDecl);
llvm::DIArray TParamsArray;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
@@ -1379,13 +1383,13 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
// FIXME: This should probably be a function type instead.
ElementTypes[0] =
- DBuilder.createMemberType("ptr", U, 0,
+ DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
PointerDiffDITy);
FieldOffset += Info.first;
ElementTypes[1] =
- DBuilder.createMemberType("ptr", U, 0,
+ DBuilder.createMemberType(U, "ptr", U, 0,
Info.first, Info.second, FieldOffset, 0,
PointerDiffDITy);
@@ -1520,10 +1524,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
#include "clang/AST/TypeNodes.def"
assert(false && "Dependent types cannot show up in debug information");
- // FIXME: Handle these.
case Type::ExtVector:
- return llvm::DIType();
-
case Type::Vector:
return CreateType(cast<VectorType>(Ty), Unit);
case Type::ObjCObjectPointer:
@@ -1586,7 +1587,7 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DBuilder.createMemberType(Name, Unit, 0,
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
FieldSize, FieldAlign,
*Offset, 0, FieldTy);
*Offset += FieldSize;
@@ -1897,7 +1898,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
- FieldTy = DBuilder.createMemberType(VD->getName(), Unit,
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
0, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
@@ -2133,23 +2134,23 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::SmallVector<llvm::Value*, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(0),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(1),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(2),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(3),
- tunit));
+ tunit, tunit));
fields.push_back(createFieldType("__descriptor",
C.getPointerType(block.NeedsCopyDispose ?
C.getBlockDescriptorExtendedType() :
C.getBlockDescriptorType()),
0, loc, AS_public,
blockLayout->getElementOffsetInBits(4),
- tunit));
+ tunit, tunit));
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
@@ -2198,7 +2199,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
QualType type = method->getThisType(C);
fields.push_back(createFieldType("this", type, 0, loc, AS_public,
- offsetInBits, tunit));
+ offsetInBits, tunit, tunit));
continue;
}
@@ -2213,12 +2214,12 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
- fieldType = DBuilder.createMemberType(name, tunit, line,
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
ptrInfo.first, ptrInfo.second,
offsetInBits, 0, fieldType);
} else {
fieldType = createFieldType(name, variable->getType(), 0,
- loc, AS_public, offsetInBits, tunit);
+ loc, AS_public, offsetInBits, tunit, tunit);
}
fields.push_back(fieldType);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 6ec6b65f5e01..f87d0072e323 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -33,11 +33,11 @@ namespace clang {
class VarDecl;
class ObjCInterfaceDecl;
class ClassTemplateSpecializationDecl;
+ class GlobalDecl;
namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
- class GlobalDecl;
class CGBlockInfo;
/// CGDebugInfo - This class gathers all debug information during compilation
@@ -139,9 +139,11 @@ class CGDebugInfo {
llvm::DIType createFieldType(llvm::StringRef name, QualType type,
Expr *bitWidth, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
- llvm::DIFile tunit);
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &E);
+ llvm::SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType RecordTy);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 8a1a8536dab1..62c3a9791d0f 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -98,7 +98,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
QualType Ty = TD.getUnderlyingType();
if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
+ EmitVariablyModifiedType(Ty);
}
}
}
@@ -258,7 +258,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// even though that doesn't really make any sense.
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
- EmitVLASize(D.getType());
+ EmitVariablyModifiedType(D.getType());
// Local static block variables must be treated as globals as they may be
 // referenced in their RHS initializer block-literal expression.
@@ -304,38 +304,40 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
}
namespace {
- struct CallArrayDtor : EHScopeStack::Cleanup {
- CallArrayDtor(const CXXDestructorDecl *Dtor,
- const ConstantArrayType *Type,
- llvm::Value *Loc)
- : Dtor(Dtor), Type(Type), Loc(Loc) {}
-
- const CXXDestructorDecl *Dtor;
- const ConstantArrayType *Type;
- llvm::Value *Loc;
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
- const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
- CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ struct DestroyObject : EHScopeStack::Cleanup {
+ DestroyObject(llvm::Value *addr, QualType type,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), type(type), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ llvm::Value *addr;
+ QualType type;
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Don't use an EH cleanup recursively from an EH cleanup.
+ bool useEHCleanupForArray =
+ flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+ CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
}
};
- struct CallVarDtor : EHScopeStack::Cleanup {
- CallVarDtor(const CXXDestructorDecl *Dtor,
- llvm::Value *NRVOFlag,
- llvm::Value *Loc)
- : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+ struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(llvm::Value *addr,
+ const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
llvm::Value *Loc;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Along the exceptions path we always execute the dtor.
- bool NRVO = !IsForEH && NRVOFlag;
+ bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
llvm::BasicBlock *SkipDtorBB = 0;
if (NRVO) {
@@ -353,19 +355,31 @@ namespace {
if (NRVO) CGF.EmitBlock(SkipDtorBB);
}
};
-}
-namespace {
struct CallStackRestore : EHScopeStack::Cleanup {
llvm::Value *Stack;
CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, V);
}
};
+ struct ExtendGCLifetime : EHScopeStack::Cleanup {
+ const VarDecl &Var;
+ ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Compute the address of the local variable, in case it's a
+ // byref or something.
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
+ SourceLocation());
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
+ CGF.EmitExtendGCLifetime(value);
+ }
+ };
+
struct CallCleanupFunction : EHScopeStack::Cleanup {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
@@ -375,7 +389,7 @@ namespace {
const VarDecl *Var)
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
DeclRefExpr DRE(const_cast<VarDecl*>(&Var), Var.getType(), VK_LValue,
SourceLocation());
// Compute the address of the local variable, in case it's a byref
@@ -400,6 +414,207 @@ namespace {
};
}
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ llvm::Value *addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CodeGenFunction::Destroyer &destroyer =
+ (var.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? CodeGenFunction::destroyARCStrongPrecise
+ : CodeGenFunction::destroyARCStrongImprecise);
+
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
+ cleanupKind & EHCleanup);
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanup*/ true);
+ break;
+ }
+}
+
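Tabulating the switch above with a toy enum makes the policy easier to see: only __strong and __weak locals get a destroy cleanup, and __weak always gets an EH cleanup. The struct and function names below are invented for the sketch, not clang's.

#include <cassert>

enum class Lifetime { ExplicitNone, Strong, Weak, Autoreleasing };

struct CleanupPlan {
  bool needsCleanup;     // push a destroy cleanup at all?
  bool alwaysEHCleanup;  // force NormalAndEHCleanup (the __weak case)
};

CleanupPlan planCleanup(Lifetime lifetime) {
  switch (lifetime) {
  case Lifetime::ExplicitNone:
  case Lifetime::Autoreleasing:
    return {false, false};   // nothing to do
  case Lifetime::Strong:
    return {true, false};    // EH use depends on the ARC cleanup kind
  case Lifetime::Weak:
    return {true, true};     // leaks here would otherwise become crashes
  }
  return {false, false};
}

int main() {
  assert(planCleanup(Lifetime::Weak).alwaysEHCleanup);
  assert(!planCleanup(Lifetime::Autoreleasing).needsCleanup);
  return 0;
}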
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ }
+
+ for (Stmt::const_child_range children = s->children(); children; ++children)
+    // children might be null, e.g. a missing decl or the condition of an if-stmt.
+ if ((*children) && isAccessedBy(var, *children))
+ return true;
+
+ return false;
+}
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
+static void drillIntoBlockVariable(CodeGenFunction &CGF,
+ LValue &lvalue,
+ const VarDecl *var) {
+ lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(RValue::get(value), lvalue);
+ return;
+ }
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init))
+ init = ewc->getSubExpr();
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = isAccessedBy(D, init);
+ if (accessedByInit) {
+ LValue tempLV = lvalue;
+ // Drill down to the __block object if necessary.
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempLV.setAddress(Builder.CreateStructGEP(tempLV.getAddress(),
+ getByRefValueLLVMField(cast<VarDecl>(D))));
+ }
+
+ const llvm::PointerType *ty
+ = cast<llvm::PointerType>(tempLV.getAddress()->getType());
+ ty = cast<llvm::PointerType>(ty->getElementType());
+
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempLV.getAddress(), zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempLV);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = 0;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(lvalue.getAddress(), value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ return;
+ }
+
+ EmitStoreOfScalar(value, lvalue);
+}
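+
+// Illustrative note (hypothetical source): given
+//   __strong id x = makeValue(x);
+// 'x' is accessed by its own initializer, so the code above first stores nil
+// into the slot, evaluates the initializer at +1, and then does an
+// assignment-style store that releases the old (nil) value, preserving the
+// illusion that the variable started out zero-initialized.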
+
+/// EmitScalarInit - Initialize the given lvalue with the given object.
+void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime)
+ return EmitStoreThroughLValue(RValue::get(init), lvalue);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong:
+ init = EmitARCRetain(lvalue.getType(), init);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(lvalue.getAddress(), init);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ init = EmitARCRetainAutorelease(lvalue.getType(), init);
+ break;
+ }
+
+ EmitStoreOfScalar(init, lvalue);
+}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
@@ -508,6 +723,10 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
CharUnits alignment = getContext().getDeclAlign(&D);
emission.Alignment = alignment;
+ // If the type is variably-modified, emit all the VLA sizes for it.
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
@@ -521,7 +740,9 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// arrays as long as the initialization is trivial (e.g. if they
// have a non-trivial destructor, but not a non-trivial constructor).
if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) && Ty->isPODType() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
D.getInit()->isConstantInitializer(getContext(), false)) {
// If the variable's a const type, and it's neither an NRVO
@@ -585,10 +806,6 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
DeclPtr = CreateStaticVarDecl(D, Class,
llvm::GlobalValue::InternalLinkage);
}
-
- // FIXME: Can this happen?
- if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
} else {
EnsureInsertPoint();
@@ -608,19 +825,17 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}
- // Get the element type.
- const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
- const llvm::Type *LElemPtrTy =
- LElemTy->getPointerTo(CGM.getContext().getTargetAddressSpace(Ty));
+ llvm::Value *elementCount;
+ QualType elementType;
+ llvm::tie(elementCount, elementType) = getVLASize(Ty);
- llvm::Value *VLASize = EmitVLASize(Ty);
+ const llvm::Type *llvmTy = ConvertTypeForMem(elementType);
// Allocate memory for the array.
- llvm::AllocaInst *VLA =
- Builder.CreateAlloca(llvm::Type::getInt8Ty(getLLVMContext()), VLASize, "vla");
- VLA->setAlignment(alignment.getQuantity());
+ llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
+ vla->setAlignment(alignment.getQuantity());
- DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ DeclPtr = vla;
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
@@ -667,6 +882,21 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
return false;
}
+/// \brief Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+static bool isTrivialInitializer(const Expr *Init) {
+ if (!Init)
+ return true;
+
+ if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+ if (Constructor->isTrivial() &&
+ Constructor->isDefaultConstructor() &&
+ !Construct->requiresZeroInitialization())
+ return true;
+
+ return false;
+}
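+
+// isTrivialInitializer returns true both when there is no initializer at all
+// and when the initializer is a call to a trivial default constructor that
+// needs no zero-initialization; e.g. a default-constructed local of a class
+// with only a trivial default constructor generates no code, letting
+// EmitAutoVarInit below return early.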
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
@@ -690,7 +920,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (emission.IsByRef)
emitByrefStructureInit(emission);
- if (!Init) return;
+ if (isTrivialInitializer(Init))
+ return;
+
CharUnits alignment = emission.Alignment;
@@ -702,8 +934,11 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
- if (!emission.IsConstantAggregate)
- return EmitExprAsInit(Init, &D, Loc, alignment, capturedByInit);
+ if (!emission.IsConstantAggregate) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment.getQuantity());
+ lv.setNonGC(true);
+ return EmitExprAsInit(Init, &D, lv, capturedByInit);
+ }
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
@@ -765,32 +1000,87 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init,
- const VarDecl *var,
- llvm::Value *loc,
- CharUnits alignment,
+ const ValueDecl *D,
+ LValue lvalue,
bool capturedByInit) {
- QualType type = var->getType();
- bool isVolatile = type.isVolatileQualified();
+ QualType type = D->getType();
if (type->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(init, var);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- EmitStoreOfScalar(RV.getScalarVal(), loc, false,
- alignment.getQuantity(), type);
+ RValue rvalue = EmitReferenceBindingToExpr(init, D);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(rvalue, lvalue);
} else if (!hasAggregateLLVMType(type)) {
- llvm::Value *V = EmitScalarExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- EmitStoreOfScalar(V, loc, isVolatile, alignment.getQuantity(), type);
+ EmitScalarInit(init, D, lvalue, capturedByInit);
} else if (type->isAnyComplexType()) {
ComplexPairTy complex = EmitComplexExpr(init);
- if (capturedByInit) loc = BuildBlockByrefAddress(loc, var);
- StoreComplexToAddr(complex, loc, isVolatile);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
} else {
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forAddr(loc, isVolatile, true, false));
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue, true, false));
}
}
+/// Enter a destroy cleanup for the given local variable.
+void CodeGenFunction::emitAutoVarTypeCleanup(
+ const CodeGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ llvm::Value *addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
+ emission.NRVOFlag);
+ return;
+ }
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ // Suppress cleanups for pseudo-strong variables.
+ if (var->isARCPseudoStrong()) return;
+
+ // Otherwise, consider whether to use an EH cleanup or not.
+ cleanupKind = getARCCleanupKind();
+
+ // Use the imprecise destroyer by default.
+ if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ break;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer) destroyer = &getDestroyer(dtorKind);
+
+ // Use an EH cleanup in array destructors iff the destructor itself
+ // is being pushed as an EH cleanup.
+ bool useEHCleanup = (cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+ useEHCleanup);
+}
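+
+// e.g. for a hypothetical 'Widget make() { Widget w; ...; return w; }' under
+// NRVO, the DestroyNRVOVariable cleanup pushed above consults the NRVO flag
+// and skips the destructor on the normal return path while still running it
+// along exception edges.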
+
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
@@ -799,35 +1089,14 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
const VarDecl &D = *emission.Variable;
- // Handle C++ destruction of variables.
- if (getLangOptions().CPlusPlus) {
- QualType type = D.getType();
- QualType baseType = getContext().getBaseElementType(type);
- if (const RecordType *RT = baseType->getAs<RecordType>()) {
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (!ClassDecl->hasTrivialDestructor()) {
- // Note: We suppress the destructor call when the corresponding NRVO
- // flag has been set.
-
- // Note that for __block variables, we want to destroy the
- // original stack object, not the possible forwarded object.
- llvm::Value *Loc = emission.getObjectAddress(*this);
-
- const CXXDestructorDecl *D = ClassDecl->getDestructor();
- assert(D && "EmitLocalBlockVarDecl - destructor is nul");
-
- if (type != baseType) {
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(type);
- assert(Array && "types changed without array?");
- EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
- D, Array, Loc);
- } else {
- EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
- D, emission.NRVOFlag, Loc);
- }
- }
- }
+ // Check the type for a cleanup.
+ if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
+ emitAutoVarTypeCleanup(emission, dtorKind);
+
+ // In GC mode, honor objc_precise_lifetime.
+ if (getLangOptions().getGCMode() != LangOptions::NonGC &&
+ D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+ EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
// Handle the cleanup attribute.
@@ -847,6 +1116,271 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
enterByrefCleanup(emission);
}
+CodeGenFunction::Destroyer &
+CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ // This is surprisingly compiler-dependent. GCC 4.2 can't bind
+ // references to functions directly in returns, and using '*&foo'
+ // confuses MSVC. Luckily, the following code pattern works in both.
+ Destroyer *destroyer = 0;
+ switch (kind) {
+ case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ destroyer = &destroyCXXObject;
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ destroyer = &destroyARCStrongPrecise;
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ destroyer = &destroyARCWeak;
+ break;
+ }
+ return *destroyer;
+}
+
+/// pushDestroy - Push the standard destructor for the given type.
+void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+ QualType type, Destroyer &destroyer,
+ bool useEHCleanupForArray) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
+ destroyer, useEHCleanupForArray);
+}
+
+/// emitDestroy - Immediately perform the destruction of the given
+/// object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+/// used when destroying array elements, in case one of the
+/// destructions throws an exception
+void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ llvm::Value *begin = addr;
+ llvm::Value *length = emitArrayLength(arrayType, type, begin);
+
+ // Normally we have to check whether the array is zero-length.
+ bool checkZeroLength = true;
+
+ // But if the array length is constant, we can suppress that.
+ if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
+ // ...and if it's constant zero, we can just skip the entire thing.
+ if (constLength->isZero()) return;
+ checkZeroLength = false;
+ }
+
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ emitArrayDestroy(begin, end, type, destroyer,
+ checkZeroLength, useEHCleanupForArray);
+}
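+
+// For a non-array object this reduces to a single call to the destroyer; for
+// a hypothetical 'Widget w[8]' it emits the loop built by emitArrayDestroy
+// below, destroying w[7] down to w[0].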
+
+/// emitArrayDestroy - Destroys all the elements of the given array,
+/// from the last element back to the first.  The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param type - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param checkZeroLength - whether to emit an initial check that the
+///   array is non-empty before entering the loop
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+///   the remaining elements in case the destruction of a single
+///   element throws
+void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
+ llvm::Value *end,
+ QualType type,
+ Destroyer &destroyer,
+ bool checkZeroLength,
+ bool useEHCleanup) {
+ assert(!type->isArrayType());
+
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
+ llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
+
+ if (checkZeroLength) {
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
+ "arraydestroy.isempty");
+ Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
+ }
+
+ // Enter the loop body, making that address the current address.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ EmitBlock(bodyBB);
+ llvm::PHINode *elementPast =
+ Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
+ elementPast->addIncoming(end, entryBB);
+
+ // Shift the address back by one element.
+ llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
+ "arraydestroy.element");
+
+ if (useEHCleanup)
+ pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+
+ // Perform the actual destruction there.
+ destroyer(*this, element, type);
+
+ if (useEHCleanup)
+ PopCleanupBlock();
+
+ // Check whether we've reached the end.
+ llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
+ Builder.CreateCondBr(done, doneBB, bodyBB);
+ elementPast->addIncoming(element, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(doneBB);
+}
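+
+// Roughly, the emitted control flow looks like this when the length is not
+// known to be non-zero:
+//   entry: %isempty = icmp eq %begin, %end;  br %isempty, done, body
+//   body:  %past = phi [%end, entry], [%elt, body]
+//          %elt  = gep inbounds %past, -1;  call destroyer(%elt)
+//          %done = icmp eq %elt, %begin;  br %done, done, body
+//   done: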
+
+/// Perform partial array destruction as if in an EH cleanup. Unlike
+/// emitArrayDestroy, the element type here may still be an array type.
+static void emitPartialArrayDestroy(CodeGenFunction &CGF,
+ llvm::Value *begin, llvm::Value *end,
+ QualType type,
+ CodeGenFunction::Destroyer &destroyer) {
+ // If the element type is itself an array, drill down.
+ unsigned arrayDepth = 0;
+ while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
+ // VLAs don't require a GEP index to walk into.
+ if (!isa<VariableArrayType>(arrayType))
+ arrayDepth++;
+ type = arrayType->getElementType();
+ }
+
+ if (arrayDepth) {
+    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+
+    llvm::SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices.begin(),
+ gepIndices.end(), "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices.begin(),
+ gepIndices.end(), "pad.arrayend");
+ }
+
+ // Destroy the array. We don't ever need an EH cleanup because we
+ // assume that we're in an EH cleanup ourselves, so a throwing
+ // destructor causes an immediate terminate.
+ CGF.emitArrayDestroy(begin, end, type, destroyer,
+ /*checkZeroLength*/ true, /*useEHCleanup*/ false);
+}
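+
+// e.g. for a hypothetical 'T buf[3][4]', arrayDepth is 2, so begin/end are
+// GEP'd with three zero indices down to 'T*' before the flat element loop of
+// emitArrayDestroy runs over all twelve elements.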
+
+namespace {
+ /// RegularPartialArrayDestroy - a cleanup which performs a partial
+ /// array destroy where the end pointer is regularly determined and
+ /// does not need to be loaded from a local.
+ class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEnd;
+ QualType ElementType;
+ CodeGenFunction::Destroyer &Destroyer;
+ public:
+ RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
+ ElementType(elementType), Destroyer(*destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+
+ /// IrregularPartialArrayDestroy - a cleanup which performs a
+ /// partial array destroy where the end pointer is irregularly
+ /// determined and must be loaded from a local.
+ class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEndPointer;
+ QualType ElementType;
+ CodeGenFunction::Destroyer &Destroyer;
+ public:
+ IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+ ElementType(elementType), Destroyer(*destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
+ emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+}
+
+/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEndPointer - a value of type elementType**, a local variable
+///   holding the end of the already-constructed elements
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function used to destroy individual elements
+void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer &destroyer) {
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEndPointer,
+ elementType, &destroyer);
+}
+
+/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEnd - a value of type elementType*, one past the last
+///   already-constructed element
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function used to destroy individual elements
+void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer &destroyer) {
+ pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEnd,
+ elementType, &destroyer);
+}
+
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+  /// function.  This is used to balance out the incoming +1 of an
+  /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+
+ llvm::Value *Param;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(Param, /*precise*/ false);
+ }
+ };
+}
+
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
@@ -883,10 +1417,56 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Otherwise, create a temporary to hold the value.
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
+ bool doStore = true;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (D.isARCPseudoStrong()) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(lt == Qualifiers::OCL_Strong);
+ assert(qs.hasConst());
+ assert(method->getMethodFamily() != OMF_init);
+ (void) method;
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed)
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ Arg = EmitARCRetainNonBlock(Arg);
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed)
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, Arg);
+ doStore = false; // The weak init is a store, no need to do two
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
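+
+    // Illustrative note: a hypothetical parameter declared as
+    //   - (void)take:(__attribute__((ns_consumed)) id)x;
+    // arrives at +1.  For __strong we simply skip the retain above; for other
+    // lifetimes the ConsumeARCParameter cleanup releases it on function exit.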
+
// Store the initial value into the alloca.
- EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
- getContext().getDeclAlign(&D).getQuantity(), Ty,
- CGM.getTBAAInfo(Ty));
+ if (doStore) {
+ LValue lv = MakeAddrLValue(DeclPtr, Ty,
+ getContext().getDeclAlign(&D).getQuantity());
+ EmitStoreOfScalar(Arg, lv);
+ }
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
@@ -894,8 +1474,6 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DMEntry = DeclPtr;
// Emit debug info for param declaration.
- if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(D.getLocation());
+ if (CGDebugInfo *DI = getDebugInfo())
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
- }
}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 178badd44d81..0ae6a3d2ee90 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -27,68 +27,76 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
"Should not call EmitDeclInit on a reference!");
ASTContext &Context = CGF.getContext();
-
- const Expr *Init = D.getInit();
- QualType T = D.getType();
- bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
- unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
- if (!CGF.hasAggregateLLVMType(T)) {
- llvm::Value *V = CGF.EmitScalarExpr(Init);
+ unsigned alignment = Context.getDeclAlign(&D).getQuantity();
+ QualType type = D.getType();
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+
+ const Expr *Init = D.getInit();
+ if (!CGF.hasAggregateLLVMType(type)) {
CodeGenModule &CGM = CGF.CGM;
- Qualifiers::GC GCAttr = CGM.getContext().getObjCGCAttrKind(T);
- if (GCAttr == Qualifiers::Strong)
- CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, V, DeclPtr,
- D.isThreadSpecified());
- else if (GCAttr == Qualifiers::Weak)
- CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, V, DeclPtr);
+ if (lv.isObjCStrong())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.isThreadSpecified());
+ else if (lv.isObjCWeak())
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
else
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
- } else if (T->isAnyComplexType()) {
- CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
+ CGF.EmitScalarInit(Init, &D, lv, false);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
} else {
- CGF.EmitAggExpr(Init, AggValueSlot::forAddr(DeclPtr, isVolatile, true));
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv, true));
}
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *DeclPtr) {
+ llvm::Constant *addr) {
CodeGenModule &CGM = CGF.CGM;
- ASTContext &Context = CGF.getContext();
-
- QualType T = D.getType();
-
- // Drill down past array types.
- const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
- if (Array)
- T = Context.getBaseElementType(Array);
+
+ // FIXME: __attribute__((cleanup)) ?
- /// If that's not a record, we're done.
- /// FIXME: __attribute__((cleanup)) ?
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
+ QualType type = D.getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+
+ switch (dtorKind) {
+ case QualType::DK_none:
return;
-
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialDestructor())
+
+ case QualType::DK_cxx_destructor:
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ // We don't care about releasing objects during process teardown.
return;
-
- CXXDestructorDecl *Dtor = RD->getDestructor();
-
- llvm::Constant *DtorFn;
- if (Array) {
- DtorFn =
- CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
- DeclPtr);
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
- } else
- DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
-
- CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+ }
+
+ llvm::Constant *function;
+ llvm::Constant *argument;
+
+ // Special-case non-array C++ destructors, where there's a function
+ // with the right signature that we can just call.
+ const CXXRecordDecl *record = 0;
+ if (dtorKind == QualType::DK_cxx_destructor &&
+ (record = type->getAsCXXRecordDecl())) {
+ assert(!record->hasTrivialDestructor());
+ CXXDestructorDecl *dtor = record->getDestructor();
+
+ function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ argument = addr;
+
+ // Otherwise, the standard logic requires a helper function.
+ } else {
+ function = CodeGenFunction(CGM).generateDestroyHelper(addr, type,
+ CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind));
+ argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ }
+
+ CGF.EmitCXXGlobalDtorRegistration(function, argument);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
@@ -118,12 +126,13 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
}
// Get the destructor function type
- const llvm::Type *DtorFnTy =
+ llvm::Type *ArgTys[] = { Int8PtrTy };
+ llvm::Type *DtorFnTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- Int8PtrTy, false);
+ ArgTys, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
- const llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
+ llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
@@ -140,7 +149,7 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
- Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+ Builder.CreateCall(AtExitFn, Args);
}
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
@@ -270,12 +279,11 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
getTypes().getNullaryFunctionInfo(),
FunctionArgList(), SourceLocation());
- // Use guarded initialization if the global variable is weak due to
- // being a class template's static data member. These will always
- // have weak_odr linkage.
- if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage &&
- D->isStaticDataMember() &&
- D->getInstantiatedFromStaticDataMember()) {
+ // Use guarded initialization if the global variable is weak. This
+ // occurs for, e.g., instantiated static data members and
+ // definitions explicitly marked weak.
+ if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
+ Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
EmitCXXGuardedInit(*D, Addr);
} else {
EmitCXXGlobalVarDeclInit(*D, Addr);
@@ -291,10 +299,21 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
getTypes().getNullaryFunctionInfo(),
FunctionArgList(), SourceLocation());
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOptions().ObjCAutoRefCount && getLangOptions().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
for (unsigned i = 0; i != NumDecls; ++i)
if (Decls[i])
- Builder.CreateCall(Decls[i]);
+ Builder.CreateCall(Decls[i]);
+ Scope.ForceCleanup();
+
FinishFunction();
}
@@ -318,13 +337,13 @@ void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
FinishFunction();
}
-/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
-/// invoked, calls the default destructor on array elements in reverse order of
-/// construction.
+/// generateDestroyHelper - Generates a helper function which, when
+/// invoked, destroys the given object.
llvm::Function *
-CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
+CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray) {
FunctionArgList args;
ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
args.push_back(&dst);
@@ -333,19 +352,16 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
FunctionType::ExtInfo());
const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
- llvm::Function *Fn =
+ llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
- StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FI, args,
+ StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
SourceLocation());
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
-
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+ emitDestroy(addr, type, destroyer, useEHCleanupForArray);
FinishFunction();
- return Fn;
+ return fn;
}
+
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index e8ad6da2f980..418bea6ee402 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -29,10 +29,9 @@ using namespace CodeGen;
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *ArgTys[] = { CGF.SizeTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getInt8PtrTy(CGF.getLLVMContext()),
- SizeTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
@@ -40,10 +39,9 @@ static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
// void __cxa_free_exception(void *thrown_exception);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
@@ -52,11 +50,9 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *Args[3] = { Int8PtrTy, Int8PtrTy, Int8PtrTy };
+ llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Args, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
@@ -65,8 +61,7 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
// void __cxa_rethrow();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
@@ -74,9 +69,9 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
// void *__cxa_get_exception_ptr(void*);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}
@@ -84,9 +79,9 @@ static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
// void *__cxa_begin_catch(void*);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(Int8PtrTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}
@@ -95,8 +90,7 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
// void __cxa_end_catch();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
}
@@ -104,17 +98,17 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
  // void __cxa_call_unexpected(void *thrown_exception);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
+ llvm::Type *ArgTys[] = { Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
@@ -122,8 +116,9 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
}
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ llvm::Type *ArgTys[] = { Int8PtrTy };
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+ llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
@@ -134,20 +129,26 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
// void __terminate();
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
- /*IsVarArgs=*/false);
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
- return CGF.CGM.CreateRuntimeFunction(FTy,
- CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
+ llvm::StringRef name;
+
+ // In C++, use std::terminate().
+ if (CGF.getLangOptions().CPlusPlus)
+ name = "_ZSt9terminatev"; // FIXME: mangling!
+ else if (CGF.getLangOptions().ObjC1 &&
+ CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ name = "objc_terminate";
+ else
+ name = "abort";
+ return CGF.CGM.CreateRuntimeFunction(FTy, name);
}
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
llvm::StringRef Name) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Int8PtrTy,
- /*IsVarArgs=*/false);
+ llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
@@ -322,9 +323,10 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeException {
- static void Emit(CodeGenFunction &CGF, bool forEH,
- llvm::Value *exn) {
+ struct FreeException : EHScopeStack::Cleanup {
+ llvm::Value *exn;
+ FreeException(llvm::Value *exn) : exn(exn) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
->setDoesNotThrow();
}
@@ -354,7 +356,8 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
- CGF.EmitAnyExprToMem(e, typedAddr, /*Volatile*/ false, /*IsInit*/ true);
+ CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
// Deactivate the cleanup block.
CGF.DeactivateCleanupBlock(cleanup);
@@ -407,7 +410,6 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
// Now throw the exception.
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
/*ForEH=*/true);
@@ -786,7 +788,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Tell the backend how to generate the landing pad.
llvm::CallInst *Selection =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- EHSelector.begin(), EHSelector.end(), "eh.selector");
+ EHSelector, "eh.selector");
Selection->setDoesNotThrow();
// Save the selector value in mandatory-cleanup mode.
@@ -920,13 +922,13 @@ namespace {
CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
bool MightThrow;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
if (!MightThrow) {
CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
return;
}
- CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF));
}
};
}
@@ -1084,7 +1086,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, false, false));
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
+ false));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -1137,8 +1140,8 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
namespace {
struct CallRethrow : EHScopeStack::Cleanup {
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF));
}
};
}
@@ -1209,7 +1212,7 @@ namespace {
CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
: ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
llvm::BasicBlock *CleanupContBB =
CGF.createBasicBlock("finally.cleanup.cont");
@@ -1218,7 +1221,7 @@ namespace {
CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
CGF.EmitBlock(EndCatchBB);
- CGF.EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ CGF.EmitCallOrInvoke(EndCatchFn); // catch-all, so might throw
CGF.EmitBlock(CleanupContBB);
}
};
@@ -1236,7 +1239,7 @@ namespace {
: Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Enter a cleanup to call the end-catch function if one was provided.
if (EndCatchFn)
CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
@@ -1263,10 +1266,9 @@ namespace {
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
- llvm::Value *Args[] = { CGF.Builder.CreateLoad(SavedExnVar) };
- CGF.EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ CGF.EmitCallOrInvoke(RethrowFn, CGF.Builder.CreateLoad(SavedExnVar));
} else {
- CGF.EmitCallOrInvoke(RethrowFn, 0, 0);
+ CGF.EmitCallOrInvoke(RethrowFn);
}
CGF.Builder.CreateUnreachable();
@@ -1296,14 +1298,16 @@ namespace {
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
/// language/ABI-specific behavior in the catch-all sections.
-CodeGenFunction::FinallyInfo
-CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
- llvm::Constant *BeginCatchFn,
- llvm::Constant *EndCatchFn,
- llvm::Constant *RethrowFn) {
- assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
+ const Stmt *body,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn) {
+ assert((beginCatchFn != 0) == (endCatchFn != 0) &&
"begin/end catch functions not paired");
- assert(RethrowFn && "rethrow function is required");
+ assert(rethrowFn && "rethrow function is required");
+
+ BeginCatchFn = beginCatchFn;
// The rethrow function has one of the following two types:
// void (*)()
@@ -1311,13 +1315,12 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
- const llvm::FunctionType *RethrowFnTy =
+ const llvm::FunctionType *rethrowFnTy =
cast<llvm::FunctionType>(
- cast<llvm::PointerType>(RethrowFn->getType())
- ->getElementType());
- llvm::Value *SavedExnVar = 0;
- if (RethrowFnTy->getNumParams())
- SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+ cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ SavedExnVar = 0;
+ if (rethrowFnTy->getNumParams())
+ SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
// A finally block is a statement which must be executed on any edge
// out of a given scope. Unlike a cleanup, the finally block may
@@ -1331,67 +1334,64 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
// The finally block itself is generated in the context of a cleanup
// which conditionally leaves the catch-all.
- FinallyInfo Info;
-
// Jump destination for performing the finally block on an exception
// edge. We'll never actually reach this block, so unreachable is
// fine.
- JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+ RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
// Whether the finally block is being executed for EH purposes.
- llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
- "finally.for-eh");
- InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+ ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
// Enter a normal cleanup which will perform the @finally block.
- EHStack.pushCleanup<PerformFinally>(NormalCleanup, Body,
- ForEHVar, EndCatchFn,
- RethrowFn, SavedExnVar);
+ CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
+ ForEHVar, endCatchFn,
+ rethrowFn, SavedExnVar);
// Enter a catch-all scope.
- llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
- CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
- Builder.SetInsertPoint(CatchAllBB);
-
- // If there's a begin-catch function, call it.
- if (BeginCatchFn) {
- Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotThrow();
- }
-
- // If we need to remember the exception pointer to rethrow later, do so.
- if (SavedExnVar) {
- llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
- Builder.CreateStore(SavedExn, SavedExnVar);
- }
+ llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
+ EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
+ catchScope->setCatchAllHandler(0, catchBB);
+}
- // Tell the finally block that we're in EH.
- Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
+void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
+ // Leave the finally catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
+ llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
+ CGF.EHStack.popCatch();
- // Thread a jump through the finally cleanup.
- EmitBranchThroughCleanup(RethrowDest);
+ // If there are any references to the catch-all block, emit it.
+ if (catchBB->use_empty()) {
+ delete catchBB;
+ } else {
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(catchBB);
- Builder.restoreIP(SavedIP);
+ llvm::Value *exn = 0;
- EHCatchScope *CatchScope = EHStack.pushCatch(1);
- CatchScope->setCatchAllHandler(0, CatchAllBB);
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ CGF.Builder.CreateCall(BeginCatchFn, exn)->setDoesNotThrow();
+ }
- return Info;
-}
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ if (!exn) exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ CGF.Builder.CreateStore(exn, SavedExnVar);
+ }
-void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
- // Leave the finally catch-all.
- EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
- llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
- EHStack.popCatch();
+  // Tell the cleanups in the finally block that we're doing this for EH.
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
- // And leave the normal cleanup.
- PopCleanupBlock();
+ // Thread a jump through the finally cleanup.
+ CGF.EmitBranchThroughCleanup(RethrowDest);
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- EmitBlock(CatchAllBB, true);
+ CGF.Builder.restoreIP(savedIP);
+ }
- Builder.restoreIP(SavedIP);
+ // Finally, leave the @finally cleanup.
+ CGF.PopCleanupBlock();
}
llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
@@ -1416,7 +1416,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::Value *Args[3] = { Exn, getOpaquePersonalityFn(CGM, Personality),
getCatchAllValue(*this) };
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- Args, Args+3, "eh.selector")
+ Args, "eh.selector")
->setDoesNotThrow();
llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 2f6b55bd7b0b..a7e8003eaab5 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -20,8 +20,8 @@
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
-#include "llvm/Intrinsics.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -131,16 +131,16 @@ RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
- bool IsLocationVolatile,
+ Qualifiers Quals,
bool IsInit) {
- if (E->getType()->isComplexType())
- EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
+ if (E->getType()->isAnyComplexType())
+ EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, IsInit));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
- EmitStoreThroughLValue(RV, LV, E->getType());
+ EmitStoreThroughLValue(RV, LV);
}
}
@@ -203,7 +203,24 @@ static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
llvm::Value *&ReferenceTemporary,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ // Look through expressions for materialized temporaries (for now).
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType() &&
+ (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = E->getType();
+
+ E = M->GetTemporaryExpr();
+ }
+
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
@@ -213,6 +230,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
}
@@ -229,12 +247,69 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
RV = CGF.EmitLoadOfPropertyRefLValue(LV);
return RV.getScalarVal();
}
+
if (LV.isSimple())
return LV.getAddress();
// We have to load the lvalue.
- RV = CGF.EmitLoadOfLValue(LV, E->getType());
+ RV = CGF.EmitLoadOfLValue(LV);
} else {
+ if (!ObjCARCReferenceLifetimeType.isNull()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+
+
+ LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
+ ObjCARCReferenceLifetimeType);
+
+ CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
+ RefTempDst, false);
+
+ bool ExtendsLifeOfTemporary = false;
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (Var->extendsLifetimeOfTemporary())
+ ExtendsLifeOfTemporary = true;
+ } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ ExtendsLifeOfTemporary = true;
+ }
+
+ if (!ExtendsLifeOfTemporary) {
+ // Since the lifetime of this temporary isn't going to be extended,
+ // we need to clean it up ourselves at the end of the full expression.
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CGF.pushDestroy(NormalAndEHCleanup,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanupForArray*/ true);
+ break;
+ }
+
+ ObjCARCReferenceLifetimeType = QualType();
+ }
+
+ return ReferenceTemporary;
+ }
+
llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
while (true) {
E = E->IgnoreParens();
@@ -279,12 +354,10 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
InitializedDecl != 0);
}
-
- RV = CGF.EmitAnyExpr(E, AggSlot);
-
+
if (InitializedDecl) {
// Get the destructor for the reference temporary.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
@@ -294,6 +367,8 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
}
}
+ RV = CGF.EmitAnyExpr(E, AggSlot);
+
// Check if need to perform derived-to-base casts and/or field accesses, to
// get from the temporary object we created (and, potentially, for which we
// extended the lifetime) to the subobject we're binding the reference to.
@@ -326,7 +401,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
LValue TempLV = CGF.MakeAddrLValue(Object,
Adjustment.Field->getType());
- CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
break;
}
@@ -361,26 +436,65 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
const NamedDecl *InitializedDecl) {
llvm::Value *ReferenceTemporary = 0;
const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ QualType ObjCARCReferenceLifetimeType;
llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
InitializedDecl);
- if (!ReferenceTemporaryDtor)
+ if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
return RValue::get(Value);
// Make sure to call the destructor for the reference temporary.
- if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (VD->hasGlobalStorage()) {
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
+ if (VD && VD->hasGlobalStorage()) {
+ if (ReferenceTemporaryDtor) {
llvm::Constant *DtorFn =
CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
EmitCXXGlobalDtorRegistration(DtorFn,
cast<llvm::Constant>(ReferenceTemporary));
-
- return RValue::get(Value);
+ } else {
+ assert(!ObjCARCReferenceLifetimeType.isNull());
+ // Note: We intentionally do not register a global "destructor" to
+ // release the object.
}
+
+ return RValue::get(Value);
}
- PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
-
+ if (ReferenceTemporaryDtor)
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ else {
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ assert(0 && "Not a reference temporary that needs to be deallocated");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do.
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind cleanupKind = getARCCleanupKind();
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
+ &destroyARCStrongImprecise;
+ pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
+ *destroyer, cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // This local is a GCC and MSVC compiler workaround.
+ Destroyer *destroyer = &destroyARCWeak;
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType, *destroyer, true);
+ break;
+ }
+ }
+ }
+
return RValue::get(Value);
}
@@ -402,8 +516,7 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
// This needs to be to the standard address space.
Address = Builder.CreateBitCast(Address, Int8PtrTy);
- const llvm::Type *IntPtrT = IntPtrTy;
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
// In time, people may want to control this and use a 1 here.
llvm::Value *Arg = Builder.getFalse();
@@ -592,6 +705,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -599,10 +714,20 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDynamicCastExprClass:
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
+
+ case Expr::MaterializeTemporaryExprClass:
+ return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
}
}
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment(), lvalue.getType(),
+ lvalue.getTBAAInfo());
+}
+
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo) {
@@ -651,6 +776,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
QualType Ty,
llvm::MDNode *TBAAInfo) {
Value = EmitToMemory(Value, Ty);
+
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
if (Alignment)
Store->setAlignment(Alignment);
@@ -658,29 +784,30 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
CGM.DecorateInstruction(Store, TBAAInfo);
}
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment(), lvalue.getType(),
+ lvalue.getTBAAInfo());
+}
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
-RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isObjCWeak()) {
// load of a __weak object.
llvm::Value *AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
if (LV.isSimple()) {
- llvm::Value *Ptr = LV.getAddress();
-
- // Functions are l-values that don't require loading.
- if (ExprType->isFunctionType())
- return RValue::get(Ptr);
+ assert(!LV.getType()->isFunctionType());
// Everything needs a load.
- return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
- LV.getAlignment(), ExprType,
- LV.getTBAAInfo()));
-
+ return RValue::get(EmitLoadOfScalar(LV));
}
if (LV.isVectorElt()) {
@@ -693,21 +820,20 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
// If this is a reference to a subset of the elements of a vector, either
// shuffle the input or extract/insert them as appropriate.
if (LV.isExtVectorElt())
- return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+ return EmitLoadOfExtVectorElementLValue(LV);
if (LV.isBitField())
- return EmitLoadOfBitfieldLValue(LV, ExprType);
+ return EmitLoadOfBitfieldLValue(LV);
assert(LV.isPropertyRef() && "Unknown LValue type!");
return EmitLoadOfPropertyRefLValue(LV);
}
-RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
- QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertType(ExprType);
+ const llvm::Type *ResLTy = ConvertType(LV.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
@@ -733,7 +859,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// Cast to the access type.
const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
AI.AccessWidth,
- CGM.getContext().getTargetAddressSpace(ExprType));
+ CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Perform the load.
@@ -777,8 +903,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
-RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
- QualType ExprType) {
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
LV.isVolatileQualified(), "tmp");
@@ -786,7 +911,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
// If the result of the expression is a non-vector type, we must be extracting
// a single element. Just codegen as an extractelement.
- const VectorType *ExprVT = ExprType->getAs<VectorType>();
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
@@ -813,8 +938,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
- QualType Ty) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
@@ -829,15 +953,41 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
// If this is an update of extended vector elements, insert them as
// appropriate.
if (Dst.isExtVectorElt())
- return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
if (Dst.isBitField())
- return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
assert(Dst.isPropertyRef() && "Unknown LValue type");
return EmitStoreThroughPropertyRefLValue(Src, Dst);
}
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
+ Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// Store into a __weak object.
llvm::Value *LvalueDst = Dst.getAddress();
@@ -871,24 +1021,21 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
- Dst.isVolatileQualified(), Dst.getAlignment(), Ty,
- Dst.getTBAAInfo());
+ EmitStoreOfScalar(Src.getScalarVal(), Dst);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
- QualType Ty,
llvm::Value **Result) {
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
+ const llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- if (Ty->isBooleanType())
+ if (Dst.getType()->isBooleanType())
SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
@@ -983,8 +1130,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
- LValue Dst,
- QualType Ty) {
+ LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
@@ -993,7 +1139,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::Value *SrcVal = Src.getScalarVal();
- if (const VectorType *VTy = Ty->getAs<VectorType>()) {
+ if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
unsigned NumDstElts =
cast<llvm::VectorType>(Vec->getType())->getNumElements();
@@ -1113,7 +1259,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
return;
}
-
+
+ if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ return;
+ }
+
if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
if (LV.isObjCIvar() && !LV.isObjCArray())
@@ -1136,6 +1287,14 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
}
+static llvm::Value *
+EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
+ llvm::Value *V, llvm::Type *IRType,
+ llvm::StringRef Name = llvm::StringRef()) {
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
+}
+
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
const Expr *E, const VarDecl *VD) {
assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
@@ -1144,6 +1303,10 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = CGF.Builder.CreateLoad(V, "tmp");
+
+ V = EmitBitCastOfLValueToProperType(CGF, V,
+ CGF.getTypes().ConvertTypeForMem(E->getType()));
+
unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
setObjCGCLValueClass(CGF.getContext(), E, LV);
@@ -1151,7 +1314,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
+ const Expr *E, const FunctionDecl *FD) {
llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
@@ -1200,6 +1363,9 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
+ V = EmitBitCastOfLValueToProperType(*this, V,
+ getTypes().ConvertTypeForMem(E->getType()));
+
LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
@@ -1359,7 +1525,7 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
TrapBB = createBasicBlock("trap");
EmitBlock(TrapBB);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
llvm::CallInst *TrapCall = Builder.CreateCall(F);
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
@@ -1400,7 +1566,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType().getCVRQualifiers());
+ E->getBase()->getType());
}
// Extend or truncate the index type to 32 or 64-bits.
@@ -1430,21 +1596,27 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
unsigned ArrayAlignment = 0;
- if (const VariableArrayType *VAT =
+ if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
- llvm::Value *VLASize = GetVLASize(VAT);
-
- Idx = Builder.CreateMul(Idx, VLASize);
-
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
-
- Address = EmitCastToVoidPtr(Base);
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ // The base must be a pointer, which is not an aggregate. Emit
+ // it. It needs to be emitted first in case it's what captures
+ // the VLA bounds.
+ Address = EmitScalarExpr(E->getBase());
+
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOptions().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
- else
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
- Address = Builder.CreateBitCast(Address, Base->getType());
+ }
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
@@ -1539,7 +1711,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
Base = EmitLValue(E->getBase());
} else {
// Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
- assert(E->getBase()->getType()->getAs<VectorType>() &&
+ assert(E->getBase()->getType()->isVectorType() &&
"Result must be a vector");
llvm::Value *Vec = EmitScalarExpr(E->getBase());
@@ -1548,6 +1720,9 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
Builder.CreateStore(Vec, VecMem);
Base = MakeAddrLValue(VecMem, E->getBase()->getType());
}
+
+ QualType type =
+ E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
// Encode the element access list into a vector of unsigned indices.
llvm::SmallVector<unsigned, 4> Indices;
@@ -1555,8 +1730,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (Base.isSimple()) {
llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(), CV,
- Base.getVRQualifiers());
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -1570,8 +1744,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
}
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
- Base.getVRQualifiers());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -1622,7 +1795,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
CGM.getTypes().getCGRecordLayout(Field->getParent());
const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
return LValue::MakeBitfield(BaseValue, Info,
- Field->getType().getCVRQualifiers()|CVRQualifiers);
+ Field->getType().withCVRQualifiers(CVRQualifiers));
}
/// EmitLValueForAnonRecordField - Given that the field is a member of
@@ -1656,20 +1829,14 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
bool mayAlias = rec->hasAttr<MayAliasAttr>();
- llvm::Value *addr;
+ llvm::Value *addr = baseAddr;
if (rec->isUnion()) {
- // For unions, we just cast to the appropriate type.
+ // For unions, there is no pointer adjustment.
assert(!type->isReferenceType() && "union has reference member");
-
- const llvm::Type *llvmType = CGM.getTypes().ConvertTypeForMem(type);
- unsigned AS =
- cast<llvm::PointerType>(baseAddr->getType())->getAddressSpace();
- addr = Builder.CreateBitCast(baseAddr, llvmType->getPointerTo(AS),
- field->getName());
} else {
// For structs, we GEP to the field that the record layout suggests.
unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
- addr = Builder.CreateStructGEP(baseAddr, idx, field->getName());
+ addr = Builder.CreateStructGEP(addr, idx, field->getName());
// If this is a reference field, load the reference right now.
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
@@ -1691,6 +1858,14 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
+
+ // Make sure that the address is pointing to the right type. This is critical
+ // for both unions and structs. A union needs a bitcast, a struct element
+ // will need a bitcast if the LLVM type laid out doesn't match the desired
+ // type.
+ addr = EmitBitCastOfLValueToProperType(*this, addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
@@ -1722,9 +1897,17 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
CGM.getTypes().getCGRecordLayout(Field->getParent());
unsigned idx = RL.getLLVMFieldNo(Field);
llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
-
assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+ // Make sure that the address is pointing to the right type. This is critical
+ // for both unions and structs. A union needs a bitcast, a struct element
+ // will need a bitcast if the LLVM type laid out doesn't match the desired
+ // type.
+ const llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
+
unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
return MakeAddrLValue(V, FieldType, Alignment);
}
@@ -1734,7 +1917,8 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
- EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
return Result;
}
@@ -1863,13 +2047,16 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
- case CK_AnyPointerToBlockPointerCast: {
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
// that temporary.
llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
- EmitAnyExprToMem(E, V, false, false);
+ EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
return MakeAddrLValue(V, E->getType());
}
@@ -1954,11 +2141,18 @@ LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
return getOpaqueLValueMapping(e);
}
+LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ RValue RV = EmitReferenceBindingToExpr(E->GetTemporaryExpr(),
+ /*InitializedDecl=*/0);
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
-
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -1988,13 +2182,57 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
- // C++ [expr.pseudo]p1:
- // The result shall only be used as the operand for the function call
- // operator (), and the result of such a call has type void. The only
- // effect is the evaluation of the postfix-expression before the dot or
- // arrow.
- EmitScalarExpr(E->getCallee());
+ if (const CXXPseudoDestructorExpr *PseudoDtor
+ = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ DestroyedType->isObjCLifetimeType() &&
+ (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = PseudoDtor->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ /*precise*/ true);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
return RValue::get(0);
}
@@ -2016,12 +2254,28 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
return EmitPointerToDataMemberBinaryExpr(E);
assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
if (!hasAggregateLLVMType(E->getType())) {
- // __block variables need the RHS evaluated first.
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
RValue RV = EmitAnyExpr(E->getRHS());
LValue LV = EmitLValue(E->getLHS());
- EmitStoreThroughLValue(RV, LV, E->getType());
+ EmitStoreThroughLValue(RV, LV);
return LV;
}
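[Editorial sketch, not part of the imported patch.] The ExtendsLifeOfTemporary check added to EmitExprForReferenceBinding above follows the ordinary C++ lifetime-extension rule; the ARC cleanup is only pushed when the temporary is not lifetime-extended. A minimal plain-C++ illustration of the two cases (no ARC involved; all names are invented):

// Editorial illustration only; Widget, makeWidget, take and demo are invented.
struct Widget {
  Widget() {}
  ~Widget() {}
};

Widget makeWidget() { return Widget(); }
void take(const Widget &) {}

void demo() {
  const Widget &kept = makeWidget(); // lifetime extended: the temporary lives as long as 'kept'
  take(makeWidget());                // not extended: the temporary dies at the end of the full
                                     // expression, which is where the new code pushes its cleanup
}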
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index d8da642a6b38..915ffd6034e2 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -85,15 +85,16 @@ public:
Visit(GE->getResultExpr());
}
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
// l-values.
void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- EmitAggLoadOfLValue(E);
- }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
@@ -131,13 +132,13 @@ public:
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
-
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
void VisitVAArgExpr(VAArgExpr *E);
- void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
- void EmitNullInitializationToLValue(LValue Address, QualType T);
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address);
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
@@ -243,10 +244,31 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
// Visitor Methods
//===----------------------------------------------------------------------===//
+void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+ Visit(E->GetTemporaryExpr());
+}
+
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}
+void
+AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (E->getType().isPODType(CGF.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ // FIXME: This is a band-aid; the real problem appears to be in our handling
+ // of assignments, where we store directly into the LHS without checking
+ // whether anything in the RHS aliases.
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitAggExpr(E->getInitializer(), Slot);
+}
+
+
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
case CK_Dynamic: {
@@ -271,8 +293,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
QualType PtrTy = CGF.getContext().getPointerType(Ty);
llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
CGF.ConvertType(PtrTy));
- EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
- Ty);
+ EmitInitializationToLValue(E->getSubExpr(),
+ CGF.MakeAddrLValue(CastPtr, Ty));
break;
}
@@ -339,6 +361,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -519,13 +544,13 @@ void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
- EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T), T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
@@ -557,41 +582,46 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
void
-AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ QualType type = LV.getType();
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
// Storing "i32 0" to a zero'd memory location is a noop.
} else if (isa<ImplicitValueInitExpr>(E)) {
- EmitNullInitializationToLValue(LV, T);
- } else if (T->isReferenceType()) {
+ EmitNullInitializationToLValue(LV);
+ } else if (type->isReferenceType()) {
RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
- CGF.EmitStoreThroughLValue(RV, LV, T);
- } else if (T->isAnyComplexType()) {
+ CGF.EmitStoreThroughLValue(RV, LV);
+ } else if (type->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
- } else if (CGF.hasAggregateLLVMType(T)) {
- CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
- false, Dest.isZeroed()));
+ } else if (CGF.hasAggregateLLVMType(type)) {
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
+ Dest.isZeroed()));
+ } else if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
} else {
- CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
}
}
-void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
+ QualType type = lv.getType();
+
// If the destination slot is already zeroed out before the aggregate is
// copied into it, we don't have to emit any zeros here.
- if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(T))
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
return;
- if (!CGF.hasAggregateLLVMType(T)) {
+ if (!CGF.hasAggregateLLVMType(type)) {
// For non-aggregates, we can store zero
- llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
- CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
+ CGF.EmitStoreThroughLValue(RValue::get(null), lv);
} else {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(LV.getAddress(), T);
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -634,45 +664,135 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
uint64_t NumArrayElements = AType->getNumElements();
- QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
- ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
-
- bool hasNonTrivialCXXConstructor = false;
- if (CGF.getContext().getLangOptions().CPlusPlus)
- if (const RecordType *RT = CGF.getContext()
- .getBaseElementType(ElementType)->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- hasNonTrivialCXXConstructor = !RD->hasTrivialDefaultConstructor();
+ assert(NumInitElements <= NumArrayElements);
+
+ QualType elementType = E->getType().getCanonicalType();
+ elementType = CGF.getContext().getQualifiedType(
+ cast<ArrayType>(elementType)->getElementType(),
+ elementType.getQualifiers() + Dest.getQualifiers());
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
}
- // FIXME: were we intentionally ignoring address spaces and GC attributes?
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
- for (uint64_t i = 0; i != NumArrayElements; ++i) {
- // If we're done emitting initializers and the destination is known-zeroed
- // then we're done.
- if (i == NumInitElements &&
- Dest.isZeroed() &&
- CGF.getTypes().isZeroInitializable(ElementType) &&
- !hasNonTrivialCXXConstructor)
- break;
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
- llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
- LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
-
- if (i < NumInitElements)
- EmitInitializationToLValue(E->getInit(i), LV, ElementType);
- else if (Expr *filler = E->getArrayFiller())
- EmitInitializationToLValue(filler, LV, ElementType);
+ // Any remaining elements need to be zero-initialized, possibly
+ // using the filler expression. We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
else
- EmitNullInitializationToLValue(LV, ElementType);
-
- // If the GEP didn't get used because of a dead zero init or something
- // else, clean it up for -O0 builds and general tidiness.
- if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(NextVal))
- if (GEP->use_empty())
- GEP->eraseFromParent();
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
}
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);
+
return;
}
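[Editorial sketch, not part of the imported patch.] The rewritten array path above emits the explicit initializers element by element and then, as its own comment puts it, "basically do { *array++ = filler; } while (array != end)" for the remainder; the EH-cleanup bookkeeping is omitted here. A source-level analogue with invented names:

// Editorial illustration only; assumes numInits <= numElements, as the patch asserts.
#include <cstddef>

void initIntArray(int *begin, std::size_t numElements,
                  const int *inits, std::size_t numInits, int filler) {
  int *element = begin;
  for (std::size_t i = 0; i != numInits; ++i) // explicit initializers, in order
    *element++ = inits[i];
  int *end = begin + numElements;
  if (element != end) {                       // remaining elements get the filler
    do {
      *element++ = filler;
    } while (element != end);
  }
}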
@@ -683,9 +803,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
- RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
- if (E->getType()->isUnionType()) {
+ if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
if (!E->getInitializedFieldInUnion()) {
@@ -694,8 +814,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#ifndef NDEBUG
// Make sure that it's really an empty union and not a failure of
// semantic analysis.
- for (RecordDecl::field_iterator Field = SD->field_begin(),
- FieldEnd = SD->field_end();
+ for (RecordDecl::field_iterator Field = record->field_begin(),
+ FieldEnd = record->field_end();
Field != FieldEnd; ++Field)
assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
@@ -708,55 +828,81 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
if (NumInitElements) {
// Store the initializer into the field
- EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
} else {
// Default-initialize to null.
- EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ EmitNullInitializationToLValue(FieldLoc);
}
return;
}
+ // We'll need to enter cleanup scopes in case any of the member
+ // initializers throw an exception.
+ llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
- unsigned CurInitVal = 0;
- for (RecordDecl::field_iterator Field = SD->field_begin(),
- FieldEnd = SD->field_end();
- Field != FieldEnd; ++Field) {
- // We're done once we hit the flexible array member
- if (Field->getType()->isIncompleteArrayType())
+ unsigned curInitIndex = 0;
+ for (RecordDecl::field_iterator field = record->field_begin(),
+ fieldEnd = record->field_end();
+ field != fieldEnd; ++field) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
break;
- if (Field->isUnnamedBitfield())
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitfield())
continue;
- // Don't emit GEP before a noop store of zero.
- if (CurInitVal == NumInitElements && Dest.isZeroed() &&
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == NumInitElements && Dest.isZeroed() &&
CGF.getTypes().isZeroInitializable(E->getType()))
break;
// FIXME: volatility
- LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
+ LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
// We never generate write-barriers for initialized fields.
- FieldLoc.setNonGC(true);
+ LV.setNonGC(true);
- if (CurInitVal < NumInitElements) {
+ if (curInitIndex < NumInitElements) {
// Store the initializer into the field.
- EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
- Field->getType());
+ EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
} else {
// We're out of initializers; default-initialize to null
- EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ EmitNullInitializationToLValue(LV);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ bool pushedCleanup = false;
+ if (QualType::DestructionKind dtorKind
+ = field->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(dtorKind)) {
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+ CGF.getDestroyer(dtorKind), false);
+ cleanups.push_back(CGF.EHStack.stable_begin());
+ pushedCleanup = true;
+ }
}
// If the GEP didn't get used because of a dead zero init or something
// else, clean it up for -O0 builds and general tidiness.
- if (FieldLoc.isSimple())
+ if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(FieldLoc.getAddress()))
+ dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
if (GEP->use_empty())
GEP->eraseFromParent();
}
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(cleanups[i-1]);
}
//===----------------------------------------------------------------------===//
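[Editorial sketch, not part of the imported patch.] The per-field EH cleanups introduced above exist because a later member initializer can throw after earlier members are already constructed, and those earlier members still need their destructors run. A minimal C++ example of that situation (names invented):

// Editorial illustration only; Noisy, mayThrow, Pair and build are invented.
#include <stdexcept>

struct Noisy {
  Noisy() {}
  ~Noisy() {} // must still run if a later initializer throws
};

int mayThrow() { throw std::runtime_error("boom"); }

struct Pair {
  Noisy a;
  int   b;
};

void build() {
  Pair p = { Noisy(), mayThrow() }; // if mayThrow() throws, the already-built p.a is destroyed
}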
@@ -873,8 +1019,6 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
///
/// \param IsInitializer - true if this evaluation is initializing an
/// object whose lifetime is already being managed.
-//
-// FIXME: Take Qualifiers object.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
bool IgnoreResult) {
assert(E && hasAggregateLLVMType(E->getType()) &&
@@ -892,7 +1036,7 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forAddr(Temp, LV.isVolatileQualified(), false));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
return LV;
}
@@ -954,7 +1098,10 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
- if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
@@ -964,7 +1111,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
SizeVal);
return;
}
- } else if (getContext().getAsArrayType(Ty)) {
+ } else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 81fee677f61a..4396f567f2f9 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -379,19 +379,11 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
}
}
- const ConstantArrayType *Array
- = getContext().getAsConstantArrayType(E->getType());
- if (Array) {
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Dest.getAddr(), BasePtr);
-
- EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
+ if (const ConstantArrayType *arrayType
+ = getContext().getAsConstantArrayType(E->getType())) {
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
E->arg_begin(), E->arg_end());
- }
- else {
+ } else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -619,10 +611,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// can be ignored because the result shouldn't be used if
// allocation fails.
if (typeSizeMultiplier != 1) {
- const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
llvm::Value *umul_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow,
- intrinsicTypes, 1);
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
llvm::Value *tsmV =
llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
@@ -661,10 +651,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
if (cookieSize != 0) {
sizeWithoutCookie = size;
- const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
llvm::Value *uadd_with_overflow
- = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow,
- intrinsicTypes, 1);
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
llvm::Value *result =
@@ -707,16 +695,15 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
unsigned Alignment =
CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
- if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), Alignment,
- AllocType);
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, Alignment),
+ false);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
CGF.EmitAggExpr(Init, Slot);
}
}
@@ -806,7 +793,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
RequiresZeroInitialization = true;
}
-
+
CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
E->constructor_arg_begin(),
E->constructor_arg_end(),
@@ -875,7 +862,7 @@ namespace {
getPlacementArgs()[I] = Arg;
}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
@@ -932,7 +919,7 @@ namespace {
getPlacementArgs()[I] = Arg;
}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
@@ -1075,7 +1062,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// CXXNewExpr::shouldNullCheckAllocation()) and we have an
// interesting initializer.
bool nullCheck = allocatorType->isNothrow(getContext()) &&
- !(allocType->isPODType() && !E->hasInitializer());
+ !(allocType.isPODType(getContext()) && !E->hasInitializer());
llvm::BasicBlock *nullCheckBB = 0;
llvm::BasicBlock *contBB = 0;
@@ -1202,7 +1189,7 @@ namespace {
QualType ElementType)
: Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
}
};
@@ -1212,7 +1199,8 @@ namespace {
static void EmitObjectDelete(CodeGenFunction &CGF,
const FunctionDecl *OperatorDelete,
llvm::Value *Ptr,
- QualType ElementType) {
+ QualType ElementType,
+ bool UseGlobalDelete) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = 0;
@@ -1222,17 +1210,30 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
+ if (UseGlobalDelete) {
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete,
+ ElementType);
+ }
+
const llvm::Type *Ty =
CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
Dtor_Complete),
/*isVariadic=*/false);
llvm::Value *Callee
- = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+ = CGF.BuildVirtualCall(Dtor,
+ UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
+ Ptr, Ty);
CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
0, 0);
- // The dtor took care of deleting the object.
+ if (UseGlobalDelete) {
+ CGF.PopCleanupBlock();
+ }
+
return;
}
}
@@ -1247,7 +1248,29 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Ptr);
+ else if (CGF.getLangOptions().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType()) {
+ switch (ElementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+ case Qualifiers::OCL_Strong: {
+ // Load the pointer value.
+ llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
+ ElementType.isVolatileQualified());
+
+ CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
CGF.PopCleanupBlock();
}
@@ -1268,7 +1291,7 @@ namespace {
: Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
ElementType(ElementType), CookieSize(CookieSize) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
const FunctionProtoType *DeleteFTy =
OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
@@ -1316,31 +1339,40 @@ namespace {
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *E,
- llvm::Value *Ptr,
- QualType ElementType) {
- llvm::Value *NumElements = 0;
- llvm::Value *AllocatedPtr = 0;
- CharUnits CookieSize;
- CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
- NumElements, AllocatedPtr, CookieSize);
+ llvm::Value *deletedPtr,
+ QualType elementType) {
+ llvm::Value *numElements = 0;
+ llvm::Value *allocatedPtr = 0;
+ CharUnits cookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
+ numElements, allocatedPtr, cookieSize);
- assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
+ assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
// Make sure that we call delete even if one of the dtors throws.
- const FunctionDecl *OperatorDelete = E->getOperatorDelete();
+ const FunctionDecl *operatorDelete = E->getOperatorDelete();
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
- AllocatedPtr, OperatorDelete,
- NumElements, ElementType,
- CookieSize);
-
- if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
- if (!RD->hasTrivialDestructor()) {
- assert(NumElements && "ReadArrayCookie didn't find element count"
- " for a class with destructor");
- CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
- }
+ allocatedPtr, operatorDelete,
+ numElements, elementType,
+ cookieSize);
+
+ // Destroy the elements.
+ if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
+ assert(numElements && "no element count for a type with a destructor!");
+
+ llvm::Value *arrayEnd =
+ CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+
+ // Note that it is legal to allocate a zero-length array, and we
+ // can never fold the check away because the length should always
+ // come from a cookie.
+ CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.getDestroyer(dtorKind),
+ /*checkZeroLength*/ true,
+ CGF.needsEHCleanup(dtorKind));
}
+ // Pop the cleanup block.
CGF.PopCleanupBlock();
}
@@ -1397,7 +1429,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
- EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
+ E->isGlobalDelete());
}
EmitBlock(DeleteEnd);
@@ -1415,7 +1448,7 @@ static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
static void EmitBadTypeidCall(CodeGenFunction &CGF) {
llvm::Value *Fn = getBadTypeidFn(CGF);
- CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
@@ -1489,11 +1522,11 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
// const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *PtrDiffTy =
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+ llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(Int8PtrTy, Args, false);
@@ -1513,7 +1546,7 @@ static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
static void EmitBadCastCall(CodeGenFunction &CGF) {
llvm::Value *Fn = getBadCastFn(CGF);
- CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index bd1958602484..35cff1d72714 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -112,6 +112,10 @@ public:
return Visit(GE->getResultExpr());
}
ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+ ComplexPairTy
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
// l-values.
ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
@@ -352,6 +356,8 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
case CK_GetObjCProperty: {
LValue LV = CGF.EmitLValue(Op);
assert(LV.isPropertyRef() && "Unknown LValue type!");
@@ -360,39 +366,74 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_NoOp:
case CK_LValueToRValue:
+ case CK_UserDefinedConversion:
return Visit(Op);
- // TODO: do all of these
- default:
- break;
- }
-
- // Two cases here: cast from (complex to complex) and (scalar to complex).
- if (Op->getType()->isAnyComplexType())
- return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
-
- // FIXME: We should be looking at all of the cast kinds here, not
- // cherry-picking the ones we have test cases for.
- if (CK == CK_LValueBitCast) {
+ case CK_LValueBitCast: {
llvm::Value *V = CGF.EmitLValue(Op).getAddress();
V = Builder.CreateBitCast(V,
- CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
// FIXME: Are the qualifiers correct here?
return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
}
-
- // C99 6.3.1.7: When a value of real type is converted to a complex type, the
- // real part of the complex result value is determined by the rules of
- // conversion to the corresponding real type and the imaginary part of the
- // complex result value is a positive zero or an unsigned zero.
- llvm::Value *Elt = CGF.EmitScalarExpr(Op);
-
- // Convert the input element to the element type of the complex.
- DestTy = DestTy->getAs<ComplexType>()->getElementType();
- Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
-
- // Return (realval, 0).
- return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
}
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index da37bd5b0a1f..45e44dda0f58 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -433,9 +433,19 @@ llvm::Constant *ConstStructBuilder::
if (!Builder.Build(ILE))
return 0;
+ // Pick the type to use. If the type is layout identical to the ConvertType
+ // type then use it, otherwise use whatever the builder produced for us.
+ const llvm::StructType *STy =
+ llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
+ Builder.Elements,Builder.Packed);
+ const llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
+ if (const llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
+ if (ILESTy->isLayoutIdentical(STy))
+ STy = ILESTy;
+ }
+
llvm::Constant *Result =
- llvm::ConstantStruct::get(CGM.getLLVMContext(),
- Builder.Elements, Builder.Packed);
+ llvm::ConstantStruct::get(STy, Builder.Elements);
assert(Builder.NextFieldOffsetInChars.RoundUpToAlignment(
Builder.getAlignment(Result)) ==
@@ -471,6 +481,11 @@ public:
return Visit(PE->getSubExpr());
}
+ llvm::Constant *
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
@@ -523,7 +538,7 @@ public:
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
std::vector<llvm::Constant*> Elts;
- std::vector<const llvm::Type*> Types;
+ std::vector<llvm::Type*> Types;
Elts.push_back(C);
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
@@ -531,7 +546,7 @@ public:
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
- const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
if (NumPadBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumPadBytes);
@@ -570,6 +585,9 @@ public:
case CK_GetObjCProperty:
case CK_ToVoid:
case CK_Dynamic:
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject:
return 0;
// These might need to be supported for constexpr.
@@ -650,6 +668,10 @@ public:
return Visit(DAE->getExpr());
}
+ llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ return Visit(E->GetTemporaryExpr());
+ }
+
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
unsigned NumInitElements = ILE->getNumInits();
if (NumInitElements == 1 && ILE->getType() == ILE->getInit(0)->getType() &&
@@ -694,7 +716,7 @@ public:
if (RewriteType) {
// FIXME: Try to avoid packing the array
- std::vector<const llvm::Type*> Types;
+ std::vector<llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
@@ -986,7 +1008,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getComplexIntImag());
// FIXME: the target may want to specify that this is packed.
- return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Float:
return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
@@ -999,7 +1024,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getComplexFloatImag());
// FIXME: the target may want to specify that this is packed.
- return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
llvm::SmallVector<llvm::Constant *, 4> Inits;
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index dff7bf45e0c3..a73e667e780e 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -82,15 +82,15 @@ public:
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
- Value *EmitLoadOfLValue(LValue LV, QualType T) {
- return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ Value *EmitLoadOfLValue(LValue LV) {
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
+ return EmitLoadOfLValue(EmitCheckedLValue(E));
}
/// EmitConversionToBool - Convert the specified expression value to a
@@ -161,6 +161,9 @@ public:
Value *VisitParenExpr(ParenExpr *PE) {
return Visit(PE->getSubExpr());
}
+ Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
@@ -197,7 +200,7 @@ public:
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getType());
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
// Otherwise, assume the mapping is the scalar directly.
return CGF.getOpaqueRValueMapping(E).getScalarVal();
@@ -252,7 +255,7 @@ public:
Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
LValue LV = CGF.EmitObjCIsaExpr(E);
- Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal();
return V;
}
@@ -269,14 +272,12 @@ public:
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return CGF.CGM.EmitNullConstant(E->getType());
}
- Value *VisitCastExpr(CastExpr *E) {
- // Make sure to evaluate VLA bounds now so that we have them for later.
+ Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
if (E->getType()->isVariablyModifiedType())
- CGF.EmitVLASize(E->getType());
-
- return EmitCastExpr(E);
+ CGF.EmitVariablyModifiedType(E->getType());
+ return VisitCastExpr(E);
}
- Value *EmitCastExpr(CastExpr *E);
+ Value *VisitCastExpr(CastExpr *E);
Value *VisitCallExpr(const CallExpr *E) {
if (E->getCallReturnType()->isReferenceType())
@@ -1001,7 +1002,7 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
-Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
+Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
CastKind Kind = CE->getCastKind();
@@ -1020,7 +1021,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
Value *V = EmitLValue(E).getAddress();
V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy));
}
case CK_AnyPointerToObjCPointerCast:
@@ -1106,7 +1107,17 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// function pointers on Itanium and ARM).
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
-
+
+ case CK_ObjCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ObjCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+ case CK_ObjCReclaimReturnedObject: {
+ llvm::Value *value = Visit(E);
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+ return CGF.EmitObjCConsumeObject(E->getType(), value);
+ }
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -1122,7 +1133,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
assert(E->isGLValue() && E->getObjectKind() == OK_ObjCProperty &&
"CK_GetObjCProperty for non-lvalue or non-ObjCProperty");
- RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType());
+ RValue RV = CGF.EmitLoadOfLValue(CGF.EmitLValue(E));
return RV.getScalarVal();
}
@@ -1143,15 +1154,10 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
}
- case CK_PointerToIntegral: {
- Value *Src = Visit(const_cast<Expr*>(E));
-
- // Handle conversion to bool correctly.
- if (DestTy->isBooleanType())
- return EmitScalarConversion(Src, E->getType(), DestTy);
+ case CK_PointerToIntegral:
+ assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+ return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
- return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
- }
case CK_ToVoid: {
CGF.EmitIgnoredExpr(E);
return 0;
@@ -1221,7 +1227,7 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
LValue LV = CGF.EmitBlockDeclRefLValue(E);
- return CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
}
//===----------------------------------------------------------------------===//
@@ -1258,7 +1264,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
QualType type = E->getSubExpr()->getType();
- llvm::Value *value = EmitLoadOfLValue(LV, type);
+ llvm::Value *value = EmitLoadOfLValue(LV);
llvm::Value *input = value;
int amount = (isInc ? 1 : -1);
@@ -1282,7 +1288,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// overflow because of promotion rules; we're just eliding a few steps here.
if (type->isSignedIntegerOrEnumerationType() &&
value->getType()->getPrimitiveSizeInBits() >=
- CGF.CGM.IntTy->getBitWidth())
+ CGF.IntTy->getBitWidth())
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
else
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
@@ -1292,16 +1298,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
QualType type = ptr->getPointeeType();
// VLA types don't have constant size.
- if (type->isVariableArrayType()) {
- llvm::Value *vlaSize =
- CGF.GetVLASize(CGF.getContext().getAsVariableArrayType(type));
- value = CGF.EmitCastToVoidPtr(value);
- if (!isInc) vlaSize = Builder.CreateNSWNeg(vlaSize, "vla.negsize");
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
+ if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, vlaSize, "vla.inc");
+ value = Builder.CreateGEP(value, numElts, "vla.inc");
else
- value = Builder.CreateInBoundsGEP(value, vlaSize, "vla.inc");
- value = Builder.CreateBitCast(value, input->getType());
+ value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
@@ -1374,9 +1378,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Store the updated result through the lvalue.
if (LV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, type, &value);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
else
- CGF.EmitStoreThroughLValue(RValue::get(value), LV, type);
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV);
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
@@ -1521,14 +1525,25 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
- CGF.EmitVLASize(TypeToSize);
+ CGF.EmitVariablyModifiedType(TypeToSize);
} else {
// C99 6.5.3.4p2: If the argument is an expression of type
// VLA, it is evaluated.
CGF.EmitIgnoredExpr(E->getArgumentExpr());
}
- return CGF.GetVLASize(VAT);
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = CGF.getVLASize(VAT);
+
+ llvm::Value *size = numElts;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+
+ return size;
}
}
@@ -1546,8 +1561,7 @@ Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (E->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
- .getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, false, true).first;
@@ -1563,8 +1577,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (Op->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getType())
- .getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, true, false).second;
@@ -1616,7 +1629,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
- OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
@@ -1631,10 +1644,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// 'An assignment expression has the value of the left operand after the
// assignment...'.
if (LHSLV.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
- &Result);
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
else
- CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
return LHSLV;
}
@@ -1662,15 +1674,17 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS, E->getType());
+ return EmitLoadOfLValue(LHS);
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const BinOpInfo &Ops,
llvm::Value *Zero, bool isDiv) {
- llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
llvm::BasicBlock *contBB =
- CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn);
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
@@ -1700,9 +1714,11 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.Ty->isIntegerType())
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
else if (Ops.Ty->isRealFloatingType()) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
+ llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
CGF.CurFn);
- llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn);
CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
overflowBB, DivCont);
EmitOverflowBB(overflowBB);
@@ -1759,9 +1775,9 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
OpID <<= 1;
OpID |= 1;
- const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+ llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
- llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
@@ -1769,8 +1785,10 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::Function::iterator insertPt = initialBB;
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
+ llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
- llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -1788,8 +1806,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.SetInsertPoint(overflowBB);
// Get the overflow handler.
- const llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
- const llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
+ llvm::Type *Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
llvm::FunctionType *handlerTy =
llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
@@ -1817,196 +1835,187 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
return phi;
}
-Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
- if (!Ops.Ty->isAnyPointerType()) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
- switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
- case LangOptions::SOB_Undefined:
- return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Defined:
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
- }
- }
-
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
-
- return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
- }
-
- // Must have binary (not unary) expr here. Unary pointer decrement doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (Ops.Ty->isPointerType() &&
- Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size
- CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
- }
+/// Emit pointer + index arithmetic.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
- Value *Ptr, *Idx;
- Expr *IdxExp;
- const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
- const ObjCObjectPointerType *OPT =
- BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
- if (PT || OPT) {
- Ptr = Ops.LHS;
- Idx = Ops.RHS;
- IdxExp = BinOp->getRHS();
- } else { // int + pointer
- PT = BinOp->getRHS()->getType()->getAs<PointerType>();
- OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
- assert((PT || OPT) && "Invalid add expr");
- Ptr = Ops.RHS;
- Idx = Ops.LHS;
- IdxExp = BinOp->getLHS();
- }
-
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (IdxExp->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+ // Zero-extend or sign-extend the index value according to
+ // whether it is signed or not.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOptions().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
}
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (ElementType->isVoidType() || ElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
- Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ptr->getType());
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
}
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ptr, Idx, "add.ptr");
- return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+ if (CGF.getLangOptions().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
}
-Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
- if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
- return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Defined:
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Trapping:
- return EmitOverflowCheckedBinOp(Ops);
+ return EmitOverflowCheckedBinOp(op);
}
}
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
- return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
}
- // Must have binary (not unary) expr here. Unary pointer increment doesn't
- // use this path.
- const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
-
- if (BinOp->getLHS()->getType()->isPointerType() &&
- BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition needs to account for the VLA size for
- // ptr-int
- // The amount of the division needs to account for the VLA size for
- // ptr-ptr.
- CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
- }
-
- const QualType LHSType = BinOp->getLHS()->getType();
- const QualType LHSElementType = LHSType->getPointeeType();
- if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
- // pointer - int
- Value *Idx = Ops.RHS;
- unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (Width < CGF.PointerWidthInBits) {
- // Zero or sign extend the pointer value based on whether the index is
- // signed or not.
- const llvm::Type *IdxType = CGF.IntPtrTy;
- if (BinOp->getRHS()->getType()->isSignedIntegerOrEnumerationType())
- Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
- else
- Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
- }
- Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
-
- // Handle interface types, which are not represented with a concrete type.
- if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
- llvm::Value *InterfaceSize =
- llvm::ConstantInt::get(Idx->getType(),
- CGF.getContext().
- getTypeSizeInChars(OIT).getQuantity());
- Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
- // Explicitly handle GNU void* and function pointer arithmetic
- // extensions. The GNU void* casts amount to no-ops since our void* type is
- // i8*, but this is future proof.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
- Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
- return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ // Otherwise, this is a pointer subtraction.
- if (CGF.getContext().getLangOptions().isSignedOverflowDefined())
- return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
- return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
- }
-
- // pointer - pointer
- Value *LHS = Ops.LHS;
- Value *RHS = Ops.RHS;
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
- CharUnits ElementSize;
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
- // Handle GCC extension for pointer arithmetic on void* and function pointer
- // types.
- if (LHSElementType->isVoidType() || LHSElementType->isFunctionType())
- ElementSize = CharUnits::One();
- else
- ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+ llvm::Value *divisor = 0;
- const llvm::Type *ResultType = ConvertType(Ops.Ty);
- LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
- RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
- // Optimize out the shift for element size of 1.
- if (ElementSize.isOne())
- return BytesBetween;
+ divisor = numElements;
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+ // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
// Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
// pointer difference in C is only defined in the case where both operands
// are pointing to elements of an array.
- Value *BytesPerElt =
- llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
- return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
@@ -2228,20 +2237,41 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
bool Ignore = TestAndClearIgnoreResultAssign();
- // __block variables need to have the rhs evaluated first, plus this should
- // improve codegen just a little.
- Value *RHS = Visit(E->getRHS());
- LValue LHS = EmitCheckedLValue(E->getLHS());
-
- // Store the value into the LHS. Bit-fields are handled specially
- // because the result is altered by the store, i.e., [C99 6.5.16p1]
- // 'An assignment expression has the value of the left operand after
- // the assignment...'.
- if (LHS.isBitField())
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
- &RHS);
- else
- CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ Value *RHS;
+ LValue LHS;
+
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
// If the result is clearly ignored, return now.
if (Ignore)
@@ -2260,7 +2290,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS, E->getType());
+ return EmitLoadOfLValue(LHS);
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
@@ -2548,7 +2578,7 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
- const llvm::Type * DstTy = ConvertType(E->getDstType());
+ const llvm::Type *DstTy = ConvertType(E->getType());
// Going from vec4->vec3 or vec3->vec4 is a special case and requires
// a shuffle vector instead of a bitcast.
@@ -2656,7 +2686,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeAddrLValue(V, E->getType()), E->getType());
+ MakeAddrLValue(V, E->getType()));
} else {
if (E->isArrow())
V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index fa42cd1f36ab..426cca00140c 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -15,15 +15,29 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(llvm::Value *addr) {
+ const llvm::Type *type =
+ cast<llvm::PointerType>(addr->getType())->getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
@@ -55,6 +69,7 @@ static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
RValue Result) {
if (!Method)
return Result;
+
if (!Method->hasRelatedResultType() ||
CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
!Result.isScalar())
@@ -71,6 +86,18 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOptions().ObjCAutoRefCount &&
+ E->getMethodDecl() &&
+ E->getMethodDecl()->hasAttr<NSConsumesSelfAttr>());
+
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
bool isSuperMessage = false;
bool isClassMessage = false;
@@ -80,8 +107,15 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
llvm::Value *Receiver = 0;
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
- Receiver = EmitScalarExpr(E->getInstanceReceiver());
ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (!ter.getInt())
+ Receiver = EmitARCRetainNonBlock(Receiver);
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
case ObjCMessageExpr::Class: {
@@ -92,6 +126,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
@@ -99,6 +136,9 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
case ObjCMessageExpr::SuperClass:
@@ -106,14 +146,36 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
- QualType ResultType =
- E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
+ if (isDelegateInit) {
+ assert(getLangOptions().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ assert(selfAddr && "no self entry for a delegate init call?");
+
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
RValue result;
if (isSuperMessage) {
@@ -134,10 +196,54 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver, Args, OID,
E->getMethodDecl());
}
-
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ const llvm::Type *selfTy =
+ cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
return AdjustRelatedResultType(*this, E, E->getMethodDecl(), result);
}
+namespace {
+struct FinishARCDealloc : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+
+ const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ bool isCategory = isa<ObjCCategoryImplDecl>(impl);
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ isCategory,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
@@ -164,8 +270,21 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
CurGD = OMD;
StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOptions().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
}
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
bool IsAtomic, bool IsStrong) {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
@@ -269,6 +388,9 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
} else {
const llvm::Triple &Triple = getContext().Target.getTriple();
QualType IVART = Ivar->getType();
@@ -346,18 +468,24 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
}
else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- if (PD->getType()->isReferenceType()) {
- RValue RV = RValue::get(LV.getAddress());
- EmitReturnOfRValue(RV, PD->getType());
- }
- else {
- CodeGenTypes &Types = CGM.getTypes();
- RValue RV = EmitLoadOfLValue(LV, IVART);
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ Ivar, 0);
+ QualType propType = PD->getType();
+
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // In ARC, we want to emit this retained.
+ if (getLangOptions().ObjCAutoRefCount &&
+ PD->getType()->isObjCRetainableType())
+ value = emitARCRetainLoadOfScalar(*this, LV, IVART);
+ else
+ value = EmitLoadOfLValue(LV).getScalarVal();
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
}
+
+ EmitReturnOfRValue(RValue::get(value), propType);
}
}
@@ -551,50 +679,35 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
FinishFunction();
}
-// FIXME: these are stolen from CGClass.cpp, which is lame.
namespace {
- struct CallArrayIvarDtor : EHScopeStack::Cleanup {
+ struct DestroyIvar : EHScopeStack::Cleanup {
+ private:
+ llvm::Value *addr;
const ObjCIvarDecl *ivar;
- llvm::Value *self;
- CallArrayIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
- : ivar(ivar), self(self) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- LValue lvalue =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
-
- QualType type = ivar->getType();
- const ConstantArrayType *arrayType
- = CGF.getContext().getAsConstantArrayType(type);
- QualType baseType = CGF.getContext().getBaseElementType(arrayType);
- const CXXRecordDecl *classDecl = baseType->getAsCXXRecordDecl();
-
- llvm::Value *base
- = CGF.Builder.CreateBitCast(lvalue.getAddress(),
- CGF.ConvertType(baseType)->getPointerTo());
- CGF.EmitCXXAggrDestructorCall(classDecl->getDestructor(),
- arrayType, base);
+ CodeGenFunction::Destroyer &destroyer;
+ bool useEHCleanupForArray;
+ public:
+ DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), ivar(ivar), destroyer(*destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
+}
- struct CallIvarDtor : EHScopeStack::Cleanup {
- const ObjCIvarDecl *ivar;
- llvm::Value *self;
- CallIvarDtor(const ObjCIvarDecl *ivar, llvm::Value *self)
- : ivar(ivar), self(self) {}
-
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
- LValue lvalue =
- CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), self, ivar, 0);
-
- QualType type = ivar->getType();
- const CXXRecordDecl *classDecl = type->getAsCXXRecordDecl();
-
- CGF.EmitCXXDestructorCall(classDecl->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- lvalue.getAddress());
- }
- };
+/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
+static void destroyARCStrongWithStore(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}
static void emitCXXDestructMethod(CodeGenFunction &CGF,
@@ -609,29 +722,26 @@ static void emitCXXDestructMethod(CodeGenFunction &CGF,
ivar; ivar = ivar->getNextIvar()) {
QualType type = ivar->getType();
- // Drill down to the base element type.
- QualType baseType = type;
- const ConstantArrayType *arrayType =
- CGF.getContext().getAsConstantArrayType(baseType);
- if (arrayType) baseType = CGF.getContext().getBaseElementType(arrayType);
-
// Check whether the ivar is a destructible type.
- QualType::DestructionKind destructKind = baseType.isDestructedType();
- assert(destructKind == type.isDestructedType());
-
- switch (destructKind) {
- case QualType::DK_none:
- continue;
-
- case QualType::DK_cxx_destructor:
- if (arrayType)
- CGF.EHStack.pushCleanup<CallArrayIvarDtor>(NormalAndEHCleanup,
- ivar, self);
- else
- CGF.EHStack.pushCleanup<CallIvarDtor>(NormalAndEHCleanup,
- ivar, self);
- break;
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ // Use a call to objc_storeStrong to destroy strong ivars, for the
+ // general benefit of the tools.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = &destroyARCStrongWithStore;
+
+ // Otherwise use the default for the destruction kind.
+ } else {
+ destroyer = &CGF.getDestroyer(dtorKind);
}
+
+ CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
+
+ CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
+ cleanupKind & EHCleanup);
}
assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
@@ -645,6 +755,9 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
// Emit .cxx_construct.
if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
@@ -747,6 +860,16 @@ RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
llvm::Value *Receiver = LV.getPropertyRefBaseAddr();
+ if (CGM.getLangOptions().ObjCAutoRefCount) {
+ QualType receiverType;
+ if (E->isSuperReceiver())
+ receiverType = E->getSuperReceiverType();
+ else if (E->isClassReceiver())
+ receiverType = getContext().getObjCClassType();
+ else
+ receiverType = E->getBase()->getType();
+ }
+
// Accesses to 'super' follow a different code path.
if (E->isSuperReceiver())
return AdjustRelatedResultType(*this, E, method,
@@ -757,9 +880,9 @@ RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
const ObjCInterfaceDecl *ReceiverClass
= (E->isClassReceiver() ? E->getClassReceiver() : 0);
return AdjustRelatedResultType(*this, E, method,
- CGM.getObjCRuntime().
- GenerateMessageSend(*this, Return, ResultType, S,
- Receiver, CallArgList(), ReceiverClass));
+ CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Return, ResultType, S,
+ Receiver, CallArgList(), ReceiverClass));
}
void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
@@ -808,17 +931,17 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
return;
}
- // The local variable comes into scope immediately.
- AutoVarEmission variable = AutoVarEmission::invalid();
- if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
- variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
-
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getSourceRange().getBegin());
DI->EmitRegionStart(Builder);
}
+ // The local variable comes into scope immediately.
+ AutoVarEmission variable = AutoVarEmission::invalid();
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
+ variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
+
JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
@@ -958,6 +1081,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
elementIsVariable = true;
+
+ if (D->isARCPseudoStrong())
+ elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
} else {
elementLValue = LValue(); // suppress warning
elementType = cast<Expr>(S.getElement())->getType();
@@ -984,10 +1110,12 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Make sure we have an l-value. Yes, this gets evaluated every
// time through the loop.
- if (!elementIsVariable)
+ if (!elementIsVariable) {
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
-
- EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, elementType);
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
+ } else {
+ EmitScalarInit(CurrentItem, elementLValue);
+ }
// If we do have an element variable, this assignment is the end of
// its initialization.
@@ -1048,7 +1176,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
- EmitStoreThroughLValue(RValue::get(null), elementLValue, elementType);
+ EmitStoreThroughLValue(RValue::get(null), elementLValue);
}
if (DI) {
@@ -1072,4 +1200,957 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
+/// Produce the code for a CK_ObjCProduceObject. Just does a
+/// primitive retain.
+llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetain(type, value);
+}
+
+namespace {
+ struct CallObjCRelease : EHScopeStack::Cleanup {
+ CallObjCRelease(QualType type, llvm::Value *ptr, llvm::Value *condition)
+ : type(type), ptr(ptr), condition(condition) {}
+ QualType type;
+ llvm::Value *ptr;
+ llvm::Value *condition;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *object;
+
+ // If we're in a conditional branch, we had to stash the pointer to be
+ // released away in an alloca.
+ llvm::BasicBlock *cont = 0;
+ if (condition) {
+ llvm::BasicBlock *release = CGF.createBasicBlock("release.yes");
+ cont = CGF.createBasicBlock("release.cont");
+
+ llvm::Value *cond = CGF.Builder.CreateLoad(condition);
+ CGF.Builder.CreateCondBr(cond, release, cont);
+ CGF.EmitBlock(release);
+ object = CGF.Builder.CreateLoad(ptr);
+ } else {
+ object = ptr;
+ }
+
+ CGF.EmitARCRelease(object, /*precise*/ true);
+
+ if (cont) CGF.EmitBlock(cont);
+ }
+ };
+}
+
+/// Produce the code for a CK_ObjCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional. FIXME: this really needs to be supported by the
+ // environment.
+ llvm::AllocaInst *cond;
+ llvm::Value *ptr;
+ if (isInConditionalBranch()) {
+ cond = CreateTempAlloca(Builder.getInt1Ty(), "release.cond");
+ ptr = CreateTempAlloca(object->getType(), "release.value");
+
+ // The alloca is false until we get here.
+ // FIXME: er. doesn't this need to be set at the start of the condition?
+ InitTempAlloca(cond, Builder.getFalse());
+
+ // Then it turns true.
+ Builder.CreateStore(Builder.getTrue(), cond);
+ Builder.CreateStore(object, ptr);
+ } else {
+ cond = 0;
+ ptr = object;
+ }
+
+ EHStack.pushCleanup<CallObjCRelease>(getARCCleanupKind(), type, ptr, cond);
+ return object;
+}
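A minimal sketch (separate from the patch itself) of the conditional-cleanup idea used by EmitObjCConsumeObject and CallObjCRelease above, written as ordinary C++ rather than IR-building code. ConditionalReleaseSketch and releaseStub are hypothetical stand-ins: the flag starts out false and only becomes true on the path that actually produced the object, so the release at the end of the scope fires only when needed.

#include <cstdio>

static void releaseStub(void *p) { if (p) std::puts("objc_release"); }

struct ConditionalReleaseSketch {
  bool  produced = false;   // plays the role of the "release.cond" flag
  void *object   = nullptr; // plays the role of the "release.value" slot

  void noteProduced(void *p) { produced = true; object = p; }

  // Runs at scope exit, like the pushed cleanup: release only if control
  // actually passed through the producing branch.
  ~ConditionalReleaseSketch() { if (produced) releaseStub(object); }
};

int main() {
  int obj = 0;
  {
    ConditionalReleaseSketch cleanup;          // "pushed" before the branch
    if (obj == 0) cleanup.noteProduced(&obj);  // the branch that produced it
  }                                            // prints "objc_release" here
}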
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *type,
+ llvm::StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ // In -fobjc-no-arc-runtime, emit weak references to the runtime
+ // support library.
+ if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+
+ return fn;
+}
+
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ const llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
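A compile-and-run sketch, not part of the patch, of the i8* (i8*) shape that emitARCValueOperation gives to the unary entry points below. objc_retain_stub is a hypothetical stand-in for the real runtime function, and the static_casts stand in for the bitcasts to and from i8*.

#include <cstdio>

static void *objc_retain_stub(void *value) { std::puts("objc_retain"); return value; }

// A null input is a no-op that returns null; otherwise the value round-trips
// through the runtime call without changing its type from the caller's view.
template <typename T>
static T *valueOperationSketch(T *value, void *(*runtimeFn)(void *)) {
  if (value == nullptr)
    return nullptr;
  void *raw = static_cast<void *>(value); // "bitcast to i8*"
  raw = runtimeFn(raw);                   // call the entry point
  return static_cast<T *>(raw);           // "bitcast back to the original type"
}

int main() {
  int x = 0;
  int *same = valueOperationSketch(&x, objc_retain_stub);      // prints "objc_retain"
  (void)same;
  (void)valueOperationSketch<int>(nullptr, objc_retain_stub);  // no call at all
}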
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ const llvm::Type *origType = addr->getType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+
+ // Cast the result back to a dereference of the original type.
+ llvm::Value *result = call;
+ if (origType != CGF.Int8PtrPtrTy)
+ result = CGF.Builder.CreateBitCast(result,
+ cast<llvm::PointerType>(origType)->getElementType());
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2);
+ argTypes[0] = CGF.Int8PtrPtrTy;
+ argTypes[1] = CGF.Int8PtrTy;
+
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ const llvm::Type *origType = value->getType();
+
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
+ result->setDoesNotThrow();
+
+ if (ignored) return 0;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ llvm::Value *dst,
+ llvm::Value *src,
+ llvm::Constant *&fn,
+ llvm::StringRef fnName) {
+ assert(dst->getType() == src->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
+ src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
+ result->setDoesNotThrow();
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* @objc_retain(i8* %value)
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* @objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+}
+
+/// Retain the given object which is the result of a function call.
+/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ llvm::StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
+ /*variadic*/ false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
+ llvm::Value *args[] = { string };
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), args));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker) Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
+
+/// Release the given object.
+/// call void @objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ if (!precise) {
+ llvm::SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+}
+
+/// Store into a strong object. Always calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ const llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
+
+ Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
+
+ if (ignored) return 0;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
+ llvm::Value *newValue,
+ bool ignored) {
+ QualType type = dst.getType();
+ bool isBlock = type->isBlockPointerType();
+
+ // Use a store barrier at -O0 unless this is a block type or the
+ // lvalue is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ !(dst.getAlignment() && dst.getAlignment() < PointerAlignInBytes)) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue = EmitLoadOfScalar(dst);
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst);
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, /*precise*/ false);
+
+ return newValue;
+}
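A runnable sketch of the split-out path above, separate from the patch itself; the *_stub functions are hypothetical stand-ins for objc_retain and objc_release. The point is the ordering: the new value is retained and stored before the old value is released, so a dealloc triggered by the release can never observe the old value still sitting in the slot.

#include <cstdio>

static void *objc_retain_stub(void *p)  { std::puts("retain");  return p; }
static void  objc_release_stub(void *p) { if (p) std::puts("release"); }

static void *storeStrongSketch(void **slot, void *newValue) {
  newValue = objc_retain_stub(newValue); // retain the new value
  void *oldValue = *slot;                // read the old value
  *slot = newValue;                      // store before releasing
  objc_release_stub(oldValue);           // finally, release the old value
  return newValue;
}

int main() {
  void *slot = nullptr;
  int a = 1, b = 2;
  storeStrongSketch(&slot, &a);          // retain
  storeStrongSketch(&slot, &b);          // retain, then release of the old value
}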
+
+/// Autorelease the given object.
+/// call i8* @objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* @objc_retainBlock(i8* %value)
+/// call i8* @objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ const llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* @objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* @objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
+
+/// void @objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ llvm::CallInst *call = Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+}
+
+/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
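The doc comments above describe each weak entry point in terms of a couple of primitives; the sketch below, which is not part of the patch, restates those identities as plain C++ with hypothetical *_stub stand-ins (and with storeWeak doubling as initWeak, which is only valid here because a freshly zeroed slot has no existing weak entry).

#include <cstdio>

static void *objc_loadWeakRetained_stub(void **addr)       { std::puts("loadWeakRetained"); return *addr; }
static void *objc_storeWeak_stub(void **addr, void *value) { std::puts("storeWeak"); *addr = value; return value; }
static void  objc_release_stub(void *)                     { std::puts("release"); }
static void *objc_autorelease_stub(void *v)                { std::puts("autorelease"); return v; }

// objc_loadWeak(addr) ~ autorelease(loadWeakRetained(addr))
static void *loadWeakSketch(void **addr) {
  return objc_autorelease_stub(objc_loadWeakRetained_stub(addr));
}

// objc_destroyWeak(addr) ~ storeWeak(addr, nil)
static void destroyWeakSketch(void **addr) { objc_storeWeak_stub(addr, nullptr); }

// objc_copyWeak(dst, src) ~ release(initWeak(dst, loadWeakRetained(src)))
static void copyWeakSketch(void **dst, void **src) {
  objc_release_stub(objc_storeWeak_stub(dst, objc_loadWeakRetained_stub(src)));
}

// objc_moveWeak(dst, src) ~ (copyWeak(dst, src), destroyWeak(src))
static void moveWeakSketch(void **dst, void **src) {
  copyWeakSketch(dst, src);
  destroyWeakSketch(src);
}

int main() {
  int obj = 0;
  void *a = &obj, *b = nullptr;
  moveWeakSketch(&b, &a);
  (void)loadWeakSketch(&b);
}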
+
+/// Produce the code to do an objc_autoreleasepool_push.
+/// call i8* @objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn);
+ call->setDoesNotThrow();
+
+ return call;
+}
+
+/// Produce the code to pop an autorelease pool.
+/// call void @objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ const llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+}
+
+/// Produce the code to do the MRR (manual retain/release) version of an
+/// objc_autoreleasepool_push, i.e. [[NSAutoreleasePool alloc] init];
+/// alloc is declared as + (id) alloc; in the NSAutoreleasePool class, and
+/// init is declared as - (id) init; in its NSObject superclass.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the MRR code to pop an autorelease pool by sending it -drain:
+/// [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ true);
+}
+
+void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ false);
+}
+
+void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ CGF.EmitARCDestroyWeak(addr);
+}
+
+namespace {
+ struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
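EmitObjCAutoreleasePoolCleanup pairs a push token with a pop cleanup that runs on every exit path, which is the same shape as RAII in ordinary C++. The sketch below is illustrative only and not part of the patch, with hypothetical *_stub functions standing in for the runtime calls or the NSAutoreleasePool messages.

#include <cstdio>

static void *objc_autoreleasePoolPush_stub()      { std::puts("push"); static int token; return &token; }
static void  objc_autoreleasePoolPop_stub(void *) { std::puts("pop"); }

struct AutoreleasePoolScopeSketch {
  void *token;
  AutoreleasePoolScopeSketch() : token(objc_autoreleasePoolPush_stub()) {}
  ~AutoreleasePoolScopeSketch() { objc_autoreleasePoolPop_stub(token); }
};

int main() {
  AutoreleasePoolScopeSketch pool;                 // "push" now, "pop" on exit
  std::puts("body of @autoreleasepool { ... }");
}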
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+ return TryEmitResult();
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOptions().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given that the given expression is some sort of call (which does
+/// not return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ const llvm::Type *resultType = 0;
+
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() && !e->getType().isConstQualified() &&
+ e->getType().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer and cast it to the appropriate type.
+ QualType exprType = e->getType();
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+
+ // Set the source pointer to NULL.
+ llvm::Value *null
+ = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(CGF.ConvertType(exprType)));
+ CGF.EmitStoreOfScalar(null, lv);
+
+ return TryEmitResult(result, true);
+ }
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ObjCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // For reclaims, emit the subexpression as a retained call and
+ // skip the consumption.
+ case CK_ObjCReclaimReturnedObject: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ case CK_GetObjCProperty: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
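A small sketch, separate from the patch, of the value-plus-flag convention that tryEmitARCRetainScalarExpr and EmitARCRetainScalarExpr use above: the flag records whether the peephole already produced a retained value, and the caller retains only when it did not. TryEmitSketch and retain_stub are hypothetical stand-ins.

#include <cstdio>

struct TryEmitSketch {
  void *value;
  bool  retained; // true if the value was already emitted retained
};

static void *retain_stub(void *v) { std::puts("retain"); return v; }

static void *retainScalarSketch(TryEmitSketch r) {
  return r.retained ? r.value : retain_stub(r.value);
}

int main() {
  int obj = 0;
  (void)retainScalarSketch({&obj, true});  // peephole succeeded: no extra retain
  (void)retainScalarSketch({&obj, false}); // fallback path: prints "retain"
}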
+
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (result.getInt()) {
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
+ lvalue.getAlignment(), e->getType(),
+ lvalue.getTBAAInfo());
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+ EmitARCRelease(oldValue, /*precise*/ false);
+ } else {
+ value = EmitARCStoreStrong(lvalue, value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue.getAddress(),
+ lvalue.isVolatileQualified(), lvalue.getAlignment(),
+ e->getType(), lvalue.getTBAAInfo());
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getLBracLoc());
+ DI->EmitRegionStart(Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end(); I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ DI->setLocation(S.getRBracLoc());
+ DI->EmitRegionEnd(Builder);
+ }
+}
+
+/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+/// make sure it survives garbage collection until this point.
+void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
+ // We just use an inline assembly.
+ llvm::Type *paramTypes[] = { VoidPtrTy };
+ llvm::FunctionType *extenderType
+ = llvm::FunctionType::get(VoidTy, paramTypes, /*variadic*/ false);
+ llvm::Value *extender
+ = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
+
+ object = Builder.CreateBitCast(object, VoidPtrTy);
+ Builder.CreateCall(extender, object)->setDoesNotThrow();
+}
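For reference, the same trick written directly in source form rather than through the IR builder; this is a sketch under the assumption of GCC/Clang extended inline-asm syntax and is not part of the patch. The asm body is empty, but because it is volatile and takes the pointer in a register, the optimizer has to keep the object reachable up to this point.

static void extendLifetimeSketch(void *object) {
  asm volatile("" : /* no outputs */ : "r"(object)); // empty, side-effecting asm
}

int main() {
  int obj = 0;
  extendLifetimeSketch(&obj);
}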
+
CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index f0993c5dadbb..61027feb5cb9 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -50,7 +50,7 @@ namespace {
/// avoids constructing the type more than once if it's used more than once.
class LazyRuntimeFunction {
CodeGenModule *CGM;
- std::vector<const llvm::Type*> ArgTys;
+ std::vector<llvm::Type*> ArgTys;
const char *FunctionName;
llvm::Constant *Function;
public:
@@ -63,14 +63,14 @@ class LazyRuntimeFunction {
/// of the arguments.
END_WITH_NULL
void init(CodeGenModule *Mod, const char *name,
- const llvm::Type *RetTy, ...) {
+ llvm::Type *RetTy, ...) {
CGM =Mod;
FunctionName = name;
Function = 0;
ArgTys.clear();
va_list Args;
va_start(Args, RetTy);
- while (const llvm::Type *ArgTy = va_arg(Args, const llvm::Type*))
+ while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*))
ArgTys.push_back(ArgTy);
va_end(Args);
// Push the return type on at the end so we can pop it off easily
@@ -118,24 +118,24 @@ protected:
/// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
/// SEL is included somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
- const llvm::PointerType *SelectorTy;
+ llvm::PointerType *SelectorTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
const llvm::IntegerType *Int8Ty;
/// Pointer to i8 - LLVM type of char*, for all of the places where the
/// runtime needs to deal with C strings.
- const llvm::PointerType *PtrToInt8Ty;
+ llvm::PointerType *PtrToInt8Ty;
/// Instance Method Pointer type. This is a pointer to a function that takes,
/// at a minimum, an object and a selector, and is the generic type for
/// Objective-C methods. Due to differences between variadic / non-variadic
/// calling conventions, it must always be cast to the correct type before
/// actually being used.
- const llvm::PointerType *IMPTy;
+ llvm::PointerType *IMPTy;
/// Type of an untyped Objective-C object. Clang treats id as a built-in type
/// when compiling Objective-C code, so this may be an opaque pointer (i8*),
/// but if the runtime header declaring it is included then it may be a
/// pointer to a structure.
- const llvm::PointerType *IdTy;
+ llvm::PointerType *IdTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
const llvm::PointerType *PtrToIdTy;
@@ -143,15 +143,15 @@ protected:
/// call Objective-C methods.
CanQualType ASTIdTy;
/// LLVM type for C int type.
- const llvm::IntegerType *IntTy;
+ llvm::IntegerType *IntTy;
/// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
/// used in the code to document the difference between i8* meaning a pointer
/// to a C string and i8* meaning a pointer to some opaque type.
- const llvm::PointerType *PtrTy;
+ llvm::PointerType *PtrTy;
/// LLVM type for C long type. The runtime uses this in a lot of places where
/// it should be using intptr_t, but we can't fix this without breaking
/// compatibility with GCC...
- const llvm::IntegerType *LongTy;
+ llvm::IntegerType *LongTy;
/// LLVM type for C size_t. Used in various runtime data structures.
const llvm::IntegerType *SizeTy;
/// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
@@ -392,6 +392,9 @@ private:
/// Emits a reference to a class. This allows the linker to object if there
/// is no class of the matching name.
void EmitClassRef(const std::string &className);
+ /// Emits a pointer to the named class
+ llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
+ bool isWeak);
protected:
/// Looks up the method for sending a message to the specified object. This
/// mechanism differs between the GCC and GNU runtimes, so this method must be
@@ -484,6 +487,7 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
return NULLPtr;
@@ -527,7 +531,7 @@ protected:
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
PtrToObjCSuperTy), cmd};
- return Builder.CreateCall(MsgLookupSuperFn, lookupArgs, lookupArgs+2);
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
}
public:
CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
@@ -596,15 +600,14 @@ class CGObjCGNUstep : public CGObjCGNU {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
- llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs,
- lookupArgs+2);
+ llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs);
slot->setOnlyReadsMemory();
return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
}
public:
CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
- llvm::StructType *SlotStructTy = llvm::StructType::get(VMContext, PtrTy,
+ llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy,
PtrTy, PtrTy, IntTy, IMPTy, NULL);
SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
// Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
@@ -615,7 +618,7 @@ class CGObjCGNUstep : public CGObjCGNU {
PtrToObjCSuperTy, SelectorTy, NULL);
// If we're in ObjC++ mode, then we want to make use of the C++ exception-handling runtime functions.
if (CGM.getLangOptions().CPlusPlus) {
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
// void __cxa_end_catch(void)
@@ -705,10 +708,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
- ObjCSuperTy = llvm::StructType::get(VMContext, IdTy, IdTy, NULL);
+ ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, NULL);
PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_throw(id);
ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
@@ -736,16 +739,19 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
PtrDiffTy, BoolTy, BoolTy, NULL);
// IMP type
- const llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
+ llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
true));
+ const LangOptions &Opts = CGM.getLangOptions();
+ if ((Opts.getGCMode() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ RuntimeVersion = 10;
+
// Don't bother initialising the GC stuff unless we're compiling in GC mode
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (Opts.getGCMode() != LangOptions::NonGC) {
// This is a bit of a hack. We should sort this out by having a proper
// CGObjCGNUstep subclass for GC, but we may want to really support the old
// ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now.
- RuntimeVersion = 10;
// Get selectors needed in GC mode
RetainSel = GetNullarySelector("retain", CGM.getContext());
ReleaseSel = GetNullarySelector("release", CGM.getContext());
@@ -772,24 +778,38 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
}
}
-// This has to perform the lookup every time, since posing and related
-// techniques can modify the name -> class mapping.
-llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *OID) {
- llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name,
+ bool isWeak) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(Name);
// With the incompatible ABI, this will need to be replaced with a direct
// reference to the class symbol. For the compatible nonfragile ABI we are
// still performing this lookup at run time but emitting the symbol for the
// class externally so that we can make the switch later.
- EmitClassRef(OID->getNameAsString());
+ //
+ // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class
+ // with memoized versions or with static references if it's safe to do so.
+ if (!isWeak)
+ EmitClassRef(Name);
ClassName = Builder.CreateStructGEP(ClassName, 0);
+ llvm::Type *ArgTys[] = { PtrToInt8Ty };
llvm::Constant *ClassLookupFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, ArgTys, true),
"objc_lookup_class");
return Builder.CreateCall(ClassLookupFn, ClassName);
}
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ return GetClassNamed(Builder, OID->getNameAsString(), OID->isWeakImported());
+}
+llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ return GetClassNamed(Builder, "NSAutoreleasePool", false);
+}
+
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
const std::string &TypeEncoding, bool lval) {
@@ -908,7 +928,7 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
fields.push_back(Vtable);
fields.push_back(typeName);
llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
NULL), fields, "__objc_eh_typeinfo_" + className,
llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
@@ -929,7 +949,7 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
@@ -969,7 +989,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -979,11 +999,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (isCategoryImpl) {
llvm::Constant *classLookupFunction = 0;
if (IsClassMessage) {
+ llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_meta_class");
+ IdTy, ArgTys, true), "objc_get_meta_class");
} else {
+ llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, PtrTy, true), "objc_get_class");
+ IdTy, ArgTys, true), "objc_get_class");
}
ReceiverClass = Builder.CreateCall(classLookupFunction,
MakeConstantString(Class->getNameAsString()));
@@ -1012,13 +1034,13 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
// Cast the pointer to a simplified version of the class structure
ReceiverClass = Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(
- llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
+ llvm::StructType::get(IdTy, IdTy, NULL)));
// Get the superclass pointer
ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
// Load the superclass pointer
ReceiverClass = Builder.CreateLoad(ReceiverClass);
// Construct the structure used to look up the IMP
- llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(
Receiver->getType(), IdTy, NULL);
llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
@@ -1121,7 +1143,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1187,7 +1209,7 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
if (MethodSels.empty())
return NULLPtr;
// Get the method structure type.
- llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
PtrToInt8Ty, // Method types
IMPTy, //Method pointer
@@ -1217,18 +1239,14 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
Methods);
// Structure containing list pointer, array and array count
- llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
- llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
- llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
- llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodListTy =
+ llvm::StructType::createNamed(VMContext, "");
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
+ ObjCMethodListTy->setBody(
NextPtrTy,
IntTy,
ObjCMethodArrayTy,
NULL);
- // Refine next pointer type to concrete type
- llvm::cast<llvm::OpaqueType>(
- OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
- ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
Methods.clear();
Methods.push_back(llvm::ConstantPointerNull::get(
@@ -1249,7 +1267,7 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
if (IvarNames.size() == 0)
return NULLPtr;
// Get the method structure type.
- llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
@@ -1273,7 +1291,7 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
// Structure containing array and array count
- llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
ObjCIvarArrayTy,
NULL);
@@ -1302,7 +1320,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// Fields marked New ABI are part of the GNUstep runtime. We emit them
// anyway; the classes will still work with the GNU runtime, they will just
// be ignored.
- llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ClassTy = llvm::StructType::get(
PtrToInt8Ty, // class_pointer
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
@@ -1359,7 +1377,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
// Get the method structure type.
- llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
PtrToInt8Ty,
NULL);
@@ -1375,7 +1393,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
MethodNames.size());
llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
Methods);
- llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
IntTy, ObjCMethodArrayTy, NULL);
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
@@ -1388,7 +1406,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
PtrTy, // Should be a recursive pointer, but it's always NULL here.
SizeTy,
ProtocolArrayTy,
@@ -1435,7 +1453,7 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
MethodList->getType(),
@@ -1521,7 +1539,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// The isSynthesized value is always set to 0 in a protocol. It exists to
// simplify the runtime library by allowing it to use the same data
// structures for protocol metadata everywhere.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
PtrToInt8Ty, NULL);
std::vector<llvm::Constant*> Properties;
@@ -1573,7 +1591,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
{llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
PropertyListInit, ".objc_property_list");
@@ -1586,7 +1604,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
OptionalPropertyArray };
llvm::Constant *OptionalPropertyListInit =
- llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
OptionalPropertyListInit->getType(), false,
llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
@@ -1594,7 +1612,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
InstanceMethodList->getType(),
@@ -1641,7 +1659,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
// Protocol list
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
ExistingProtocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
PtrTy, // Should be a recursive pointer, but it's always NULL here.
SizeTy,
ProtocolArrayTy,
@@ -1664,7 +1682,7 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
ProtocolElements, ".objc_protocol_list"), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@@ -1718,7 +1736,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Elements.push_back(llvm::ConstantExpr::getBitCast(
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@@ -1729,7 +1747,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
//
// Property metadata: name, attributes, isSynthesized, setter name, setter
// types, getter name, getter types.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
PtrToInt8Ty, NULL);
std::vector<llvm::Constant*> Properties;
@@ -1788,7 +1806,7 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
{llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
llvm::GlobalValue::InternalLinkage, PropertyListInit,
".objc_property_list");
@@ -1859,13 +1877,25 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
Offset = BaseOffset - superInstanceSize;
}
- IvarOffsets.push_back(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
- IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ // Create the direct offset value
+ std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString();
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar) {
+ OffsetVar->setInitializer(OffsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
false, llvm::GlobalValue::ExternalLinkage,
- llvm::ConstantInt::get(IntTy, Offset),
+ OffsetValue,
"__objc_ivar_offset_value_" + ClassName +"." +
- IVD->getNameAsString()));
+ IVD->getNameAsString());
+ IvarOffsets.push_back(OffsetValue);
+ IvarOffsetValues.push_back(OffsetVar);
}
llvm::GlobalVariable *IvarOffsetArray =
MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
@@ -2005,18 +2035,12 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
- const llvm::Type *SelStructPtrTy = SelectorTy;
+ llvm::Type *SelStructPtrTy = SelectorTy;
if (SelStructTy == 0) {
- SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
- PtrToInt8Ty, NULL);
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
}
- // Name the ObjC types to make the IR a bit easier to read
- TheModule.addTypeName(".objc_selector", SelStructPtrTy);
- TheModule.addTypeName(".objc_id", IdTy);
- TheModule.addTypeName(".objc_imp", IMPTy);
-
std::vector<llvm::Constant*> Elements;
llvm::Constant *Statics = NULLPtr;
// Generate statics list:
@@ -2034,7 +2058,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
ConstantStrings));
llvm::StructType *StaticsListTy =
- llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
llvm::Type *StaticsListPtrTy =
llvm::PointerType::getUnqual(StaticsListTy);
Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
@@ -2049,8 +2073,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Array of classes, categories, and constant objects
llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
- LongTy, SelStructPtrTy,
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
llvm::Type::getInt16Ty(VMContext),
llvm::Type::getInt16Ty(VMContext),
ClassListTy, NULL);
@@ -2132,10 +2155,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// The symbol table is contained in a module which has some version-checking
// constants
- llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
- (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ? NULL : IntTy,
- NULL);
+ (RuntimeVersion >= 10) ? IntTy : NULL, NULL);
Elements.clear();
// Runtime version, used for ABI compatibility checking.
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
@@ -2154,14 +2176,21 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
Elements.push_back(SymTab);
- switch (CGM.getLangOptions().getGCMode()) {
- case LangOptions::GCOnly:
+ if (RuntimeVersion >= 10)
+ switch (CGM.getLangOptions().getGCMode()) {
+ case LangOptions::GCOnly:
Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
- case LangOptions::NonGC:
break;
- case LangOptions::HybridGC:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
- }
+ case LangOptions::NonGC:
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ else
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ break;
+ }
llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
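
With this change the GC field of the module structure is only emitted for RuntimeVersion >= 10, and its value now distinguishes ARC code from plain non-GC code. A compact restatement of the switch above as a sketch, using the LangOptions accessors already visible in the patch:

#include "clang/Basic/LangOptions.h"

// Value pushed into the module structure's GC field when RuntimeVersion >= 10
// (sketch mirroring the switch above).
static int getGCModeFlag(const clang::LangOptions &Opts) {
  switch (Opts.getGCMode()) {
  case clang::LangOptions::GCOnly:   return 2;
  case clang::LangOptions::HybridGC: return 1;
  case clang::LangOptions::NonGC:    return Opts.ObjCAutoRefCount ? 1 : 0;
  }
  return 0;
}
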
@@ -2176,9 +2205,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
+ llvm::Type *ArgTys[] = { llvm::PointerType::getUnqual(ModuleTy) };
llvm::FunctionType *FT =
- llvm::FunctionType::get(Builder.getVoidTy(),
- llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::FunctionType::get(Builder.getVoidTy(), ArgTys, true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
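
The call above also shows the updated FunctionType::get signature, which takes the parameter list as an array rather than as trailing type arguments. A self-contained sketch of declaring the registration hook this way (ModuleTy stands for the module-info struct type built above; Module::getOrInsertFunction is used here in place of CGM.CreateRuntimeFunction to keep the example standalone):

#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"

// Declare void __objc_exec_class(struct objc_module *, ...) (sketch only).
static llvm::Constant *declareExecClass(llvm::Module &M,
                                        llvm::Type *ModuleTy) {
  llvm::Type *ArgTys[] = { llvm::PointerType::getUnqual(ModuleTy) };
  llvm::FunctionType *FT =
      llvm::FunctionType::get(llvm::Type::getVoidTy(M.getContext()), ArgTys,
                              /*isVarArg=*/true);
  return M.getOrInsertFunction("__objc_exec_class", FT);
}
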
@@ -2280,8 +2309,8 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
CGF.Builder.CreateUnreachable();
} else {
- CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
- &ExceptionAsObject+1);
+ CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
+ ExceptionAsObject);
}
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
@@ -2422,10 +2451,19 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
const ObjCIvarDecl *Ivar) {
if (CGM.getLangOptions().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
- return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
- ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
- PtrDiffTy);
+ if (RuntimeVersion < 10)
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ PtrDiffTy);
+ std::string name = "__objc_ivar_offset_value_" +
+ Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ llvm::Value *Offset = TheModule.getGlobalVariable(name);
+ if (!Offset)
+ Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::CommonLinkage,
+ 0, name);
+ return CGF.Builder.CreateLoad(Offset);
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(PtrDiffTy, Offset, "ivar");
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 8c3e9a36e307..010b9e174e46 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -63,10 +63,13 @@ private:
/// The default messenger, used for sends whose ABI is unchanged from
/// the all-integer/pointer case.
llvm::Constant *getMessageSendFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
- "objc_msgSend");
+ "objc_msgSend",
+ llvm::Attribute::NonLazyBind);
}
/// void objc_msgSend_stret (id, SEL, ...)
@@ -75,7 +78,7 @@ private:
/// by indirect reference in the first argument, and therefore the
/// self and selector parameters are shifted over by one.
llvm::Constant *getMessageSendStretFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
params, true),
"objc_msgSend_stret");
@@ -88,7 +91,7 @@ private:
/// floating-point stack; without a special entrypoint, the nil case
/// would be unbalanced.
llvm::Constant *getMessageSendFpretFn() const {
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::Type::getDoubleTy(VMContext),
params, true),
@@ -102,7 +105,7 @@ private:
/// semantics. The class passed is the superclass of the current
/// class.
llvm::Constant *getMessageSendSuperFn() const {
- const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper");
@@ -113,7 +116,7 @@ private:
/// A slightly different messenger used for super calls. The class
/// passed is the current class.
llvm::Constant *getMessageSendSuperFn2() const {
- const llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2");
@@ -124,7 +127,7 @@ private:
///
/// The messenger used for super calls which return an aggregate indirectly.
llvm::Constant *getMessageSendSuperStretFn() const {
- const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper_stret");
@@ -135,7 +138,7 @@ private:
///
/// objc_msgSendSuper_stret with the super2 semantics.
llvm::Constant *getMessageSendSuperStretFn2() const {
- const llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, true),
"objc_msgSendSuper2_stret");
@@ -155,20 +158,20 @@ protected:
CodeGen::CodeGenModule &CGM;
public:
- const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- const llvm::Type *Int8PtrTy;
+ llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ llvm::Type *Int8PtrTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
- const llvm::Type *ObjectPtrTy;
+ llvm::Type *ObjectPtrTy;
/// PtrObjectPtrTy - LLVM type for id *
- const llvm::Type *PtrObjectPtrTy;
+ llvm::Type *PtrObjectPtrTy;
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
- const llvm::Type *SelectorPtrTy;
+ llvm::Type *SelectorPtrTy;
/// ProtocolPtrTy - LLVM type for external protocol handles
/// (typeof(Protocol))
- const llvm::Type *ExternalProtocolPtrTy;
+ llvm::Type *ExternalProtocolPtrTy;
// SuperCTy - clang type for struct objc_super.
QualType SuperCTy;
@@ -176,28 +179,28 @@ public:
QualType SuperPtrCTy;
/// SuperTy - LLVM type for struct objc_super.
- const llvm::StructType *SuperTy;
+ llvm::StructType *SuperTy;
/// SuperPtrTy - LLVM type for struct objc_super *.
- const llvm::Type *SuperPtrTy;
+ llvm::Type *SuperPtrTy;
/// PropertyTy - LLVM type for struct objc_property (struct _prop_t
/// in GCC parlance).
- const llvm::StructType *PropertyTy;
+ llvm::StructType *PropertyTy;
/// PropertyListTy - LLVM type for struct objc_property_list
/// (_prop_list_t in GCC parlance).
- const llvm::StructType *PropertyListTy;
+ llvm::StructType *PropertyListTy;
/// PropertyListPtrTy - LLVM type for struct objc_property_list*.
- const llvm::Type *PropertyListPtrTy;
+ llvm::Type *PropertyListPtrTy;
// MethodTy - LLVM type for struct objc_method.
- const llvm::StructType *MethodTy;
+ llvm::StructType *MethodTy;
/// CacheTy - LLVM type for struct objc_cache.
- const llvm::Type *CacheTy;
+ llvm::Type *CacheTy;
/// CachePtrTy - LLVM type for struct objc_cache *.
- const llvm::Type *CachePtrTy;
-
+ llvm::Type *CachePtrTy;
+
llvm::Constant *getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -270,7 +273,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::Constant *getGcReadWeakFn() {
// id objc_read_weak (id *)
- const llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
@@ -279,7 +282,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::Constant *getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
@@ -288,7 +291,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::Constant *getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
@@ -297,7 +300,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::Constant *getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
@@ -306,8 +309,8 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::Constant *getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
- CGM.PtrDiffTy };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
@@ -316,7 +319,7 @@ public:
/// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
llvm::Constant *GcMemmoveCollectableFn() {
// void *objc_memmove_collectable(void *dst, const void *src, size_t size)
- const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
}
@@ -324,7 +327,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::Constant *getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- const llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
llvm::FunctionType *FTy =
llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
@@ -333,7 +336,7 @@ public:
/// ExceptionThrowFn - LLVM objc_exception_throw function.
llvm::Constant *getExceptionThrowFn() {
// void objc_exception_throw(id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
@@ -349,7 +352,7 @@ public:
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
@@ -358,7 +361,7 @@ public:
/// SyncExitFn - LLVM object_sync_exit function.
llvm::Constant *getSyncExitFn() {
// void objc_sync_exit (id)
- const llvm::Type *args[] = { ObjectPtrTy };
+ llvm::Type *args[] = { ObjectPtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
@@ -397,62 +400,62 @@ public:
class ObjCTypesHelper : public ObjCCommonTypesHelper {
public:
/// SymtabTy - LLVM type for struct objc_symtab.
- const llvm::StructType *SymtabTy;
+ llvm::StructType *SymtabTy;
/// SymtabPtrTy - LLVM type for struct objc_symtab *.
- const llvm::Type *SymtabPtrTy;
+ llvm::Type *SymtabPtrTy;
/// ModuleTy - LLVM type for struct objc_module.
- const llvm::StructType *ModuleTy;
+ llvm::StructType *ModuleTy;
/// ProtocolTy - LLVM type for struct objc_protocol.
- const llvm::StructType *ProtocolTy;
+ llvm::StructType *ProtocolTy;
/// ProtocolPtrTy - LLVM type for struct objc_protocol *.
- const llvm::Type *ProtocolPtrTy;
+ llvm::Type *ProtocolPtrTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension.
- const llvm::StructType *ProtocolExtensionTy;
+ llvm::StructType *ProtocolExtensionTy;
/// ProtocolExtensionPtrTy - LLVM type for struct
/// objc_protocol_extension *.
- const llvm::Type *ProtocolExtensionPtrTy;
+ llvm::Type *ProtocolExtensionPtrTy;
/// MethodDescriptionTy - LLVM type for struct
/// objc_method_description.
- const llvm::StructType *MethodDescriptionTy;
+ llvm::StructType *MethodDescriptionTy;
/// MethodDescriptionListTy - LLVM type for struct
/// objc_method_description_list.
- const llvm::StructType *MethodDescriptionListTy;
+ llvm::StructType *MethodDescriptionListTy;
/// MethodDescriptionListPtrTy - LLVM type for struct
/// objc_method_description_list *.
- const llvm::Type *MethodDescriptionListPtrTy;
+ llvm::Type *MethodDescriptionListPtrTy;
/// ProtocolListTy - LLVM type for struct objc_protocol_list.
- const llvm::Type *ProtocolListTy;
+ llvm::StructType *ProtocolListTy;
/// ProtocolListPtrTy - LLVM type for struct objc_protocol_list*.
- const llvm::Type *ProtocolListPtrTy;
+ llvm::Type *ProtocolListPtrTy;
/// CategoryTy - LLVM type for struct objc_category.
- const llvm::StructType *CategoryTy;
+ llvm::StructType *CategoryTy;
/// ClassTy - LLVM type for struct objc_class.
- const llvm::StructType *ClassTy;
+ llvm::StructType *ClassTy;
/// ClassPtrTy - LLVM type for struct objc_class *.
- const llvm::Type *ClassPtrTy;
+ llvm::Type *ClassPtrTy;
/// ClassExtensionTy - LLVM type for struct objc_class_ext.
- const llvm::StructType *ClassExtensionTy;
+ llvm::StructType *ClassExtensionTy;
/// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
- const llvm::Type *ClassExtensionPtrTy;
+ llvm::Type *ClassExtensionPtrTy;
// IvarTy - LLVM type for struct objc_ivar.
- const llvm::StructType *IvarTy;
+ llvm::StructType *IvarTy;
/// IvarListTy - LLVM type for struct objc_ivar_list.
- const llvm::Type *IvarListTy;
+ llvm::Type *IvarListTy;
/// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
- const llvm::Type *IvarListPtrTy;
+ llvm::Type *IvarListPtrTy;
/// MethodListTy - LLVM type for struct objc_method_list.
- const llvm::Type *MethodListTy;
+ llvm::Type *MethodListTy;
/// MethodListPtrTy - LLVM type for struct objc_method_list *.
- const llvm::Type *MethodListPtrTy;
+ llvm::Type *MethodListPtrTy;
/// ExceptionDataTy - LLVM type for struct _objc_exception_data.
- const llvm::Type *ExceptionDataTy;
-
+ llvm::Type *ExceptionDataTy;
+
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -460,7 +463,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::Constant *getExceptionTryExitFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -468,7 +471,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::Constant *getExceptionExtractFn() {
- const llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, false),
"objc_exception_extract");
@@ -476,7 +479,7 @@ public:
/// ExceptionMatchFn - LLVM objc_exception_match function.
llvm::Constant *getExceptionMatchFn() {
- const llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
+ llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false),
"objc_exception_match");
@@ -486,7 +489,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::Constant *getSetJmpFn() {
// This is specifically the prototype for x86.
- const llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
params, false),
"_setjmp");
@@ -503,46 +506,46 @@ class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
public:
// MethodListnfABITy - LLVM for struct _method_list_t
- const llvm::StructType *MethodListnfABITy;
+ llvm::StructType *MethodListnfABITy;
// MethodListnfABIPtrTy - LLVM for struct _method_list_t*
- const llvm::Type *MethodListnfABIPtrTy;
+ llvm::Type *MethodListnfABIPtrTy;
// ProtocolnfABITy = LLVM for struct _protocol_t
- const llvm::StructType *ProtocolnfABITy;
+ llvm::StructType *ProtocolnfABITy;
// ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
- const llvm::Type *ProtocolnfABIPtrTy;
+ llvm::Type *ProtocolnfABIPtrTy;
// ProtocolListnfABITy - LLVM for struct _objc_protocol_list
- const llvm::StructType *ProtocolListnfABITy;
+ llvm::StructType *ProtocolListnfABITy;
// ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
- const llvm::Type *ProtocolListnfABIPtrTy;
+ llvm::Type *ProtocolListnfABIPtrTy;
// ClassnfABITy - LLVM for struct _class_t
- const llvm::StructType *ClassnfABITy;
+ llvm::StructType *ClassnfABITy;
// ClassnfABIPtrTy - LLVM for struct _class_t*
- const llvm::Type *ClassnfABIPtrTy;
+ llvm::Type *ClassnfABIPtrTy;
// IvarnfABITy - LLVM for struct _ivar_t
- const llvm::StructType *IvarnfABITy;
+ llvm::StructType *IvarnfABITy;
// IvarListnfABITy - LLVM for struct _ivar_list_t
- const llvm::StructType *IvarListnfABITy;
+ llvm::StructType *IvarListnfABITy;
// IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
- const llvm::Type *IvarListnfABIPtrTy;
+ llvm::Type *IvarListnfABIPtrTy;
// ClassRonfABITy - LLVM for struct _class_ro_t
- const llvm::StructType *ClassRonfABITy;
+ llvm::StructType *ClassRonfABITy;
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- const llvm::Type *ImpnfABITy;
+ llvm::Type *ImpnfABITy;
// CategorynfABITy - LLVM for struct _category_t
- const llvm::StructType *CategorynfABITy;
+ llvm::StructType *CategorynfABITy;
// New types for nonfragile abi messaging.
@@ -551,31 +554,31 @@ public:
// IMP messenger;
// SEL name;
// };
- const llvm::StructType *MessageRefTy;
+ llvm::StructType *MessageRefTy;
// MessageRefCTy - clang type for struct _message_ref_t
QualType MessageRefCTy;
// MessageRefPtrTy - LLVM for struct _message_ref_t*
- const llvm::Type *MessageRefPtrTy;
+ llvm::Type *MessageRefPtrTy;
// MessageRefCPtrTy - clang type for struct _message_ref_t*
QualType MessageRefCPtrTy;
// MessengerTy - Type of the messenger (shown as IMP above)
- const llvm::FunctionType *MessengerTy;
+ llvm::FunctionType *MessengerTy;
// SuperMessageRefTy - LLVM for:
// struct _super_message_ref_t {
// SUPER_IMP messenger;
// SEL name;
// };
- const llvm::StructType *SuperMessageRefTy;
+ llvm::StructType *SuperMessageRefTy;
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
- const llvm::Type *SuperMessageRefPtrTy;
+ llvm::Type *SuperMessageRefPtrTy;
llvm::Constant *getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_fixup");
@@ -583,7 +586,7 @@ public:
llvm::Constant *getMessageSendFpretFixupFn() {
// id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_fpret_fixup");
@@ -591,7 +594,7 @@ public:
llvm::Constant *getMessageSendStretFixupFn() {
// id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
- const llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSend_stret_fixup");
@@ -600,7 +603,7 @@ public:
llvm::Constant *getMessageSendSuper2FixupFn() {
// id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
- const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2_fixup");
@@ -609,7 +612,7 @@ public:
llvm::Constant *getMessageSendSuper2StretFixupFn() {
// id objc_msgSendSuper2_stret_fixup(struct objc_super *,
// struct _super_message_ref_t*, ...)
- const llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
params, true),
"objc_msgSendSuper2_stret_fixup");
@@ -622,15 +625,15 @@ public:
}
llvm::Constant *getObjCBeginCatchFn() {
- const llvm::Type *params[] = { Int8PtrTy };
+ llvm::Type *params[] = { Int8PtrTy };
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
params, false),
"objc_begin_catch");
}
- const llvm::StructType *EHTypeTy;
- const llvm::Type *EHTypePtrTy;
-
+ llvm::StructType *EHTypeTy;
+ llvm::Type *EHTypePtrTy;
+
ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCNonFragileABITypesHelper(){}
};
@@ -887,6 +890,11 @@ private:
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
/// EmitSuperClassRef - Emits reference to class's main metadata class.
llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
@@ -1158,6 +1166,11 @@ private:
/// for the given class reference.
llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
/// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given super class reference.
@@ -1402,6 +1415,19 @@ llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
}
llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().ObjCIdRedefinitionType, /*ForEH=*/true);
+ }
+ if (T->isObjCClassType() ||
+ T->isObjCQualifiedClassType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().ObjCClassRedefinitionType, /*ForEH=*/true);
+ }
+ if (T->isObjCObjectPointerType())
+ return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
+
llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
return 0;
}
@@ -1526,7 +1552,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
ActualArgs.add(RValue::get(Arg0), Arg0Ty);
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
- ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ ActualArgs.addFrom(CallArgs);
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
@@ -1562,7 +1588,7 @@ static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
if (FQT.isObjCGCStrong())
return Qualifiers::Strong;
- if (FQT.isObjCGCWeak())
+ if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
return Qualifiers::Weak;
if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
@@ -1579,7 +1605,8 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
@@ -1866,7 +1893,7 @@ CGObjCMac::EmitProtocolList(llvm::Twine Name,
// This list is null terminated.
ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
// This field is only used by the runtime.
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
@@ -1876,7 +1903,7 @@ CGObjCMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.size()),
ProtocolRefs);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
4, false);
@@ -1950,13 +1977,13 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
unsigned PropertySize =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
Properties.size());
Values[2] = llvm::ConstantArray::get(AT, Properties);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init,
@@ -1994,12 +2021,12 @@ llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
Methods.size());
Values[1] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
return llvm::ConstantExpr::getBitCast(GV,
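
This hunk is one of several that swap std::vector<llvm::Constant*> plus ConstantStruct::get(VMContext, ...) for a fixed-size array and ConstantStruct::getAnon. A minimal sketch of the pattern (IntTy, MethodDescTy and Methods stand in for the values available in the surrounding helper):

// Build { i32 count, [N x MethodDescTy] list } as an anonymous constant struct.
llvm::Constant *Values[2];
Values[0] = llvm::ConstantInt::get(IntTy, Methods.size());
Values[1] = llvm::ConstantArray::get(
    llvm::ArrayType::get(MethodDescTy, Methods.size()), Methods);
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
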
@@ -2044,7 +2071,7 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
ClassMethods.push_back(GetMethodConstant(*i));
}
- std::vector<llvm::Constant*> Values(7);
+ llvm::Constant *Values[7];
Values[0] = GetClassName(OCD->getIdentifier());
Values[1] = GetClassName(Interface->getIdentifier());
LazySymbols.insert(Interface->getIdentifier());
@@ -2129,7 +2156,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Interface->all_referenced_protocol_begin(),
Interface->all_referenced_protocol_end());
unsigned Flags = eClassFlags_Factory;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
Flags |= eClassFlags_HasCXXStructors;
unsigned Size =
CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
@@ -2166,7 +2193,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
}
}
- std::vector<llvm::Constant*> Values(12);
+ llvm::Constant *Values[12];
Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
// Record a reference to the super class.
@@ -2225,7 +2252,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= eClassFlags_Hidden;
- std::vector<llvm::Constant*> Values(12);
+ llvm::Constant *Values[12];
// The isa for the metaclass is the root of the hierarchy.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
@@ -2339,7 +2366,7 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
uint64_t Size =
CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = BuildIvarLayout(ID, false);
Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
@@ -2383,11 +2410,8 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
ObjCInterfaceDecl *OID =
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
- llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
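
The loop above now walks the interface's intrusive ivar list instead of first copying the ivars into a SmallVector. A short sketch of the traversal, with names as in the surrounding code:

// Walk every declared ivar of OID in declaration order.
for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
     IVD; IVD = IVD->getNextIvar()) {
  if (!IVD->getDeclName())
    continue;              // unnamed bit-fields get no metadata entry
  // ... emit an ivar record for IVD ...
}
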
@@ -2402,12 +2426,12 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
if (Ivars.empty())
return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
Ivars.size());
Values[1] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV;
if (ForClass)
@@ -2459,17 +2483,16 @@ llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
Methods.size());
Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodListPtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
@@ -2558,7 +2581,7 @@ namespace {
: S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
// Check whether we need to call objc_exception_try_exit.
// In optimized code, this branch will always be folded.
llvm::BasicBlock *FinallyCallExit =
@@ -2674,14 +2697,12 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;
- CGF.Builder.CreateCall(WriteHazard, Locals.begin(), Locals.end())
- ->setDoesNotThrow();
+ CGF.Builder.CreateCall(WriteHazard, Locals)->setDoesNotThrow();
}
void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
assert(!Locals.empty());
- Builder.CreateCall(ReadHazard, Locals.begin(), Locals.end())
- ->setDoesNotThrow();
+ Builder.CreateCall(ReadHazard, Locals)->setDoesNotThrow();
}
/// Emit read hazards in all the protected blocks, i.e. all the blocks
@@ -2745,7 +2766,7 @@ void FragileHazards::collectLocals() {
}
llvm::FunctionType *FragileHazards::GetAsmFnType() {
- llvm::SmallVector<const llvm::Type *, 16> tys(Locals.size());
+ llvm::SmallVector<llvm::Type *, 16> tys(Locals.size());
for (unsigned i = 0, e = Locals.size(); i != e; ++i)
tys[i] = Locals[i]->getType();
return llvm::FunctionType::get(CGF.VoidTy, tys, false);
@@ -3389,7 +3410,7 @@ void CGObjCCommonMac::EmitImageInfo() {
Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
llvm::GlobalVariable *GV =
CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
- llvm::ConstantArray::get(AT, values, 2),
+ llvm::ConstantArray::get(AT, values),
Section,
0,
true);
@@ -3430,7 +3451,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
if (!NumClasses && !NumCategories)
return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
- std::vector<llvm::Constant*> Values(5);
+ llvm::Constant *Values[5];
Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
@@ -3452,7 +3473,7 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
NumClasses + NumCategories),
Symbols);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
@@ -3461,25 +3482,35 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
-llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- LazySymbols.insert(ID->getIdentifier());
-
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
- ObjCTypes.ClassPtrTy);
+ llvm::ConstantExpr::getBitCast(GetClassName(II),
+ ObjCTypes.ClassPtrTy);
Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- 4, true);
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
bool lvalue) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
@@ -3567,12 +3598,18 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
uint64_t LastBitfieldOrUnnamedOffset = 0;
+ uint64_t FirstFieldDelta = 0;
if (RecFields.empty())
return;
unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
-
+ if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
+ FieldDecl *FirstField = RecFields[0];
+ FirstFieldDelta =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ }
+
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
@@ -3580,9 +3617,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
// Note that 'i' here is actually the field index inside RD of Field,
// although this dependency is hidden.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- FieldOffset = RL.getFieldOffset(i) / ByteSizeInBits;
+ FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
} else
- FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+ FieldOffset =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
// Skip over unnamed or bitfields
if (!Field->getIdentifier() || Field->isBitField()) {
@@ -3861,16 +3899,25 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool hasUnion = false;
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ !CGM.getLangOptions().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
- llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
- const ObjCInterfaceDecl *OI = OMD->getClassInterface();
- CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
-
+ ObjCInterfaceDecl *OI =
+ const_cast<ObjCInterfaceDecl*>(OMD->getClassInterface());
llvm::SmallVector<FieldDecl*, 32> RecFields;
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+ if (CGM.getLangOptions().ObjCAutoRefCount) {
+ for (ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ RecFields.push_back(cast<FieldDecl>(IVD));
+ }
+ else {
+ llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+ }
if (RecFields.empty())
return llvm::Constant::getNullValue(PtrTy);
@@ -4113,21 +4160,19 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *name;
// char *attributes;
// }
- PropertyTy = llvm::StructType::get(VMContext, Int8PtrTy, Int8PtrTy, NULL);
- CGM.getModule().addTypeName("struct._prop_t",
- PropertyTy);
+ PropertyTy = llvm::StructType::createNamed("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, NULL);
// struct _prop_list_t {
// uint32_t entsize; // sizeof(struct _prop_t)
// uint32_t count_of_properties;
// struct _prop_t prop_list[count_of_properties];
// }
- PropertyListTy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(PropertyTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._prop_list_t",
- PropertyListTy);
+ PropertyListTy =
+ llvm::StructType::createNamed("struct._prop_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
// struct _prop_list_t *
PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
@@ -4136,16 +4181,14 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *method_type;
// char *_imp;
// }
- MethodTy = llvm::StructType::get(VMContext, SelectorPtrTy,
- Int8PtrTy,
- Int8PtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+ MethodTy = llvm::StructType::createNamed("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ NULL);
// struct _objc_cache *
- CacheTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CacheTy = llvm::StructType::createNamed(VMContext, "struct._objc_cache");
CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+
}
ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
@@ -4155,22 +4198,18 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *types;
// }
MethodDescriptionTy =
- llvm::StructType::get(VMContext, SelectorPtrTy,
- Int8PtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_method_description",
- MethodDescriptionTy);
+ llvm::StructType::createNamed("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, NULL);
// struct _objc_method_description_list {
// int count;
// struct _objc_method_description[1];
// }
MethodDescriptionListTy =
- llvm::StructType::get(VMContext, IntTy,
- llvm::ArrayType::get(MethodDescriptionTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_method_description_list",
- MethodDescriptionListTy);
+ llvm::StructType::createNamed("struct._objc_method_description_list",
+ IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
// struct _objc_method_description_list *
MethodDescriptionListPtrTy =
@@ -4185,29 +4224,27 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_property_list *instance_properties;
// }
ProtocolExtensionTy =
- llvm::StructType::get(VMContext, IntTy,
- MethodDescriptionListPtrTy,
- MethodDescriptionListPtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_protocol_extension",
- ProtocolExtensionTy);
+ llvm::StructType::createNamed("struct._objc_protocol_extension",
+ IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
// Handle recursive construction of Protocol and ProtocolList types
- llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext);
- llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+ ProtocolTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol");
- const llvm::Type *T =
- llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ ProtocolListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+ ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
LongTy,
- llvm::ArrayType::get(ProtocolTyHolder, 0),
+ llvm::ArrayType::get(ProtocolTy, 0),
NULL);
- cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
// struct _objc_protocol {
// struct _objc_protocol_extension *isa;
@@ -4216,22 +4253,15 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_method_description_list *instance_methods;
// struct _objc_method_description_list *class_methods;
// }
- T = llvm::StructType::get(VMContext, ProtocolExtensionPtrTy,
- Int8PtrTy,
- llvm::PointerType::getUnqual(ProtocolListTyHolder),
- MethodDescriptionListPtrTy,
- MethodDescriptionListPtrTy,
- NULL);
- cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
-
- ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_protocol_list",
- ProtocolListTy);
+ ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTy),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+
// struct _objc_protocol_list *
ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
- ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
// Class description structures
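
With opaque types and refineAbstractTypeTo gone, the hunk above builds the mutually recursive protocol types by creating named, empty structs first and filling in their bodies afterwards. A minimal self-referential example of that createNamed/setBody pairing (Context and LongTy are assumed from the surrounding constructor; the struct name is invented):

// A named struct that refers to itself, built in two steps.
llvm::StructType *ListTy =
    llvm::StructType::createNamed(Context, "struct._example_list");
ListTy->setBody(llvm::PointerType::getUnqual(ListTy),  // pointer to next chunk
                LongTy,                                 // element count
                NULL);
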
@@ -4241,32 +4271,26 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *ivar_type;
// int ivar_offset;
// }
- IvarTy = llvm::StructType::get(VMContext, Int8PtrTy,
- Int8PtrTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+ IvarTy = llvm::StructType::createNamed("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, NULL);
// struct _objc_ivar_list *
- IvarListTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_ivar_list");
IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
// struct _objc_method_list *
- MethodListTy = llvm::OpaqueType::get(VMContext);
- CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListTy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_method_list");
MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
// struct _objc_class_extension *
ClassExtensionTy =
- llvm::StructType::get(VMContext, IntTy,
- Int8PtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ llvm::StructType::createNamed("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
- llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+ ClassTy = llvm::StructType::createNamed(VMContext, "struct._objc_class");
// struct _objc_class {
// Class isa;
@@ -4282,24 +4306,20 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *ivar_layout;
// struct _objc_class_ext *ext;
// };
- T = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ClassTyHolder),
- llvm::PointerType::getUnqual(ClassTyHolder),
- Int8PtrTy,
- LongTy,
- LongTy,
- LongTy,
- IvarListPtrTy,
- MethodListPtrTy,
- CachePtrTy,
- ProtocolListPtrTy,
- Int8PtrTy,
- ClassExtensionPtrTy,
- NULL);
- cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
-
- ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
- CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy),
+ llvm::PointerType::getUnqual(ClassTy),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+
ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
// struct _objc_category {
@@ -4310,15 +4330,11 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// uint32_t size; // sizeof(struct _objc_category)
// struct _objc_property_list *instance_properties;// category's @property
// }
- CategoryTy = llvm::StructType::get(VMContext, Int8PtrTy,
- Int8PtrTy,
- MethodListPtrTy,
- MethodListPtrTy,
- ProtocolListPtrTy,
- IntTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+ CategoryTy =
+ llvm::StructType::createNamed("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, NULL);
// Global metadata structures
@@ -4329,13 +4345,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// short cat_def_cnt;
// char *defs[cls_def_cnt + cat_def_cnt];
// }
- SymtabTy = llvm::StructType::get(VMContext, LongTy,
- SelectorPtrTy,
- ShortTy,
- ShortTy,
- llvm::ArrayType::get(Int8PtrTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabTy =
+ llvm::StructType::createNamed("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), NULL);
SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
// struct _objc_module {
@@ -4345,12 +4358,8 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_symtab* symtab;
// }
ModuleTy =
- llvm::StructType::get(VMContext, LongTy,
- LongTy,
- Int8PtrTy,
- SymtabPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+ llvm::StructType::createNamed("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
// FIXME: This is the size of the setjmp buffer and should be target
@@ -4358,16 +4367,14 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
uint64_t SetJmpBufferSize = 18;
// Exceptions
- const llvm::Type *StackPtrTy = llvm::ArrayType::get(
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(
llvm::Type::getInt8PtrTy(VMContext), 4);
ExceptionDataTy =
- llvm::StructType::get(VMContext,
- llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
- StackPtrTy, NULL);
- CGM.getModule().addTypeName("struct._objc_exception_data",
- ExceptionDataTy);
+ llvm::StructType::createNamed("struct._objc_exception_data",
+ llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+ SetJmpBufferSize),
+ StackPtrTy, NULL);
}
@@ -4378,12 +4385,11 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// uint32_t method_count;
// struct _objc_method method_list[method_count];
// }
- MethodListnfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(MethodTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct.__method_list_t",
- MethodListnfABITy);
+ MethodListnfABITy =
+ llvm::StructType::createNamed("struct.__method_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
// struct method_list_t *
MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
@@ -4401,22 +4407,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
// Holder for struct _protocol_list_t *
- llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
-
- ProtocolnfABITy = llvm::StructType::get(VMContext, ObjectPtrTy,
- Int8PtrTy,
- llvm::PointerType::getUnqual(
- ProtocolListTyHolder),
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- PropertyListPtrTy,
- IntTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._protocol_t",
- ProtocolnfABITy);
+ ProtocolListnfABITy =
+ llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+
+ ProtocolnfABITy =
+ llvm::StructType::createNamed("struct._protocol_t",
+ ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -4425,14 +4430,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// long protocol_count; // Note, this is 32/64 bit
// struct _protocol_t *[protocol_count];
// }
- ProtocolListnfABITy = llvm::StructType::get(VMContext, LongTy,
- llvm::ArrayType::get(
- ProtocolnfABIPtrTy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._objc_protocol_list",
- ProtocolListnfABITy);
- cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
- ProtocolListnfABITy);
+ ProtocolListnfABITy->setBody(LongTy,
+ llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
+ NULL);
// struct _objc_protocol_list*
ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
@@ -4444,26 +4444,25 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// uint32_t alignment;
// uint32_t size;
// }
- IvarnfABITy = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(LongTy),
- Int8PtrTy,
- Int8PtrTy,
- IntTy,
- IntTy,
- NULL);
- CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+ IvarnfABITy =
+ llvm::StructType::createNamed("struct._ivar_t",
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
// struct _ivar_list_t {
// uint32 entsize; // sizeof(struct _ivar_t)
// uint32 count;
// struct _ivar_t list[count];
// }
- IvarListnfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- llvm::ArrayType::get(
- IvarnfABITy, 0),
- NULL);
- CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+ IvarListnfABITy =
+ llvm::StructType::createNamed("struct._ivar_list_t",
+ IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0),
+ NULL);
IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
@@ -4482,22 +4481,21 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
// FIXME. Add 'reserved' field in 64bit abi mode!
- ClassRonfABITy = llvm::StructType::get(VMContext, IntTy,
- IntTy,
- IntTy,
- Int8PtrTy,
- Int8PtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- IvarListnfABIPtrTy,
- Int8PtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._class_ro_t",
- ClassRonfABITy);
+ ClassRonfABITy = llvm::StructType::createNamed("struct._class_ro_t",
+ IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- const llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
->getPointerTo();
@@ -4509,19 +4507,13 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// struct class_ro_t *ro;
// }
- llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
- ClassnfABITy =
- llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(ClassTyHolder),
- llvm::PointerType::getUnqual(ClassTyHolder),
- CachePtrTy,
- llvm::PointerType::getUnqual(ImpnfABITy),
- llvm::PointerType::getUnqual(ClassRonfABITy),
- NULL);
- CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
-
- cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
- ClassnfABITy);
+ ClassnfABITy = llvm::StructType::createNamed(VMContext, "struct._class_t");
+ ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
+ llvm::PointerType::getUnqual(ClassnfABITy),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
// LLVM for struct _class_t *
ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
@@ -4534,14 +4526,14 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// const struct _protocol_list_t * const protocols;
// const struct _prop_list_t * const properties;
// }
- CategorynfABITy = llvm::StructType::get(VMContext, Int8PtrTy,
- ClassnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- PropertyListPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+ CategorynfABITy = llvm::StructType::createNamed("struct._category_t",
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
// New types for nonfragile abi messaging.
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -4576,26 +4568,25 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// SUPER_IMP messenger;
// SEL name;
// };
- SuperMessageRefTy = llvm::StructType::get(VMContext, ImpnfABITy,
- SelectorPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+ SuperMessageRefTy =
+ llvm::StructType::createNamed("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, NULL);
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
-
+
// struct objc_typeinfo {
// const void** vtable; // objc_ehtype_vtable + 2
// const char* name; // c++ typeinfo string
// Class cls;
// };
- EHTypeTy = llvm::StructType::get(VMContext,
- llvm::PointerType::getUnqual(Int8PtrTy),
- Int8PtrTy,
- ClassnfABIPtrTy,
- NULL);
- CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypeTy =
+ llvm::StructType::createNamed("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
@@ -4743,7 +4734,12 @@ enum MetaDataDlags {
CLS_META = 0x1,
CLS_ROOT = 0x2,
OBJC2_CLS_HIDDEN = 0x10,
- CLS_EXCEPTION = 0x20
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
@@ -4767,6 +4763,10 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getNameAsString();
std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+
+ if (CGM.getLangOptions().ObjCAutoRefCount)
+ flags |= CLS_COMPILED_BY_ARC;
+
Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
@@ -4936,7 +4936,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
ID->getClassInterface()->getVisibility() == HiddenVisibility;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (!ID->getClassInterface()->getSuperClass()) {
// class is root
@@ -4972,7 +4972,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
flags = CLS;
if (classIsHidden)
flags |= OBJC2_CLS_HIDDEN;
- if (ID->getNumIvarInitializers())
+ if (ID->hasCXXStructors())
flags |= eClassFlags_ABI2_HasCXXStructors;
if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
@@ -5174,7 +5174,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
if (Methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
- std::vector<llvm::Constant*> Values(3);
+ llvm::Constant *Values[3];
// sizeof(struct _objc_method)
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
@@ -5183,19 +5183,15 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
Methods.size());
Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::InternalLinkage,
- Init,
- Name);
- GV->setAlignment(
- CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.AddUsedGlobal(GV);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodListnfABIPtrTy);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
}
/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
@@ -5262,17 +5258,14 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
std::vector<llvm::Constant*> Ivars, Ivar(5);
- const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
// FIXME. Consolidate this with similar code in GenerateClass.
- // Collect declared and synthesized ivars in a small vector.
- llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
@@ -5298,14 +5291,15 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Return null for empty list.
if (Ivars.empty())
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- std::vector<llvm::Constant*> Values(3);
+
+ llvm::Constant *Values[3];
unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
Ivars.size());
Values[2] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
@@ -5491,7 +5485,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.push_back(llvm::Constant::getNullValue(
ObjCTypes.ProtocolnfABIPtrTy));
- std::vector<llvm::Constant*> Values(2);
+ llvm::Constant *Values[2];
Values[0] =
llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
Values[1] =
@@ -5500,11 +5494,10 @@ CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
ProtocolRefs.size()),
ProtocolRefs);
- llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::InternalLinkage,
- Init,
- Name);
+ Init, Name);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
CGM.getTargetData().getABITypeAlignment(Init->getType()));
@@ -5654,8 +5647,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
if (!messageRef) {
// Build the message ref structure.
llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
- llvm::Constant *init =
- llvm::ConstantStruct::get(VMContext, values, 2, false);
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
messageRef = new llvm::GlobalVariable(CGM.getModule(),
init->getType(),
/*constant*/ false,
@@ -5719,28 +5711,39 @@ CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
return GV;
}
-llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID) {
- llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
-
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
if (!Entry) {
- std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string ClassName(getClassSymbolPrefix() + II->getName().str());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::InternalLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
- CGM.getTargetData().getABITypeAlignment(
- ObjCTypes.ClassnfABIPtrTy));
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
}
-
+
return Builder.CreateLoad(Entry, "tmp");
}
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
llvm::Value *
CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID) {
@@ -6043,12 +6046,10 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy,
"tmp");
- llvm::Value *Args[] = { Exception };
- CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(),
- Args, Args+1)
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
.setDoesNotReturn();
} else {
- CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn(), 0, 0)
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
.setDoesNotReturn();
}
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 21150f1f848c..09c8d0b28f9c 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -52,14 +52,14 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
// implemented. This should be fixed to get the information from the layout
// directly.
unsigned Index = 0;
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
- if (Ivar == Ivars[k])
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl*>(Container);
+
+ for (ObjCIvarDecl *IVD = IDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (Ivar == IVD)
break;
++Index;
}
- assert(Index != Ivars.size() && "Ivar is not inside container!");
assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
return RL->getFieldOffset(Index);
@@ -134,7 +134,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
ContainingTypeSize, ContainingTypeAlign));
return LValue::MakeBitfield(V, *Info,
- IvarTy.getCVRQualifiers() | CVRQualifiers);
+ IvarTy.withCVRQualifiers(CVRQualifiers));
}
namespace {
@@ -151,13 +151,13 @@ namespace {
bool MightThrow;
llvm::Value *Fn;
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
if (!MightThrow) {
CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
return;
}
- CGF.EmitCallOrInvoke(Fn, 0, 0);
+ CGF.EmitCallOrInvoke(Fn);
}
};
}
@@ -175,10 +175,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CodeGenFunction::FinallyInfo FinallyInfo;
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
- FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
- beginCatchFn,
- endCatchFn,
- exceptionRethrowFn);
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
llvm::SmallVector<CatchHandler, 8> Handlers;
@@ -266,9 +264,9 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
- // Pop out of the normal cleanup on the finally.
+ // Pop out of the finally.
if (S.getFinallyStmt())
- CGF.ExitFinallyBlock(FinallyInfo);
+ FinallyInfo.exit(CGF);
if (Cont.isValid())
CGF.EmitBlock(Cont.getBlock());
@@ -281,7 +279,7 @@ namespace {
CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
: SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
}
};
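Both EmitIvarList and LookupFieldBitOffset above drop ShallowCollectObjCIvars in favour of walking the interface's own declared-ivar chain. A short sketch of that traversal, assuming only the AST API the hunks rely on (all_declared_ivar_begin and getNextIvar); the counting helper itself is illustrative:

    #include "clang/AST/DeclObjC.h"

    // Sketch: count the named instance variables of an interface by walking
    // the declared-ivar chain, mirroring the loop shape used above.
    static unsigned countNamedIvars(const clang::ObjCInterfaceDecl *Interface) {
      // all_declared_ivar_begin() is non-const in this revision, hence the
      // const_cast also seen in the patch.
      clang::ObjCInterfaceDecl *ID =
          const_cast<clang::ObjCInterfaceDecl*>(Interface);
      unsigned N = 0;
      for (clang::ObjCIvarDecl *IVD = ID->all_declared_ivar_begin();
           IVD; IVD = IVD->getNextIvar())
        if (IVD->getDeclName())   // skip unnamed bit-fields, as EmitIvarList does
          ++N;
      return N;
    }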
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 866d5d83fe60..7accc70c9623 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -205,7 +205,13 @@ public:
/// interface decl.
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID) = 0;
-
+
+
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ assert(false && "autoreleasepool unsupported in this ABI");
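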
+ return 0;
+ }
+
/// EnumerationMutationFunction - Return the function that's called by the
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index c73b199e31f7..e564c7070525 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -658,9 +658,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
break;
}
- llvm::Constant *Init =
- llvm::ConstantStruct::get(VMContext, &Fields[0], Fields.size(),
- /*Packed=*/false);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 6d9fc0589e93..8a450298f70a 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -176,11 +176,11 @@ class CGRecordLayout {
private:
/// The LLVM type corresponding to this record layout; used when
/// laying it out as a complete object.
- llvm::PATypeHolder CompleteObjectType;
+ llvm::StructType *CompleteObjectType;
/// The LLVM type for the non-virtual part of this record layout;
/// used when laying it out as a base subobject.
- llvm::PATypeHolder BaseSubobjectType;
+ llvm::StructType *BaseSubobjectType;
/// Map from (non-bit-field) struct field to the corresponding llvm struct
/// type field no. This info is populated by record builder.
@@ -208,8 +208,8 @@ private:
bool IsZeroInitializableAsBase : 1;
public:
- CGRecordLayout(const llvm::StructType *CompleteObjectType,
- const llvm::StructType *BaseSubobjectType,
+ CGRecordLayout(llvm::StructType *CompleteObjectType,
+ llvm::StructType *BaseSubobjectType,
bool IsZeroInitializable,
bool IsZeroInitializableAsBase)
: CompleteObjectType(CompleteObjectType),
@@ -219,14 +219,14 @@ public:
/// \brief Return the "complete object" LLVM type associated with
/// this record.
- const llvm::StructType *getLLVMType() const {
- return cast<llvm::StructType>(CompleteObjectType.get());
+ llvm::StructType *getLLVMType() const {
+ return CompleteObjectType;
}
/// \brief Return the "base subobject" LLVM type associated with
/// this record.
- const llvm::StructType *getBaseSubobjectLLVMType() const {
- return cast<llvm::StructType>(BaseSubobjectType.get());
+ llvm::StructType *getBaseSubobjectLLVMType() const {
+ return BaseSubobjectType;
}
/// \brief Check whether this struct can be C++ zero-initialized
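The switch from llvm::PATypeHolder to plain llvm::StructType* here, like the OpaqueType removal in CGObjCMac.cpp, reflects the LLVM type-system rewrite of this period: identified structs are created up front and given a body later, instead of being refined in place. A minimal sketch of the two-phase idiom, using only createNamed and setBody as they appear in this patch; the struct name and fields are illustrative:

    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Sketch of the create-then-setBody idiom that replaces
    // OpaqueType + refineAbstractTypeTo.
    static llvm::StructType *makeSelfReferentialNode(llvm::LLVMContext &Ctx) {
      // Phase 1: an identified, initially opaque struct, so it can be named
      // and pointed at (even by itself) before its body exists.
      llvm::StructType *Node = llvm::StructType::createNamed(Ctx, "struct.node");

      // Phase 2: attach the body once all element types are known.  The
      // variadic form is NULL-terminated, as in the hunks above.
      Node->setBody(llvm::Type::getInt32Ty(Ctx),
                    llvm::PointerType::getUnqual(Node),
                    NULL);
      return Node;
    }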
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 0d72f854ba3e..2b07bafa0096 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
@@ -34,7 +35,7 @@ class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
///
- llvm::SmallVector<const llvm::Type *, 16> FieldTypes;
+ llvm::SmallVector<llvm::Type *, 16> FieldTypes;
/// BaseSubobjectType - Holds the LLVM type for the non-virtual part
/// of the struct. For example, consider:
@@ -51,7 +52,7 @@ public:
///
/// This only gets initialized if the base subobject type is
/// different from the complete-object type.
- const llvm::StructType *BaseSubobjectType;
+ llvm::StructType *BaseSubobjectType;
/// FieldInfo - Holds a field and its corresponding LLVM field number.
llvm::DenseMap<const FieldDecl *, unsigned> Fields;
@@ -108,8 +109,8 @@ private:
/// LayoutUnionField - Will layout a field in an union and return the type
/// that the field will have.
- const llvm::Type *LayoutUnionField(const FieldDecl *Field,
- const ASTRecordLayout &Layout);
+ llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
/// LayoutUnion - Will layout a union RecordDecl.
void LayoutUnion(const RecordDecl *D);
@@ -150,7 +151,7 @@ private:
void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
/// AppendField - Appends a field with the given offset and type.
- void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);
+ void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
@@ -164,7 +165,7 @@ private:
/// getByteArrayType - Returns a byte array type with the given number of
/// elements.
- const llvm::Type *getByteArrayType(CharUnits NumBytes);
+ llvm::Type *getByteArrayType(CharUnits NumBytes);
/// AppendBytes - Append a given number of bytes to the record.
void AppendBytes(CharUnits numBytes);
@@ -229,7 +230,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
- const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
+ const llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
@@ -268,6 +269,18 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
unsigned AccessedTargetBits = 0; // The number of target bits accessed.
unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+ // If requested, widen the initial bit-field access to be register sized. The
+ // theory is that this is most likely to allow multiple accesses into the same
+ // structure to be coalesced, and that the backend should be smart enough to
+ // narrow the store if no coalescing is ever done.
+ //
+ // The subsequent code handles aligning these accesses to common boundaries
+ // and guarantees that we do not access past the end of the structure.
+ if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
+ if (AccessWidth < Types.getTarget().getRegisterWidth())
+ AccessWidth = Types.getTarget().getRegisterWidth();
+ }
+
// Round down from the field offset to find the first access position that is
// at an aligned offset of the initial access type.
uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
@@ -427,7 +440,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
CharUnits fieldOffsetInBytes
= Types.getContext().toCharUnitsFromBits(fieldOffset);
- const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+ llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
CharUnits typeAlignment = getTypeAlignment(Ty);
// If the type alignment is larger then the struct alignment, we must use
@@ -475,7 +488,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
return true;
}
-const llvm::Type *
+llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
const ASTRecordLayout &Layout) {
if (Field->isBitField()) {
@@ -486,7 +499,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
if (FieldSize == 0)
return 0;
- const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
llvm::RoundUpToAlignment(FieldSize,
Types.getContext().Target.getCharAlign()));
@@ -502,7 +515,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
// This is a regular union field.
Fields[Field] = 0;
- return Types.ConvertTypeForMemRecursive(Field->getType());
+ return Types.ConvertTypeForMem(Field->getType());
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
@@ -510,7 +523,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
- const llvm::Type *unionType = 0;
+ llvm::Type *unionType = 0;
CharUnits unionSize = CharUnits::Zero();
CharUnits unionAlign = CharUnits::Zero();
@@ -521,7 +534,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
assert(layout.getFieldOffset(fieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
- const llvm::Type *fieldType = LayoutUnionField(*field, layout);
+ llvm::Type *fieldType = LayoutUnionField(*field, layout);
if (!fieldType)
continue;
@@ -586,10 +599,8 @@ void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
// approximation, which is to use the base subobject type if it
// has the same LLVM storage size as the nvsize.
- const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
AppendField(baseOffset, subobjectType);
-
- Types.addBaseSubobjectTypeName(base, baseLayout);
}
void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
@@ -723,13 +734,14 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
FieldTypes.push_back(getByteArrayType(NumBytes));
}
- BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
- FieldTypes, Packed);
+
+ BaseSubobjectType = llvm::StructType::createNamed(Types.getLLVMContext(), "",
+ FieldTypes, Packed);
+ Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
- if (needsPadding) {
- // Pull the padding back off.
+ // Pull the padding back off.
+ if (needsPadding)
FieldTypes.pop_back();
- }
return true;
}
@@ -806,7 +818,7 @@ void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
}
void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
- const llvm::Type *fieldType) {
+ llvm::Type *fieldType) {
CharUnits fieldSize =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
@@ -852,10 +864,10 @@ bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
return true;
}
-const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
- const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
if (numBytes > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
@@ -911,17 +923,16 @@ void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
}
}
-CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty) {
CGRecordLayoutBuilder Builder(*this);
Builder.Layout(D);
- const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
- Builder.FieldTypes,
- Builder.Packed);
+ Ty->setBody(Builder.FieldTypes, Builder.Packed);
// If we're in C++, compute the base subobject type.
- const llvm::StructType *BaseTy = 0;
+ llvm::StructType *BaseTy = 0;
if (isa<CXXRecordDecl>(D)) {
BaseTy = Builder.BaseSubobjectType;
if (!BaseTy) BaseTy = Ty;
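The widening block added to CGBitFieldInfo::MakeInfo above trades wider loads for better coalescing and relies on the backend to narrow them again when nothing is coalesced. A small sketch of just that initial-width decision with concrete, illustrative numbers (the variable names are the ones used in the hunk; the widths would normally come from CodeGenTypes/TargetInfo):

    // Sketch: the widening decision added above, with example values.
    uint64_t FieldOffset = 3;            // illustrative bit offset
    unsigned TypeSizeInBits = 8;         // e.g. a char-typed bit-field
    unsigned RegisterWidth = 64;         // e.g. x86-64
    bool UseRegisterSizedBitfieldAccess = true;

    unsigned AccessWidth = TypeSizeInBits;
    if (UseRegisterSizedBitfieldAccess && AccessWidth < RegisterWidth)
      AccessWidth = RegisterWidth;       // start wide; later code narrows if needed

    // As in the pre-existing code, round down to an aligned start for the
    // chosen width; with the numbers above, AccessStart == 0, AccessWidth == 64.
    uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);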
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a982621be79a..07bddb797265 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -151,6 +151,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::ObjCForCollectionStmtClass:
EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
case Stmt::CXXTryStmtClass:
EmitCXXTryStmt(cast<CXXTryStmt>(*S));
@@ -764,7 +767,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, false, true));
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(), true));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -1275,11 +1278,16 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return Constraint;
llvm::StringRef Register = Attr->getLabel();
assert(Target.isValidGCCRegisterName(Register));
- // FIXME: We should check which registers are compatible with "r" or "x".
- if (Constraint != "r" && Constraint != "x") {
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) &&
+ !Info.allowsRegister()) {
CGM.ErrorUnsupported(&Stmt, "__asm__");
return Constraint;
}
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
return "{" + Register.str() + "}";
}
@@ -1291,7 +1299,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
llvm::Value *Arg;
if (Info.allowsRegister() || !Info.allowsMemory()) {
if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
- Arg = EmitLoadOfLValue(InputValue, InputType).getScalarVal();
+ Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
@@ -1400,15 +1408,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<LValue> ResultRegDests;
std::vector<QualType> ResultRegQualTys;
- std::vector<const llvm::Type *> ResultRegTypes;
- std::vector<const llvm::Type *> ResultTruncRegTypes;
- std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Type *> ResultRegTypes;
+ std::vector<llvm::Type *> ResultTruncRegTypes;
+ std::vector<llvm::Type*> ArgTypes;
std::vector<llvm::Value*> Args;
// Keep track of inout constraints.
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
- std::vector<const llvm::Type*> InOutArgTypes;
+ std::vector<llvm::Type*> InOutArgTypes;
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
@@ -1457,7 +1465,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
- if (const llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
ResultRegTypes.back()))
ResultRegTypes.back() = AdjTy;
@@ -1550,6 +1558,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
llvm::StringRef Clobber = S.getClobber(i)->getString();
+ if (Clobber != "memory" && Clobber != "cc")
Clobber = Target.getNormalizedGCCRegisterName(Clobber);
if (i != 0 || NumConstraints != 0)
@@ -1582,7 +1591,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, AsmString, Constraints,
S.isVolatile() || S.getNumOutputs() == 0);
- llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
Result->addAttribute(~0, llvm::Attribute::NoUnwind);
// Slap the source location of the inline asm into a !srcloc metadata on the
@@ -1629,7 +1638,6 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
- EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
- ResultRegQualTys[i]);
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
}
}
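The AddVariableConstraints change above stops special-casing "r" and "x" and instead asks the target whether the constraint is a register constraint at all, then canonicalizes the register named in the asm("reg") label. A sketch of that check as a free function, using only the TargetInfo calls visible in the hunk; the function name and signature are illustrative:

    #include "clang/Basic/TargetInfo.h"
    #include "llvm/ADT/StringRef.h"
    #include <string>

    // Sketch: turn a register variable's constraint into an explicit
    // "{reg}" constraint, rejecting only provably non-register constraints.
    static std::string constraintForRegisterVariable(const clang::TargetInfo &Target,
                                                     llvm::StringRef Constraint,
                                                     llvm::StringRef Register) {
      clang::TargetInfo::ConstraintInfo Info(Constraint, "");
      if (Target.validateOutputConstraint(Info) && !Info.allowsRegister())
        return Constraint.str();          // cannot honor the named register
      // Fold "eax", "%rax", etc. to the target's canonical spelling first.
      Register = Target.getNormalizedGCCRegisterName(Register);
      return "{" + Register.str() + "}";
    }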
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index 3b4c50910bf8..0387daeb654f 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -16,9 +16,12 @@ using namespace clang;
using namespace CodeGen;
namespace {
- struct DestroyTemporary {
- static void Emit(CodeGenFunction &CGF, bool forEH,
- const CXXDestructorDecl *dtor, llvm::Value *addr) {
+ struct DestroyTemporary : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *dtor;
+ llvm::Value *addr;
+ DestroyTemporary(const CXXDestructorDecl *dtor, llvm::Value *addr)
+ : dtor(dtor), addr(addr) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
addr);
}
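DestroyTemporary above shows the cleanup shape this patch converges on everywhere: a concrete EHScopeStack::Cleanup that stores its operands and implements Emit(CodeGenFunction&, Flags), replacing both the old bool-IsForEH signature and the UnconditionalCleanupN adaptors removed from CodeGenFunction.h further down. A sketch of a cleanup written against that interface; the cleanup itself is illustrative, and the classes live in lib/CodeGen's internal headers:

    // Sketch only; EHScopeStack and CodeGenFunction come from the
    // CodeGen-internal "CodeGenFunction.h".
    namespace {
      struct CallReleaseFn : clang::CodeGen::EHScopeStack::Cleanup {
        llvm::Value *Fn;
        llvm::Value *Arg;
        CallReleaseFn(llvm::Value *fn, llvm::Value *arg) : Fn(fn), Arg(arg) {}

        void Emit(clang::CodeGen::CodeGenFunction &CGF, Flags flags) {
          // flags.isForEHCleanup() replaces the old 'bool IsForEH' parameter;
          // here both paths emit the same non-throwing call.
          CGF.Builder.CreateCall(Fn, Arg)->setDoesNotThrow();
        }
      };
    }
    // Pushed as before:
    //   CGF.EHStack.pushCleanup<CallReleaseFn>(clang::CodeGen::NormalAndEHCleanup,
    //                                          fn, arg);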
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index aefc41e50002..cec02cdfc235 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -390,8 +390,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
llvm::Constant *Init =
- llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents().data(),
- Builder.getVTTComponents().size());
+ llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents());
VTT->setInitializer(Init);
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 9ac5e67ada4d..c161b79fd3a0 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -2937,7 +2937,8 @@ void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
// We can't emit thunks for member functions with incomplete types.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- if (CGM.getTypes().VerifyFuncTypeComplete(MD->getType().getTypePtr()))
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ cast<FunctionType>(MD->getType().getTypePtr())))
return;
EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
@@ -3165,7 +3166,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
}
llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
- return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
+ return llvm::ConstantArray::get(ArrayType, Inits);
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index e830e9a6fbcb..eff6e56c1f80 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -18,7 +18,7 @@
#include "llvm/GlobalVariable.h"
#include "clang/Basic/ABI.h"
#include "clang/AST/CharUnits.h"
-#include "GlobalDecl.h"
+#include "clang/AST/GlobalDecl.h"
namespace clang {
class CXXRecordDecl;
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 7f77d552032a..4d0b8410e451 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -101,8 +101,6 @@ public:
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
/// bitrange.
class LValue {
- // FIXME: alignment?
-
enum {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
@@ -128,6 +126,8 @@ class LValue {
const ObjCPropertyRefExpr *PropertyRefExpr;
};
+ QualType Type;
+
// 'const' is unused here
Qualifiers Quals;
@@ -156,8 +156,9 @@ class LValue {
llvm::MDNode *TBAAInfo;
private:
- void Initialize(Qualifiers Quals, unsigned Alignment = 0,
+ void Initialize(QualType Type, Qualifiers Quals, unsigned Alignment = 0,
llvm::MDNode *TBAAInfo = 0) {
+ this->Type = Type;
this->Quals = Quals;
this->Alignment = Alignment;
assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
@@ -182,6 +183,12 @@ public:
return Quals.getCVRQualifiers() & ~Qualifiers::Const;
}
+ QualType getType() const { return Type; }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
bool isObjCIvar() const { return Ivar; }
void setObjCIvar(bool Value) { Ivar = Value; }
@@ -203,6 +210,10 @@ public:
bool isObjCStrong() const {
return Quals.getObjCGCAttr() == Qualifiers::Strong;
}
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
@@ -219,6 +230,10 @@ public:
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ void setAddress(llvm::Value *address) {
+ assert(isSimple());
+ V = address;
+ }
// vector elt lvalue
llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
@@ -251,36 +266,36 @@ public:
return PropertyRefExpr;
}
- static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
- ASTContext &Context,
+ static LValue MakeAddr(llvm::Value *address, QualType type,
+ unsigned alignment, ASTContext &Context,
llvm::MDNode *TBAAInfo = 0) {
- Qualifiers Quals = T.getQualifiers();
- Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
+ Qualifiers qs = type.getQualifiers();
+ qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
LValue R;
R.LVType = Simple;
- R.V = V;
- R.Initialize(Quals, Alignment, TBAAInfo);
+ R.V = address;
+ R.Initialize(type, qs, alignment, TBAAInfo);
return R;
}
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
- unsigned CVR) {
+ QualType type) {
LValue R;
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
- unsigned CVR) {
+ QualType type) {
LValue R;
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
@@ -290,13 +305,14 @@ public:
/// bit-field.
/// \param Info - The information describing how to perform the bit-field
/// access.
- static LValue MakeBitfield(llvm::Value *BaseValue, const CGBitFieldInfo &Info,
- unsigned CVR) {
+ static LValue MakeBitfield(llvm::Value *BaseValue,
+ const CGBitFieldInfo &Info,
+ QualType type) {
LValue R;
R.LVType = BitField;
R.V = BaseValue;
R.BitFieldInfo = &Info;
- R.Initialize(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(type, type.getQualifiers());
return R;
}
@@ -309,7 +325,7 @@ public:
R.LVType = PropertyRef;
R.V = Base;
R.PropertyRefExpr = E;
- R.Initialize(Qualifiers());
+ R.Initialize(QualType(), Qualifiers());
return R;
}
};
@@ -318,9 +334,11 @@ public:
class AggValueSlot {
/// The address.
llvm::Value *Addr;
+
+ // Qualifiers
+ Qualifiers Quals;
// Associated flags.
- bool VolatileFlag : 1;
bool LifetimeFlag : 1;
bool RequiresGCollection : 1;
@@ -335,25 +353,31 @@ public:
static AggValueSlot ignored() {
AggValueSlot AV;
AV.Addr = 0;
- AV.VolatileFlag = AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
+ AV.Quals = Qualifiers();
+ AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
return AV;
}
/// forAddr - Make a slot for an aggregate value.
///
/// \param Volatile - true if the slot should be volatile-initialized
+ ///
+ /// \param Qualifiers - The qualifiers that dictate how the slot
+ /// should be initialized. Only 'volatile' and the Objective-C
+ /// lifetime qualifiers matter.
+ ///
/// \param LifetimeExternallyManaged - true if the slot's lifetime
/// is being externally managed; false if a destructor should be
/// registered for any temporaries evaluated into the slot
/// \param RequiresGCollection - true if the slot is located
/// somewhere that ObjC GC calls should be emitted for
- static AggValueSlot forAddr(llvm::Value *Addr, bool Volatile,
+ static AggValueSlot forAddr(llvm::Value *Addr, Qualifiers Quals,
bool LifetimeExternallyManaged,
bool RequiresGCollection = false,
bool IsZeroed = false) {
AggValueSlot AV;
AV.Addr = Addr;
- AV.VolatileFlag = Volatile;
+ AV.Quals = Quals;
AV.LifetimeFlag = LifetimeExternallyManaged;
AV.RequiresGCollection = RequiresGCollection;
AV.IsZeroed = IsZeroed;
@@ -361,9 +385,10 @@ public:
}
static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
- bool RequiresGCollection = false) {
- return forAddr(LV.getAddress(), LV.isVolatileQualified(),
- LifetimeExternallyManaged, RequiresGCollection);
+ bool RequiresGCollection = false,
+ bool IsZeroed = false) {
+ return forAddr(LV.getAddress(), LV.getQuals(),
+ LifetimeExternallyManaged, RequiresGCollection, IsZeroed);
}
bool isLifetimeExternallyManaged() const {
@@ -373,8 +398,14 @@ public:
LifetimeFlag = Managed;
}
+ Qualifiers getQualifiers() const { return Quals; }
+
bool isVolatile() const {
- return VolatileFlag;
+ return Quals.hasVolatile();
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
}
bool requiresGCollection() const {
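The CGValue.h hunks above thread full type information through LValue and AggValueSlot: the LValue::Make* factories now take a QualType, and AggValueSlot::forAddr takes Qualifiers in place of the old volatile flag, so ARC code can later ask either one for its Objective-C lifetime. A small sketch of building a slot against the new signature; CGValue.h is a lib/CodeGen-internal header and the helper is illustrative:

    #include "CGValue.h"          // lib/CodeGen-internal header
    #include "clang/AST/Type.h"

    // Sketch: make an aggregate slot that carries the type's qualifiers
    // (volatile, ObjC lifetime) rather than a lone 'bool Volatile'.
    static clang::CodeGen::AggValueSlot
    makeSlotFor(llvm::Value *Addr, clang::QualType T) {
      clang::CodeGen::AggValueSlot Slot =
        clang::CodeGen::AggValueSlot::forAddr(Addr, T.getQualifiers(),
                                              /*LifetimeExternallyManaged=*/false);
      // Both of these queries are new or re-based on Quals in this patch.
      (void) Slot.isVolatile();
      (void) Slot.getObjCLifetime();
      return Slot;
    }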
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 62fa1f9843de..263e01e4f18a 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -34,6 +34,7 @@ namespace clang {
BackendAction Action;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
llvm::raw_ostream *AsmOutStream;
ASTContext *Context;
@@ -46,13 +47,16 @@ namespace clang {
public:
BackendConsumer(BackendAction action, Diagnostic &_Diags,
const CodeGenOptions &compopts,
- const TargetOptions &targetopts, bool TimePasses,
+ const TargetOptions &targetopts,
+ const LangOptions &langopts,
+ bool TimePasses,
const std::string &infile, llvm::raw_ostream *OS,
LLVMContext &C) :
Diags(_Diags),
Action(action),
CodeGenOpts(compopts),
TargetOpts(targetopts),
+ LangOpts(langopts),
AsmOutStream(OS),
LLVMIRGeneration("LLVM IR Generation Time"),
Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
@@ -126,7 +130,7 @@ namespace clang {
void *OldContext = Ctx.getInlineAsmDiagnosticContext();
Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
- EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
TheModule.get(), Action, AsmOutStream);
Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
@@ -285,6 +289,7 @@ ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
BEConsumer =
new BackendConsumer(BA, CI.getDiagnostics(),
CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(),
CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
*VMContext);
return BEConsumer;
@@ -332,7 +337,8 @@ void CodeGenAction::ExecuteAction() {
}
EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), TheModule.get(),
+ CI.getTargetOpts(), CI.getLangOpts(),
+ TheModule.get(),
BA, OS);
return;
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 150cb69b4d1a..702897a7c448 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -31,7 +31,7 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
- BlockInfo(0), BlockPointer(0),
+ AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
@@ -45,11 +45,11 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
}
-const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
-const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+llvm::Type *CodeGenFunction::ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
@@ -142,6 +142,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ if (EHStack.stable_begin() != PrologueCleanupDepth)
+ PopCleanupBlocks(PrologueCleanupDepth);
+
// Emit function epilog (to return).
EmitReturnBlock();
@@ -206,15 +213,15 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
- const llvm::PointerType *PointerTy = Int8PtrTy;
- const llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ llvm::PointerType *PointerTy = Int8PtrTy;
+ llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
const llvm::FunctionType *FunctionTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
llvm::ConstantInt::get(Int32Ty, 0),
"callsite");
@@ -311,9 +318,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOptions().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
@@ -326,7 +343,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
QualType Ty = (*i)->getType();
if (Ty->isVariablyModifiedType())
- EmitVLASize(Ty);
+ EmitVariablyModifiedType(Ty);
}
}
@@ -692,13 +709,20 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
- SizeVal = GetVLASize(vlaType);
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = getVLASize(vlaType);
+
+ SizeVal = numElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
vla = vlaType;
} else {
return;
}
} else {
- SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
+ SizeVal = CGM.getSize(Size);
vla = 0;
}
@@ -761,60 +785,198 @@ llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
return IndirectBranch->getParent();
}
-llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
- llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
+ QualType &baseType,
+ llvm::Value *&addr) {
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ llvm::Value *numVLAElements = 0;
+ if (isa<VariableArrayType>(arrayType)) {
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return numVLAElements;
+ }
+ } while (isa<VariableArrayType>(arrayType));
- assert(SizeEntry && "Did not emit size for type");
- return SizeEntry;
-}
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
-llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
- assert(Ty->isVariablyModifiedType() &&
- "Must pass variably modified type to EmitVLASizes!");
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ llvm::SmallVector<llvm::Value*, 8> gepIndices;
- EnsureInsertPoint();
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ // It's more efficient to calculate the count from the LLVM
+ // constant-length arrays than to re-evaluate the array bounds.
+ uint64_t countFromCLAs = 1;
+
+ const llvm::ArrayType *llvmArrayType =
+ cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+ while (true) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ if (!llvmArrayType) break;
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert(arrayType && "LLVM and Clang types are out-of-synch");
+ }
+
+ baseType = arrayType->getElementType();
+
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices.begin(),
+ gepIndices.end(), "array.begin");
- if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
- // unknown size indication requires no size computation.
- if (!VAT->getSizeExpr())
- return 0;
- llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+ llvm::Value *numElements
+ = llvm::ConstantInt::get(SizeTy, countFromCLAs);
- if (!SizeEntry) {
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ // If we had any VLA dimensions, factor them in.
+ if (numVLAElements)
+ numElements = Builder.CreateNUWMul(numVLAElements, numElements);
- // Get the element size;
- QualType ElemTy = VAT->getElementType();
- llvm::Value *ElemSize;
- if (ElemTy->isVariableArrayType())
- ElemSize = EmitVLASize(ElemTy);
- else
- ElemSize = llvm::ConstantInt::get(SizeTy,
- getContext().getTypeSizeInChars(ElemTy).getQuantity());
+ return numElements;
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ llvm::Value *numElements = 0;
- llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
- NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize->getType() == SizeTy);
- SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ numElements = Builder.CreateNUWMul(numElements, vlaSize);
}
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
- return SizeEntry;
- }
+ return std::pair<llvm::Value*,QualType>(numElements, elementType);
+}
- if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- EmitVLASize(AT->getElementType());
- return 0;
- }
+void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
- if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
- EmitVLASize(PT->getInnerType());
- return 0;
- }
+ EnsureInsertPoint();
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ type = type.getCanonicalType();
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("unexpected dependent or non-canonical type!");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Pointer:
+ type = cast<PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *size = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ llvm::Value *&entry = VLASizeMap[size];
+ if (!entry) {
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
+ /*signed*/ false);
+ }
+ }
+ type = vat->getElementType();
+ break;
+ }
- const PointerType *PT = Ty->getAs<PointerType>();
- assert(PT && "unknown VM type!");
- EmitVLASize(PT->getPointeeType());
- return 0;
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<FunctionType>(ty)->getResultType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
}
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
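GetVLASize/EmitVLASize above give way to getVLASize(), which returns the element count and element type as a pair, and EmitVariablyModifiedType(), which walks the type and records each VLA bound once. Callers that previously received a byte size now do the multiplication themselves, as the EmitNullInitialization hunk shows. A sketch of that caller-side computation, written as if inside a CodeGenFunction member with vlaType a const VariableArrayType*, using only calls that appear in this hunk:

    // Sketch: byte size of a VLA under the new interface.
    llvm::Value *numElts;
    QualType eltType;
    llvm::tie(numElts, eltType) = getVLASize(vlaType);

    llvm::Value *sizeInBytes = numElts;
    CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
    if (!eltSize.isOne())   // skip the multiply for byte-sized elements
      sizeInBytes = Builder.CreateNUWMul(sizeInBytes, CGM.getSize(eltSize));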
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index bb8fd8e24306..f27ed947b8d8 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -18,8 +18,10 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/CharUnits.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
@@ -63,6 +65,7 @@ namespace clang {
class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
+ class ObjCAutoreleasePoolStmt;
namespace CodeGen {
class CodeGenTypes;
@@ -179,15 +182,39 @@ public:
/// Cleanup implementations should generally be declared in an
/// anonymous namespace.
class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
public:
- // Anchor the construction vtable. We use the destructor because
- // gcc gives an obnoxious warning if there are virtual methods
- // with an accessible non-virtual destructor. Unfortunately,
- // declaring this destructor makes it non-trivial, but there
- // doesn't seem to be any other way around this warning.
- //
- // This destructor will never be called.
- virtual ~Cleanup();
+ /// Generation flags.
+ class Flags {
+ enum {
+ F_IsForEH = 0x1,
+ F_IsNormalCleanupKind = 0x2,
+ F_IsEHCleanupKind = 0x4
+ };
+ unsigned flags;
+
+ public:
+ Flags() : flags(0) {}
+
+ /// isForEH - true if the current emission is for an EH cleanup.
+ bool isForEHCleanup() const { return flags & F_IsForEH; }
+ bool isForNormalCleanup() const { return !isForEHCleanup(); }
+ void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+ bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
+ void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+ /// isEHCleanupKind - true if the cleanup was pushed as an EH
+ /// cleanup.
+ bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+ void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+ };
+
+ // Provide a virtual destructor to suppress a very common warning
+ // that unfortunately cannot be suppressed without this. Cleanups
+ // should not rely on this destructor ever being called.
+ virtual ~Cleanup() {}
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
@@ -196,29 +223,7 @@ public:
///
// \param IsForEHCleanup true if this is for an EH cleanup, false
/// if for a normal cleanup.
- virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
- };
-
- /// UnconditionalCleanupN stores its N parameters and just passes
- /// them to the real cleanup function.
- template <class T, class A0>
- class UnconditionalCleanup1 : public Cleanup {
- A0 a0;
- public:
- UnconditionalCleanup1(A0 a0) : a0(a0) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- T::Emit(CGF, IsForEHCleanup, a0);
- }
- };
-
- template <class T, class A0, class A1>
- class UnconditionalCleanup2 : public Cleanup {
- A0 a0; A1 a1;
- public:
- UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
- T::Emit(CGF, IsForEHCleanup, a0, a1);
- }
+ virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
};
/// ConditionalCleanupN stores the saved form of its N parameters,
@@ -228,9 +233,9 @@ public:
typedef typename DominatingValue<A0>::saved_type A0_saved;
A0_saved a0_saved;
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
- T::Emit(CGF, IsForEHCleanup, a0);
+ T(a0).Emit(CGF, flags);
}
public:
@@ -245,10 +250,10 @@ public:
A0_saved a0_saved;
A1_saved a1_saved;
- void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
- T::Emit(CGF, IsForEHCleanup, a0, a1);
+ T(a0, a1).Emit(CGF, flags);
}
public:
@@ -256,6 +261,51 @@ public:
: a0_saved(a0), a1_saved(a1) {}
};
+ template <class T, class A0, class A1, class A2>
+ class ConditionalCleanup3 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ T(a0, a1, a2).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
+ };
+
+ template <class T, class A0, class A1, class A2, class A3>
+ class ConditionalCleanup4 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ typedef typename DominatingValue<A3>::saved_type A3_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+ A3_saved a3_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
+ T(a0, a1, a2, a3).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
+ };
+
private:
// The implementation for this class is in CGException.h and
// CGException.cpp; the definition is here because it's used as a
@@ -568,6 +618,10 @@ public:
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
@@ -584,6 +638,9 @@ public:
bool CatchUndefined;
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
@@ -626,16 +683,28 @@ public:
/// rethrows.
llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- // A struct holding information about a finally block's IR
- // generation. For now, doesn't actually hold anything.
- struct FinallyInfo {
- };
+ /// A class controlling the emission of a finally block.
+ class FinallyInfo {
+ /// Where the catchall's edge through the cleanup should go.
+ JumpDest RethrowDest;
+
+ /// A function to call to enter the catch.
+ llvm::Constant *BeginCatchFn;
- FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
- llvm::Constant *BeginCatchFn,
- llvm::Constant *EndCatchFn,
- llvm::Constant *RethrowFn);
- void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+ /// An i1 variable indicating whether or not the @finally is
+ /// running for an exception.
+ llvm::AllocaInst *ForEHVar;
+
+ /// An i8* variable into which the exception pointer to rethrow
+ /// has been saved.
+ llvm::AllocaInst *SavedExnVar;
+
+ public:
+ void enter(CodeGenFunction &CGF, const Stmt *Finally,
+ llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn);
+ void exit(CodeGenFunction &CGF);
+ };
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
@@ -644,10 +713,8 @@ public:
void pushFullExprCleanup(CleanupKind kind, A0 a0) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
- if (!isInConditionalBranch()) {
- typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
- return EHStack.pushCleanup<CleanupType>(kind, a0);
- }
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0);
typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
@@ -663,10 +730,8 @@ public:
void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
- if (!isInConditionalBranch()) {
- typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
- return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
- }
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0, a1);
typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
@@ -676,6 +741,48 @@ public:
initFullExprCleanup();
}
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+
+ typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+ typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
+
+ typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
+ a2_saved, a3_saved);
+ initFullExprCleanup();
+ }
+
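
The new 3- and 4-argument overloads follow the same pattern as the 1- and 2-argument ones: when the push happens inside a conditionally evaluated expression, each argument is converted to a dominating saved form and only restored when the cleanup actually fires. A minimal standalone model of that save/restore discipline, not clang code; Saved and CleanupStack are illustrative stand-ins for DominatingValue and EHScopeStack.

#include <cstdio>
#include <functional>
#include <vector>

// Stand-in for DominatingValue<T>: the "saved" form here is a plain copy,
// but in IR generation it may be a stack slot that dominates the cleanup.
template <class T> struct Saved {
  T value;
  static Saved save(T v) { return Saved{v}; }
  T restore() const { return value; }
};

struct CleanupStack {
  std::vector<std::function<void()>> cleanups;

  // Mirrors the shape of pushFullExprCleanup<T>(kind, a0, a1): capture the
  // arguments in saved form now, restore them when the cleanup runs.
  template <class A0, class A1>
  void pushConditional(void (*emit)(A0, A1), A0 a0, A1 a1) {
    Saved<A0> s0 = Saved<A0>::save(a0);
    Saved<A1> s1 = Saved<A1>::save(a1);
    cleanups.push_back([=] { emit(s0.restore(), s1.restore()); });
  }

  void popAll() {
    while (!cleanups.empty()) {
      cleanups.back()();
      cleanups.pop_back();
    }
  }
};

static void releaseObject(const char *name, int id) {
  std::printf("releasing %s #%d\n", name, id);
}

int main() {
  CleanupStack stack;
  stack.pushConditional(&releaseObject, (const char *)"temp", 1);
  stack.popAll();  // fires the cleanup with the restored arguments
  return 0;
}
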
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
@@ -1048,6 +1155,9 @@ public:
void disableDebugInfo() { DisableDebugInfo = true; }
void enableDebugInfo() { DisableDebugInfo = false; }
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
@@ -1075,6 +1185,57 @@ public:
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
+ // Cleanups
+ //===--------------------------------------------------------------------===//
+
+ typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+
+ void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer &destroyer);
+ void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer &destroyer);
+
+ void pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type);
+ void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Destroyer &destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer &destroyer,
+ bool useEHCleanupForArray);
+ llvm::Function *generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer &destroyer,
+ bool useEHCleanupForArray);
+ void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
+ QualType type, Destroyer &destroyer,
+ bool checkZeroLength, bool useEHCleanup);
+
+ Destroyer &getDestroyer(QualType::DestructionKind destructionKind);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ return getLangOptions().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOptions().Exceptions &&
+ CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+ }
+
+ //===--------------------------------------------------------------------===//
// Objective-C
//===--------------------------------------------------------------------===//
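
The cleanup-kind helpers above encode a small policy: C++ destructors and __weak destruction need an EH cleanup only when exceptions are enabled, while __strong destruction additionally requires the ObjCAutoRefCountExceptions codegen option. Below is a standalone restatement of that decision table with assertions as a usage example; it is illustrative only, not clang code.

#include <cassert>

enum DestructionKind { DK_none, DK_cxx_destructor,
                       DK_objc_strong_lifetime, DK_objc_weak_lifetime };
enum CleanupKind { NormalCleanup, NormalAndEHCleanup };

struct Opts { bool Exceptions; bool ObjCAutoRefCountExceptions; };

static bool needsEHCleanup(DestructionKind k, const Opts &o) {
  switch (k) {
  case DK_none:                 return false;
  case DK_cxx_destructor:
  case DK_objc_weak_lifetime:   return o.Exceptions;
  case DK_objc_strong_lifetime: return o.Exceptions &&
                                       o.ObjCAutoRefCountExceptions;
  }
  return false;
}

static CleanupKind getCleanupKind(DestructionKind k, const Opts &o) {
  return needsEHCleanup(k, o) ? NormalAndEHCleanup : NormalCleanup;
}

int main() {
  Opts ehNoArcExceptions = {true, false};
  assert(getCleanupKind(DK_cxx_destructor, ehNoArcExceptions) == NormalAndEHCleanup);
  // ARC strong cleanups stay normal-only unless ObjCAutoRefCountExceptions is set.
  assert(getCleanupKind(DK_objc_strong_lifetime, ehNoArcExceptions) == NormalCleanup);
  return 0;
}
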
@@ -1236,9 +1397,9 @@ public:
/// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
- const llvm::Type *ConvertTypeForMem(QualType T);
- const llvm::Type *ConvertType(QualType T);
- const llvm::Type *ConvertType(const TypeDecl *T) {
+ llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertType(QualType T);
+ llvm::Type *ConvertType(const TypeDecl *T) {
return ConvertType(getContext().getTypeDeclType(T));
}
@@ -1345,7 +1506,8 @@ public:
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
- return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
+ false);
}
/// Emit a cast to void* in the appropriate address space.
@@ -1379,14 +1541,12 @@ public:
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
- bool IsLocationVolatile,
- bool IsInitializer);
+ Qualifiers Quals, bool IsInitializer);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
- void EmitExprAsInit(const Expr *init, const VarDecl *var,
- llvm::Value *loc, CharUnits alignment,
- bool capturedByInit);
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
  /// EmitAggregateCopy - Emit an aggregate copy.
///
@@ -1451,16 +1611,24 @@ public:
// instruction in LLVM instead once it works well enough.
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
- /// EmitVLASize - Generate code for any VLA size expressions that might occur
- /// in a variably modified type. If Ty is a VLA, will return the value that
- /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+ /// emitArrayLength - Compute the length of an array, even if it's a
+ /// VLA, and drill down to the base element type.
+ llvm::Value *emitArrayLength(const ArrayType *arrayType,
+ QualType &baseType,
+ llvm::Value *&addr);
+
+ /// EmitVLASize - Capture all the sizes for the VLA expressions in
+ /// the given variably-modified type and store them in the VLASizeMap.
///
/// This function can be called with a null (unreachable) insert point.
- llvm::Value *EmitVLASize(QualType Ty);
+ void EmitVariablyModifiedType(QualType Ty);
- // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
- // of a variable length array type.
- llvm::Value *GetVLASize(const VariableArrayType *);
+ /// getVLASize - Returns an LLVM value that corresponds to the size,
+ /// in non-variably-sized elements, of a variable length array type,
+ /// plus that largest non-variably-sized element type. Assumes that
+ /// the type has already been emitted with EmitVariablyModifiedType.
+ std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
+ std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
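
The old per-level EmitVLASize/GetVLASize pair is replaced by one pass (EmitVariablyModifiedType) plus getVLASize, which reports a single flattened element count together with the largest non-variably-sized element type. A small standalone arithmetic illustration of what that pair means for a declaration like 'float a[n][m][4]'; illustrative only, not clang code.

#include <cstddef>
#include <cstdio>

int main() {
  unsigned n = 3, m = 5;
  // For 'float a[n][m][4]', the variably-sized levels contribute n * m
  // elements whose type is the largest fixed-size element, float[4].
  unsigned numElements = n * m;            // the count getVLASize reports
  size_t   elementSize = sizeof(float[4]); // size of that fixed element type
  std::printf("%u elements x %zu bytes = %zu bytes total\n",
              numElements, elementSize, numElements * elementSize);
  return 0;
}
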
@@ -1535,17 +1703,7 @@ public:
CallExpr::const_arg_iterator ArgEnd,
bool ZeroInitialization = false);
- void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
-
- void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
- llvm::Value *NumElements,
- llvm::Value *This);
-
- llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
+ static Destroyer destroyCXXObject;
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
@@ -1584,6 +1742,10 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitVarDecl(const VarDecl &D);
+ void EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+ void EmitScalarInit(llvm::Value *init, LValue lvalue);
+
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
@@ -1639,6 +1801,8 @@ public:
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
void EmitAutoVarInit(const AutoVarEmission &emission);
void EmitAutoVarCleanups(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind);
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -1709,6 +1873,7 @@ public:
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
llvm::Constant *getUnwindResumeFn();
llvm::Constant *getUnwindResumeOrRethrowFn();
@@ -1775,6 +1940,12 @@ public:
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo = 0);
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value.
+ llvm::Value *EmitLoadOfScalar(LValue lvalue);
+
/// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation to
  /// the memory representation.
@@ -1782,21 +1953,26 @@ public:
bool Volatile, unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo = 0);
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+  /// care to appropriately convert from the LLVM value representation to
+  /// the memory representation. The l-value must be a simple
+ /// l-value.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
- RValue EmitLoadOfLValue(LValue V, QualType LVType);
- RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
- RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfLValue(LValue V);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V);
+ RValue EmitLoadOfBitfieldLValue(LValue LV);
RValue EmitLoadOfPropertyRefLValue(LValue LV,
ReturnValueSlot Return = ReturnValueSlot());
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
- void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
- QualType Ty);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
@@ -1805,7 +1981,7 @@ public:
/// \param Result [out] - If non-null, this will be set to a Value* for the
/// bit-field contents after the store, appropriate for use as the result of
/// an assignment to the bit-field.
- void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result=0);
/// Emit an l-value for an assignment (simple or compound) of complex type.
@@ -1832,6 +2008,7 @@ public:
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
+ LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -1897,8 +2074,9 @@ public:
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- llvm::Value * const *ArgBegin,
- llvm::Value * const *ArgEnd,
+ llvm::ArrayRef<llvm::Value *> Args,
+ const llvm::Twine &Name = "");
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
const llvm::Twine &Name = "");
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
@@ -1961,6 +2139,59 @@ public:
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
+
+ // ARC primitives.
+ void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
+ void EmitARCDestroyWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
+ llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
+ bool ignored);
+ void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value);
+ void EmitARCRelease(llvm::Value *value, bool precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ static Destroyer destroyARCStrongImprecise;
+ static Destroyer destroyARCStrongPrecise;
+ static Destroyer destroyARCWeak;
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
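
Most of these entry points wrap a single runtime call; the store helpers also encode an ordering that matters. Below is a standalone model, not clang code, of the operation a strong store such as EmitARCStoreStrong performs: retain the incoming value, store it, and only then release the value it displaced, so self-assignment stays safe.

#include <cstdio>

struct Obj { const char *name; int refcount; };

static Obj *retain(Obj *o)  { if (o) ++o->refcount; return o; }
static void release(Obj *o) {
  if (o && --o->refcount == 0)
    std::printf("deallocating %s\n", o->name);
}

// Strong assignment: retain the new value first, then release the old one.
static void storeStrong(Obj **slot, Obj *value) {
  Obj *old = *slot;
  *slot = retain(value);
  release(old);
}

int main() {
  Obj a = {"a", 1}, b = {"b", 1};
  Obj *strongVar = retain(&a);       // strongVar owns a reference to a
  storeStrong(&strongVar, &b);       // retains b, releases a's reference
  storeStrong(&strongVar, nullptr);  // clears the slot, releasing b
  release(&a);                       // drop the original local references
  release(&b);
  return 0;
}
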
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
RValue EmitReferenceBindingToExpr(const Expr* E,
@@ -2002,6 +2233,10 @@ public:
void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType Ty);
+ /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+ /// make sure it survives garbage collection until this point.
+ void EmitExtendGCLifetime(llvm::Value *object);
+
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
ComplexPairTy EmitComplexExpr(const Expr *E,
@@ -2135,7 +2370,8 @@ private:
/// Ty, into individual arguments on the provided vector \arg Args. See
/// ABIArgInfo::Expand.
void ExpandTypeToArgs(QualType Ty, RValue Src,
- llvm::SmallVector<llvm::Value*, 16> &Args);
+ llvm::SmallVector<llvm::Value*, 16> &Args,
+ llvm::FunctionType *IRFuncTy);
llvm::Value* EmitAsmInput(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 7a1a968259f3..0668039a892c 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -62,9 +62,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
: Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
- Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI, CGO),
TBAA(0),
- VTables(*this), Runtime(0), DebugInfo(0),
+ VTables(*this), Runtime(0), DebugInfo(0), ARCData(0), RRData(0),
CFConstantStringClassRef(0), ConstantStringClassRef(0),
VMContext(M.getContext()),
NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
@@ -88,6 +88,10 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Block.GlobalUniqueCount = 0;
+ if (C.getLangOptions().ObjCAutoRefCount)
+ ARCData = new ARCEntrypoints();
+ RRData = new RREntrypoints();
+
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
@@ -108,6 +112,8 @@ CodeGenModule::~CodeGenModule() {
delete &ABI;
delete TBAA;
delete DebugInfo;
+ delete ARCData;
+ delete RRData;
}
void CodeGenModule::createObjCRuntime() {
@@ -190,6 +196,10 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
+llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
+ return llvm::ConstantInt::get(SizeTy, size.getQuantity());
+}
+
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const NamedDecl *D) const {
// Internal definitions always have default visibility.
@@ -347,8 +357,8 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
- llvm::StructType* CtorStructTy =
- llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+ llvm::StructType *CtorStructTy =
+ llvm::StructType::get(llvm::Type::getInt32Ty(VMContext),
llvm::PointerType::getUnqual(CtorFTy), NULL);
// Construct the constructor and destructor arrays.
@@ -670,7 +680,7 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
llvm::ConstantExpr::getBitCast(unitGV, SBP),
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
};
- return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
+ return llvm::ConstantStruct::getAnon(Fields);
}
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
@@ -830,7 +840,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
- GlobalDecl D, bool ForVTable) {
+ GlobalDecl D, bool ForVTable,
+ llvm::Attributes ExtraAttrs) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -846,8 +857,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
return Entry;
// Make sure the result is of the correct type.
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
- return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
}
// This function doesn't have a complete type (for example, the return
@@ -869,6 +879,8 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
assert(F->getName() == MangledName && "name was uniqued!");
if (D.getDecl())
SetFunctionAttributes(D, F, IsIncompleteFunction);
+ if (ExtraAttrs != llvm::Attribute::None)
+ F->addFnAttr(ExtraAttrs);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
@@ -915,7 +927,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
return F;
}
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
return llvm::ConstantExpr::getBitCast(F, PTy);
}
@@ -937,8 +949,10 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
/// type and name.
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
- llvm::StringRef Name) {
- return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ ExtraAttrs);
}
static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
@@ -1340,7 +1354,8 @@ CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
D->getAttr<CommonAttr>()) &&
!D->hasExternalStorage() && !D->getInit() &&
- !D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified() &&
+ !D->getAttr<WeakImportAttr>()) {
// Thread local vars aren't considered common linkage.
return llvm::GlobalVariable::CommonLinkage;
}
@@ -1398,8 +1413,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
- llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
- ArgList.end(), "", CI);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList, "", CI);
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
@@ -1426,7 +1440,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
bool variadic = false;
if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
variadic = fpt->isVariadic();
- const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic, false);
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1597,10 +1611,10 @@ llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
-llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
- unsigned NumTys) {
- return llvm::Intrinsic::getDeclaration(&getModule(),
- (llvm::Intrinsic::ID)IID, Tys, NumTys);
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
+ llvm::ArrayRef<llvm::Type*> Tys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
+ Tys);
}
static llvm::StringMapEntry<llvm::Constant*> &
@@ -1997,6 +2011,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
false, true, false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasCXXStructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
@@ -2015,6 +2030,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasCXXStructors(true);
}
/// EmitNamespace - Emit all declarations in a namespace.
@@ -2276,7 +2292,7 @@ llvm::Constant *CodeGenModule::getBlockObjectDispose() {
}
// Otherwise construct the function by hand.
- const llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
const llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectDispose =
@@ -2295,7 +2311,7 @@ llvm::Constant *CodeGenModule::getBlockObjectAssign() {
}
// Otherwise construct the function by hand.
- const llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
const llvm::FunctionType *fty
= llvm::FunctionType::get(VoidTy, args, false);
return BlockObjectAssign =
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 779a3523b7f6..86fb6d4d030e 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -19,10 +19,10 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "CGVTables.h"
#include "CodeGenTypes.h"
-#include "GlobalDecl.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
@@ -33,6 +33,7 @@
namespace llvm {
class Module;
class Constant;
+ class ConstantInt;
class Function;
class GlobalValue;
class TargetData;
@@ -98,31 +99,31 @@ namespace CodeGen {
struct CodeGenTypeCache {
/// void
- const llvm::Type *VoidTy;
+ llvm::Type *VoidTy;
/// i8, i32, and i64
- const llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
+ llvm::IntegerType *Int8Ty, *Int32Ty, *Int64Ty;
/// int
- const llvm::IntegerType *IntTy;
+ llvm::IntegerType *IntTy;
/// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
union {
- const llvm::IntegerType *IntPtrTy;
- const llvm::IntegerType *SizeTy;
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
};
/// void* in address space 0
union {
- const llvm::PointerType *VoidPtrTy;
- const llvm::PointerType *Int8PtrTy;
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
};
/// void** in address space 0
union {
- const llvm::PointerType *VoidPtrPtrTy;
- const llvm::PointerType *Int8PtrPtrTy;
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
};
/// The width of a pointer into the generic address space.
@@ -131,6 +132,71 @@ namespace CodeGen {
/// The alignment of a pointer into the generic address space.
unsigned char PointerAlignInBytes;
};
+
+struct RREntrypoints {
+ RREntrypoints() { memset(this, 0, sizeof(*this)); }
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+};
+
+struct ARCEntrypoints {
+ ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+ /// A void(void) inline asm to use to mark that the return value of
+ /// a call will be immediately retain.
+  /// a call will be immediately retained.
+};
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
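
Both structs are zero-initialized wholesale and then filled lazily: a runtime function is declared (via CreateRuntimeFunction) the first time code generation actually needs it. A standalone sketch of that get-or-create pattern, not clang code; RuntimeFn and getOrCreate are illustrative stand-ins for the llvm::Constant slots and the declaration helper.

#include <cstdio>
#include <cstring>
#include <string>

struct RuntimeFn { std::string name; };

struct Entrypoints {
  Entrypoints() { std::memset(this, 0, sizeof(*this)); }  // all slots start null
  RuntimeFn *objc_retain;
  RuntimeFn *objc_release;
};

static RuntimeFn *getOrCreate(RuntimeFn *&slot, const char *name) {
  if (!slot) {
    std::printf("declaring %s\n", name);  // happens at most once per slot
    slot = new RuntimeFn{name};
  }
  return slot;
}

int main() {
  Entrypoints arc;
  getOrCreate(arc.objc_retain, "objc_retain");
  getOrCreate(arc.objc_retain, "objc_retain");  // cached: no second declaration
  delete arc.objc_retain;
  delete arc.objc_release;  // still null; deleting null is a no-op
  return 0;
}
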
@@ -157,6 +223,8 @@ class CodeGenModule : public CodeGenTypeCache {
CGObjCRuntime* Runtime;
CGDebugInfo* DebugInfo;
+ ARCEntrypoints *ARCData;
+ RREntrypoints *RRData;
// WeakRefReferences - A set of references that have only been seen via
// a weakref so far. This is used to remove the weak of the reference if we ever
@@ -244,8 +312,8 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::Constant *BlockObjectAssign;
llvm::Constant *BlockObjectDispose;
- const llvm::Type *BlockDescriptorType;
- const llvm::Type *GenericBlockLiteralType;
+ llvm::Type *BlockDescriptorType;
+ llvm::Type *GenericBlockLiteralType;
struct {
int GlobalUniqueCount;
@@ -275,6 +343,16 @@ public:
/// getCXXABI() - Return a reference to the configured C++ ABI.
CGCXXABI &getCXXABI() { return ABI; }
+ ARCEntrypoints &getARCEntrypoints() const {
+ assert(getLangOptions().ObjCAutoRefCount && ARCData != 0);
+ return *ARCData;
+ }
+
+ RREntrypoints &getRREntrypoints() const {
+ assert(RRData != 0);
+ return *RRData;
+ }
+
llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
return StaticLocalDeclMap[VD];
}
@@ -305,6 +383,9 @@ public:
static void DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo);
+ /// getSize - Emit the given number of characters as a value of type size_t.
+ llvm::ConstantInt *getSize(CharUnits numChars);
+
/// setGlobalVisibility - Set the visibility for the given LLVM
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
@@ -422,10 +503,10 @@ public:
/// getBlockDescriptorType - Fetches the type of a generic block
/// descriptor.
- const llvm::Type *getBlockDescriptorType();
+ llvm::Type *getBlockDescriptorType();
/// getGenericBlockLiteralType - The type of a generic block literal.
- const llvm::Type *getGenericBlockLiteralType();
+ llvm::Type *getGenericBlockLiteralType();
/// GetAddrOfGlobalBlock - Gets the address of a block which
/// requires no captures.
@@ -474,7 +555,7 @@ public:
/// created).
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
const char *GlobalName=0);
-
+
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
@@ -492,8 +573,8 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
- unsigned NumTys = 0);
+ llvm::Function *getIntrinsic(unsigned IID, llvm::ArrayRef<llvm::Type*> Tys =
+ llvm::ArrayRef<llvm::Type*>());
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
@@ -514,7 +595,9 @@ public:
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
- llvm::StringRef Name);
+ llvm::StringRef Name,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
@@ -642,7 +725,9 @@ private:
llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
const llvm::Type *Ty,
GlobalDecl D,
- bool ForVTable);
+ bool ForVTable,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
const llvm::PointerType *PTy,
const VarDecl *D,
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 8db6fe518684..764688fcb12e 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -28,9 +28,10 @@ using namespace CodeGen;
CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
const llvm::TargetData &TD, const ABIInfo &Info,
- CGCXXABI &CXXABI)
+ CGCXXABI &CXXABI, const CodeGenOptions &CGO)
: Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
- TheABIInfo(Info), TheCXXABI(CXXABI) {
+ TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
+ SkippedLayout = false;
}
CodeGenTypes::~CodeGenTypes() {
@@ -44,28 +45,8 @@ CodeGenTypes::~CodeGenTypes() {
delete &*I++;
}
-/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
-/// pointers that are referenced but have not been converted yet. This is used
-/// to handle cyclic structures properly.
-void CodeGenTypes::HandleLateResolvedPointers() {
- assert(!PointersToResolve.empty() && "No pointers to resolve!");
-
- // Any pointers that were converted deferred evaluation of their pointee type,
- // creating an opaque type instead. This is in order to avoid problems with
- // circular types. Loop through all these defered pointees, if any, and
- // resolve them now.
- while (!PointersToResolve.empty()) {
- std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
-
- // We can handle bare pointers here because we know that the only pointers
- // to the Opaque type are P.second and from other types. Refining the
- // opqaue type away will invalidate P.second, but we don't mind :).
- const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
- P.second->refineAbstractTypeTo(NT);
- }
-}
-
-void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
+ llvm::StructType *Ty,
llvm::StringRef suffix) {
llvm::SmallString<256> TypeName;
llvm::raw_svector_ostream OS(TypeName);
@@ -93,47 +74,15 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
if (!suffix.empty())
OS << suffix;
- TheModule.addTypeName(OS.str(), Ty);
-}
-
-/// ConvertType - Convert the specified type to its LLVM form.
-const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
- const llvm::Type *Result = ConvertTypeRecursive(T);
-
- // If this is a top-level call to ConvertType and sub-conversions caused
- // pointers to get lazily built as opaque types, resolve the pointers, which
- // might cause Result to be merged away.
- if (!IsRecursive && !PointersToResolve.empty()) {
- llvm::PATypeHolder ResultHandle = Result;
- HandleLateResolvedPointers();
- Result = ResultHandle;
- }
- return Result;
-}
-
-const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
- T = Context.getCanonicalType(T);
-
- // See if type is already cached.
- llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
- I = TypeCache.find(T.getTypePtr());
- // If type is found in map and this is not a definition for a opaque
- // place holder type then use it. Otherwise, convert type T.
- if (I != TypeCache.end())
- return I->second.get();
-
- const llvm::Type *ResultType = ConvertNewType(T);
- TypeCache.insert(std::make_pair(T.getTypePtr(),
- llvm::PATypeHolder(ResultType)));
- return ResultType;
+ Ty->setName(OS.str());
}
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
- const llvm::Type *R = ConvertType(T, IsRecursive);
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
+ llvm::Type *R = ConvertType(T);
// If this is a non-bool type, don't map it.
if (!R->isIntegerTy(1))
@@ -142,69 +91,178 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
// Otherwise, return an integer of the target-specified size.
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
+}
+
+/// isRecordLayoutComplete - Return true if the specified type is already
+/// completely laid out.
+bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+ llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
+ RecordDeclTypes.find(Ty);
+ return I != RecordDeclTypes.end() && !I->second->isOpaque();
}
-// Code to verify a given function type is complete, i.e. the return type
-// and all of the argument types are complete.
-const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
- const FunctionType *FT = cast<FunctionType>(T);
- if (const TagType* TT = FT->getResultType()->getAs<TagType>())
- if (!TT->getDecl()->isDefinition())
- return TT;
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
- for (unsigned i = 0; i < FPT->getNumArgs(); i++)
- if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
- if (!TT->getDecl()->isDefinition())
- return TT;
- return 0;
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool
+isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ // If we have already checked this type (maybe the same type is used by-value
+  // multiple times in multiple structure fields), don't check again.
+ if (!AlreadyChecked.insert(RD)) return true;
+
+ const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+ // If this type is already laid out, converting it is a noop.
+ if (CGT.isRecordLayoutComplete(Key)) return true;
+
+ // If this type is currently being laid out, we can't recursively compile it.
+ if (CGT.isRecordBeingLaidOut(Key))
+ return false;
+
+ // If this type would require laying out bases that are currently being laid
+ // out, don't do it. This includes virtual base classes which get laid out
+ // when a class is translated, even though they aren't embedded by-value into
+ // the class.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
+ E = CRD->bases_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
+ CGT, AlreadyChecked))
+ return false;
+ }
+
+ // If this type would require laying out members that are currently being laid
+ // out, don't do it.
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+ return false;
+
+  // If there are no problems, let's do it.
+ return true;
+}
+
+/// isSafeToConvert - Return true if it is safe to convert this field type,
+/// which requires the structure elements contained by-value to all be
+/// recursively safe to convert.
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ T = T.getCanonicalType();
+
+ // If this is a record, check it.
+ if (const RecordType *RT = dyn_cast<RecordType>(T))
+ return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+ // If this is an array, check the elements, which are embedded inline.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+ // Otherwise, there is no concern about transforming this. We only care about
+ // things that are contained by-value in a structure that can have another
+ // structure as a member.
+ return true;
+}
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
+ // If no structs are being laid out, we can certainly do this one.
+ if (CGT.noRecordsBeingLaidOut()) return true;
+
+ llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
+ return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
+
+
+/// isFuncTypeArgumentConvertible - Return true if the specified type in a
+/// function argument or result position can be converted to an IR type at this
+/// point. This boils down to being whether it is complete, as well as whether
+/// we've temporarily deferred expanding the type because we're in a recursive
+/// context.
+bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
+ // If this isn't a tagged type, we can convert it!
+ const TagType *TT = Ty->getAs<TagType>();
+ if (TT == 0) return true;
+
+
+ // If it's a tagged type used by-value, but is just a forward decl, we can't
+ // convert it. Note that getDefinition()==0 is not the same as !isDefinition.
+ if (TT->getDecl()->getDefinition() == 0)
+ return false;
+
+ // If this is an enum, then it is always safe to convert.
+ const RecordType *RT = dyn_cast<RecordType>(TT);
+ if (RT == 0) return true;
+
+ // Otherwise, we have to be careful. If it is a struct that we're in the
+ // process of expanding, then we can't convert the function type. That's ok
+ // though because we must be in a pointer context under the struct, so we can
+ // just convert it to a dummy type.
+ //
+ // We decide this by checking whether ConvertRecordDeclType returns us an
+ // opaque type for a struct that we know is defined.
+ return isSafeToConvert(RT->getDecl(), *this);
+}
+
+
+/// Code to verify a given function type is complete, i.e. the return type
+/// and all of the argument types are complete. Also check to see if we are in
+/// a RS_StructPointer context, and if so whether any struct types have been
+/// pended. If so, we don't want to ask the ABI lowering code to handle a type
+/// that cannot be converted to an IR type.
+bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
+ if (!isFuncTypeArgumentConvertible(FT->getResultType()))
+ return false;
+
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
+ if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
+ return false;
+
+ return true;
}
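
With opaque-type refinement gone, the converter has to decide up front whether laying out a record now would recurse into a record that is already mid-layout; if so, the caller gets a placeholder and the record is finished later. A standalone model of that guard, not clang code; Record, Converter, and the printed messages are illustrative, loosely mirroring RecordsBeingLaidOut and RecordDeclTypes.

#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Record {
  std::string name;
  std::vector<const Record *> byValueMembers;
};

struct Converter {
  std::set<const Record *> beingLaidOut;  // like RecordsBeingLaidOut
  std::set<const Record *> completed;     // like the non-opaque RecordDeclTypes

  bool isSafeToConvert(const Record *R, std::set<const Record *> &checked) {
    if (!checked.insert(R).second) return true;  // already examined this one
    if (completed.count(R))        return true;  // fully laid out: no problem
    if (beingLaidOut.count(R))     return false; // would recurse: unsafe
    for (const Record *M : R->byValueMembers)
      if (!isSafeToConvert(M, checked))
        return false;
    return true;
  }

  void convert(const Record *R) {
    if (completed.count(R)) return;  // already laid out
    std::set<const Record *> checked;
    if (!isSafeToConvert(R, checked)) {
      std::printf("%s: deferred (opaque placeholder)\n", R->name.c_str());
      return;
    }
    beingLaidOut.insert(R);
    for (const Record *M : R->byValueMembers)
      convert(M);
    beingLaidOut.erase(R);
    completed.insert(R);
    std::printf("%s: laid out\n", R->name.c_str());
  }
};

int main() {
  Record inner = {"Inner", {}};
  Record outer = {"Outer", {&inner}};
  Converter C;
  C.convert(&outer);                // lays out Inner, then Outer

  Record pending = {"Pending", {}};
  Record user = {"User", {&pending}};
  C.beingLaidOut.insert(&pending);  // pretend Pending is mid-layout elsewhere
  C.convert(&user);                 // deferred: would recurse into Pending
  return 0;
}
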
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
- const Type *Key = Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
- TagDeclTypes.find(Key);
- if (TDTI == TagDeclTypes.end()) return;
-
- // Remember the opaque LLVM type for this tagdecl.
- llvm::PATypeHolder OpaqueHolder = TDTI->second;
- assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
- "Updating compilation of an already non-opaque type?");
-
- // Remove it from TagDeclTypes so that it will be regenerated.
- TagDeclTypes.erase(TDTI);
-
- // Generate the new type.
- const llvm::Type *NT = ConvertTagDeclType(TD);
-
- // Refine the old opaque type to its new definition.
- cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
-
- // Since we just completed a tag type, check to see if any function types
- // were completed along with the tag type.
- // FIXME: This is very inefficient; if we track which function types depend
- // on which tag types, though, it should be reasonably efficient.
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
- for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
- if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
- // This function type still depends on an incomplete tag type; make sure
- // that tag type has an associated opaque type.
- ConvertTagDeclType(TT->getDecl());
- } else {
- // This function no longer depends on an incomplete tag type; create the
- // function type, and refine the opaque type to the new function type.
- llvm::PATypeHolder OpaqueHolder = i->second;
- const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
- cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
- FunctionTypes.erase(i);
+ // If this is an enum being completed, then we flush all non-struct types from
+ // the cache. This allows function types and other things that may be derived
+ // from the enum to be recomputed.
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
+ // Only flush the cache if we've actually already converted this type.
+ if (TypeCache.count(ED->getTypeForDecl())) {
+ // Okay, we formed some types based on this. We speculated that the enum
+ // would be lowered to i32, so we only need to flush the cache if this
+ // didn't happen.
+ if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
+ TypeCache.clear();
}
+ return;
}
+
+ // If we completed a RecordDecl that we previously used and converted to an
+ // anonymous type, then go ahead and complete it now.
+ const RecordDecl *RD = cast<RecordDecl>(TD);
+ if (RD->isDependentType()) return;
+
+ // Only complete it if we converted it already. If we haven't converted it
+ // yet, we'll just do it lazily.
+ if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ ConvertRecordDeclType(RD);
}
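
The enum path above relies on a guess: a forward-declared enum was speculatively lowered to i32, and if the completed definition needs a different integer type, every cached type derived from the enum could be stale, so the cache is simply dropped rather than patched. A tiny standalone illustration of that trade-off, not clang code.

#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> typeCache;
  typeCache["enum E"]        = "i32";          // speculative placeholder
  typeCache["void (enum E)"] = "void (i32)";   // derived from that guess

  std::string completedUnderlyingType = "i64"; // definition arrives later
  if (completedUnderlyingType != "i32") {
    typeCache.clear();  // guess was wrong; cheaper to flush than to patch
    std::printf("cache flushed, %zu entries remain\n", typeCache.size());
  }
  return 0;
}
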
-static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
- const llvm::fltSemantics &format) {
+static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
if (&format == &llvm::APFloat::IEEEsingle)
return llvm::Type::getFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEdouble)
@@ -219,10 +277,26 @@ static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
return 0;
}
-const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
- const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
+/// ConvertType - Convert the specified type to its LLVM form.
+llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ const Type *Ty = T.getTypePtr();
- switch (Ty.getTypeClass()) {
+ // RecordTypes are cached and processed specially.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return ConvertRecordDeclType(RT->getDecl());
+
+ // See if type is already cached.
+ llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
+ // If type is found in map then use it. Otherwise, convert type T.
+ if (TCI != TypeCache.end())
+ return TCI->second;
+
+ // If we don't have it in the cache, convert it now.
+ llvm::Type *ResultType = 0;
+ switch (Ty->getTypeClass()) {
+ case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
@@ -233,18 +307,20 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
break;
case Type::Builtin: {
- switch (cast<BuiltinType>(Ty).getKind()) {
+ switch (cast<BuiltinType>(Ty)->getKind()) {
case BuiltinType::Void:
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
// LLVM void type can only be used as the result of a function call. Just
// map to the same as char.
- return llvm::Type::getInt8Ty(getLLVMContext());
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ break;
case BuiltinType::Bool:
// Note that we always return bool as i1 for use as a scalar type.
- return llvm::Type::getInt1Ty(getLLVMContext());
+ ResultType = llvm::Type::getInt1Ty(getLLVMContext());
+ break;
case BuiltinType::Char_S:
case BuiltinType::Char_U:
@@ -262,24 +338,26 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::WChar_U:
case BuiltinType::Char16:
case BuiltinType::Char32:
- return llvm::IntegerType::get(getLLVMContext(),
- static_cast<unsigned>(Context.getTypeSize(T)));
+ ResultType = llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+ break;
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
- return getTypeForFormat(getLLVMContext(),
- Context.getFloatTypeSemantics(T));
+ ResultType = getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+ break;
- case BuiltinType::NullPtr: {
+ case BuiltinType::NullPtr:
// Model std::nullptr_t as i8*
- const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
- return llvm::PointerType::getUnqual(Ty);
- }
+ ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ break;
case BuiltinType::UInt128:
case BuiltinType::Int128:
- return llvm::IntegerType::get(getLLVMContext(), 128);
+ ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
+ break;
case BuiltinType::Overload:
case BuiltinType::Dependent:
@@ -288,107 +366,145 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
llvm_unreachable("Unexpected placeholder builtin type!");
break;
}
- llvm_unreachable("Unknown builtin type!");
break;
}
case Type::Complex: {
- const llvm::Type *EltTy =
- ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
- return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
+ llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
+ ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
+ break;
}
case Type::LValueReference:
case Type::RValueReference: {
- const ReferenceType &RTy = cast<ReferenceType>(Ty);
- QualType ETy = RTy.getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ const ReferenceType *RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
unsigned AS = Context.getTargetAddressSpace(ETy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
case Type::Pointer: {
- const PointerType &PTy = cast<PointerType>(Ty);
- QualType ETy = PTy.getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ const PointerType *PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ if (PointeeType->isVoidTy())
+ PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
unsigned AS = Context.getTargetAddressSpace(ETy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
case Type::VariableArray: {
- const VariableArrayType &A = cast<VariableArrayType>(Ty);
- assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ const VariableArrayType *A = cast<VariableArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
// VLAs resolve to the innermost element type; this matches
// the return of alloca, and there isn't any obviously better choice.
- return ConvertTypeForMemRecursive(A.getElementType());
+ ResultType = ConvertTypeForMem(A->getElementType());
+ break;
}
case Type::IncompleteArray: {
- const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
- assert(A.getIndexTypeCVRQualifiers() == 0 &&
+ const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
- // int X[] -> [0 x int]
- return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
- 0);
+ // int X[] -> [0 x int], unless the element type is not sized. If it is
+ // unsized (e.g. an incomplete struct) just use [0 x i8].
+ ResultType = ConvertTypeForMem(A->getElementType());
+ if (!ResultType->isSized()) {
+ SkippedLayout = true;
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+ ResultType = llvm::ArrayType::get(ResultType, 0);
+ break;
}
case Type::ConstantArray: {
- const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
- const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
- return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+ ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ break;
}
case Type::ExtVector:
case Type::Vector: {
- const VectorType &VT = cast<VectorType>(Ty);
- return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
- VT.getNumElements());
+ const VectorType *VT = cast<VectorType>(Ty);
+ ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
}
case Type::FunctionNoProto:
case Type::FunctionProto: {
+ const FunctionType *FT = cast<FunctionType>(Ty);
// First, check whether we can build the full function type. If the
// function type depends on an incomplete type (e.g. a struct or enum), we
- // cannot lower the function type. Instead, turn it into an Opaque pointer
- // and have UpdateCompletedType revisit the function type when/if the opaque
- // argument type is defined.
- if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
- // This function's type depends on an incomplete tag type; make sure
- // we have an opaque type corresponding to the tag type.
- ConvertTagDeclType(TT->getDecl());
- // Create an opaque type for this function type, save it, and return it.
- llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
- FunctionTypes.insert(std::make_pair(&Ty, ResultType));
- return ResultType;
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+ // Return a placeholder type.
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // While we're converting the argument types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty)) {
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
}
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
bool isVariadic;
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
FI = &getFunctionInfo(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
- true /*Recursive*/);
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
isVariadic = FPT->isVariadic();
} else {
- const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
FI = &getFunctionInfo(
- CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
- true /*Recursive*/);
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
isVariadic = true;
}
+
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
- return GetFunctionType(*FI, isVariadic, true);
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI, isVariadic);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ break;
}
case Type::ObjCObject:
- return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
+ ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
+ break;
case Type::ObjCInterface: {
// Objective-C interfaces are always opaque (outside of the
// runtime, which can do whatever it likes); we never refine
// these.
- const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
if (!T)
- T = llvm::OpaqueType::get(getLLVMContext());
- return T;
+ T = llvm::StructType::createNamed(getLLVMContext(), "");
+ ResultType = T;
+ break;
}
case Type::ObjCObjectPointer: {
@@ -396,103 +512,105 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
// pointer to the underlying interface type. We don't need to worry about
// recursive conversion.
const llvm::Type *T =
- ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
- return llvm::PointerType::getUnqual(T);
+ ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ ResultType = T->getPointerTo();
+ break;
}
- case Type::Record:
case Type::Enum: {
- const TagDecl *TD = cast<TagType>(Ty).getDecl();
- const llvm::Type *Res = ConvertTagDeclType(TD);
-
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(TD))
- addRecordTypeName(RD, Res, llvm::StringRef());
- return Res;
+ const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ if (ED->isDefinition() || ED->isFixed())
+ return ConvertType(ED->getIntegerType());
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ ResultType = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
}
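Two hypothetical enums (invented names) showing the fast paths above; the i32 placeholder is only used when the enum is still incomplete and has no fixed underlying type.

enum class Flags : unsigned char;   // fixed underlying type: lowered via i8
enum Status { Ok, Error };          // definition available: lowered to its integer type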
case Type::BlockPointer: {
- const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
- llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
- PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(FTy);
unsigned AS = Context.getTargetAddressSpace(FTy);
- return llvm::PointerType::get(PointeeType, AS);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
}
case Type::MemberPointer: {
- return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
+ ResultType =
+ getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
+ break;
}
}
-
- // FIXME: implement.
- return llvm::OpaqueType::get(getLLVMContext());
+
+ assert(ResultType && "Didn't convert a type?");
+
+ TypeCache[Ty] = ResultType;
+ return ResultType;
}
-/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
-/// enum.
-const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
- const Type *Key =
- Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
- TagDeclTypes.find(Key);
-
- // If we've already compiled this tag type, use the previous definition.
- if (TDTI != TagDeclTypes.end())
- return TDTI->second;
-
- const EnumDecl *ED = dyn_cast<EnumDecl>(TD);
-
- // If this is still a forward declaration, just define an opaque
- // type to use for this tagged decl.
- // C++0x: If this is a enumeration type with fixed underlying type,
- // consider it complete.
- if (!TD->isDefinition() && !(ED && ED->isFixed())) {
- llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
- TagDeclTypes.insert(std::make_pair(Key, ResultType));
- return ResultType;
- }
-
- // Okay, this is a definition of a type. Compile the implementation now.
-
- if (ED) // Don't bother storing enums in TagDeclTypes.
- return ConvertTypeRecursive(ED->getIntegerType());
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
- // This decl could well be recursive. In this case, insert an opaque
- // definition of this type, which the recursive uses will get. We will then
- // refine this opaque version later.
+ llvm::StructType *&Entry = RecordDeclTypes[Key];
- // Create new OpaqueType now for later use in case this is a recursive
- // type. This will later be refined to the actual type.
- llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
- TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+ // If we don't have a StructType at all yet, create the forward declaration.
+ if (Entry == 0) {
+ Entry = llvm::StructType::createNamed(getLLVMContext(), "");
+ addRecordTypeName(RD, Entry, "");
+ }
+ llvm::StructType *Ty = Entry;
- const RecordDecl *RD = cast<const RecordDecl>(TD);
+ // If this is still a forward declaration, or the LLVM type is already
+ // complete, there's nothing more to do.
+ RD = RD->getDefinition();
+ if (RD == 0 || !Ty->isOpaque())
+ return Ty;
+
+ // If converting this type would cause us to infinitely loop, don't do it!
+ if (!isSafeToConvert(RD, *this)) {
+ DeferredRecords.push_back(RD);
+ return Ty;
+ }
+ // Okay, this is a definition of a type. Compile the implementation now.
+ bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
+ assert(InsertResult && "Recursively compiling a struct?");
+
// Force conversion of non-virtual base classes recursively.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- if (!i->isVirtual()) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- ConvertTagDeclType(Base);
- }
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
+ e = CRD->bases_end(); i != e; ++i) {
+ if (i->isVirtual()) continue;
+
+ ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
}
}
// Layout fields.
- CGRecordLayout *Layout = ComputeRecordLayout(RD);
-
+ CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
CGRecordLayouts[Key] = Layout;
- const llvm::Type *ResultType = Layout->getLLVMType();
- // Refine our Opaque type to ResultType. This can invalidate ResultType, so
- // make sure to read the result out of the holder.
- cast<llvm::OpaqueType>(ResultHolder.get())
- ->refineAbstractTypeTo(ResultType);
+ // We're done laying out this struct.
+ bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
+ assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+ // If this struct blocked a FunctionType conversion, then recompute whatever
+ // was derived from that.
+ // FIXME: This is hugely overconservative.
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ // If we're done converting the outer-most record, then convert any deferred
+ // structs as well.
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
- return ResultHolder.get();
+ return Ty;
}
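A hypothetical pair of mutually referential records (invented names) that exercises the deferral logic above: converting A reaches B through a pointer, but B embeds A by value, so B is parked on DeferredRecords until A's layout is finished.

struct B;
struct A { B *link; int x; };   // safe to finish: only needs an opaque B
struct B { A payload; };        // needs A's completed layout, so it is deferred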
/// getCGRecordLayout - Return record layout info for the given record decl.
@@ -503,7 +621,7 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
if (!Layout) {
// Compute the type information.
- ConvertTagDeclType(RD);
+ ConvertRecordDeclType(RD);
// Now try again.
Layout = CGRecordLayouts.lookup(Key);
@@ -513,15 +631,6 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
return *Layout;
}
-void CodeGenTypes::addBaseSubobjectTypeName(const CXXRecordDecl *RD,
- const CGRecordLayout &layout) {
- llvm::StringRef suffix;
- if (layout.getBaseSubobjectLLVMType() != layout.getLLVMType())
- suffix = ".base";
-
- addRecordTypeName(RD, layout.getBaseSubobjectLLVMType(), suffix);
-}
-
bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
if (!Context.getLangOptions().CPlusPlus)
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index ff1eb4c45b59..7c0fb8164373 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -15,7 +15,7 @@
#define CLANG_CODEGEN_CODEGENTYPES_H
#include "CGCall.h"
-#include "GlobalDecl.h"
+#include "clang/AST/GlobalDecl.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include <vector>
@@ -23,11 +23,10 @@
namespace llvm {
class FunctionType;
class Module;
- class OpaqueType;
- class PATypeHolder;
class TargetData;
class Type;
class LLVMContext;
+ class StructType;
}
namespace clang {
@@ -37,6 +36,7 @@ namespace clang {
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
+ class CodeGenOptions;
class FieldDecl;
class FunctionProtoType;
class ObjCInterfaceDecl;
@@ -58,91 +58,83 @@ namespace CodeGen {
class CodeGenTypes {
ASTContext &Context;
const TargetInfo &Target;
- llvm::Module& TheModule;
- const llvm::TargetData& TheTargetData;
- const ABIInfo& TheABIInfo;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ const ABIInfo &TheABIInfo;
CGCXXABI &TheCXXABI;
-
- llvm::SmallVector<std::pair<QualType,
- llvm::OpaqueType *>, 8> PointersToResolve;
-
- llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
-
- llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+ const CodeGenOptions &CodeGenOpts;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
/// responsible for coercing to the appropriate type; these opaque
/// types are never refined.
- llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+ llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
/// CGRecordLayouts - This maps llvm struct type with corresponding
/// record layout info.
llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+ /// RecordDeclTypes - This contains the LLVM IR type for any converted
+ /// RecordDecl.
+ llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
+
/// FunctionInfos - Hold memoized CGFunctionInfo results.
llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+ /// RecordsBeingLaidOut - This set keeps track of records that we're currently
+ /// converting to an IR type. For example, when converting:
+ /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B'
+ /// types will be in this set.
+ llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+
+ llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
+
+ /// SkippedLayout - True if we didn't lay out a function type because it was
+ /// inside a recursive struct conversion; when set, the type cache is cleared.
+ bool SkippedLayout;
+
+ llvm::SmallVector<const RecordDecl *, 8> DeferredRecords;
+
private:
- /// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder)
- /// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
- /// used instead of llvm::Type because it allows us to bypass potential
- /// dangling type pointers due to type refinement on llvm side.
- llvm::DenseMap<const Type *, llvm::PATypeHolder> TypeCache;
-
- /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
- /// method directly because it does not do any type caching. This method
- /// is available only for ConvertType(). CovertType() is preferred
- /// interface to convert type T into a llvm::Type.
- const llvm::Type *ConvertNewType(QualType T);
-
- /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
- /// pointers that are referenced but have not been converted yet. This is
- /// used to handle cyclic structures properly.
- void HandleLateResolvedPointers();
-
- /// addRecordTypeName - Compute a name from the given record decl with an
- /// optional suffix and name the given LLVM type using it.
- void addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
- llvm::StringRef suffix);
+ /// TypeCache - This map keeps cache of llvm::Types
+ /// and maps llvm::Types to corresponding clang::Type.
+ llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
- const ABIInfo &Info, CGCXXABI &CXXABI);
+ const ABIInfo &Info, CGCXXABI &CXXABI,
+ const CodeGenOptions &Opts);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
CGCXXABI &getCXXABI() const { return TheCXXABI; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
- const llvm::Type *ConvertType(QualType T, bool IsRecursive = false);
- const llvm::Type *ConvertTypeRecursive(QualType T);
+ llvm::Type *ConvertType(QualType T);
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false);
- const llvm::Type *ConvertTypeForMemRecursive(QualType T) {
- return ConvertTypeForMem(T, true);
- }
+ llvm::Type *ConvertTypeForMem(QualType T);
/// GetFunctionType - Get the LLVM function type for \arg Info.
- const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic,
- bool IsRecursive = false);
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic);
- const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+ llvm::FunctionType *GetFunctionType(GlobalDecl GD);
- /// VerifyFuncTypeComplete - Utility to check whether a function type can
+ /// isFuncTypeConvertible - Utility to check whether a function type can
/// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
/// type).
- static const TagType *VerifyFuncTypeComplete(const Type* T);
-
+ bool isFuncTypeConvertible(const FunctionType *FT);
+ bool isFuncTypeArgumentConvertible(QualType Ty);
+
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
@@ -150,11 +142,6 @@ public:
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
- /// addBaseSubobjectTypeName - Add a type name for the base subobject of the
- /// given record layout.
- void addBaseSubobjectTypeName(const CXXRecordDecl *RD,
- const CGRecordLayout &layout);
-
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
@@ -180,10 +167,8 @@ public:
Ty->getExtInfo());
}
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty,
- bool IsRecursive = false);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
- bool IsRecursive = false);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
/// getFunctionInfo - Get the function info for a member function of
/// the given type. This is used for calls through member function
@@ -206,23 +191,27 @@ public:
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info,
- bool IsRecursive = false);
+ const FunctionType::ExtInfo &Info);
/// \brief Compute a new LLVM record layout object for the given record.
- CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
+
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
+ llvm::StringRef suffix);
+
public: // These are internal details of CGT that shouldn't be used externally.
- /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
- /// enum.
- const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+ /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+ llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
/// GetExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<const llvm::Type*> &expanded,
- bool isRecursive);
+ llvm::SmallVectorImpl<llvm::Type*> &expanded);
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
@@ -231,6 +220,15 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// IsZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const CXXRecordDecl *RD);
+
+ bool isRecordLayoutComplete(const Type *Ty) const;
+ bool noRecordsBeingLaidOut() const {
+ return RecordsBeingLaidOut.empty();
+ }
+ bool isRecordBeingLaidOut(const Type *Ty) const {
+ return RecordsBeingLaidOut.count(Ty);
+ }
+
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/GlobalDecl.h b/lib/CodeGen/GlobalDecl.h
deleted file mode 100644
index c2f36d210bfc..000000000000
--- a/lib/CodeGen/GlobalDecl.h
+++ /dev/null
@@ -1,127 +0,0 @@
-//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
-// together with its type.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_GLOBALDECL_H
-#define CLANG_CODEGEN_GLOBALDECL_H
-
-#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/Basic/ABI.h"
-
-namespace clang {
-
-namespace CodeGen {
-
-/// GlobalDecl - represents a global declaration. This can either be a
-/// CXXConstructorDecl and the constructor type (Base, Complete).
-/// a CXXDestructorDecl and the destructor type (Base, Complete) or
-/// a VarDecl, a FunctionDecl or a BlockDecl.
-class GlobalDecl {
- llvm::PointerIntPair<const Decl*, 2> Value;
-
- void Init(const Decl *D) {
- assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
- assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
-
- Value.setPointer(D);
- }
-
-public:
- GlobalDecl() {}
-
- GlobalDecl(const VarDecl *D) { Init(D);}
- GlobalDecl(const FunctionDecl *D) { Init(D); }
- GlobalDecl(const BlockDecl *D) { Init(D); }
- GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
-
- GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
- : Value(D, Type) {}
- GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
- : Value(D, Type) {}
-
- GlobalDecl getCanonicalDecl() const {
- GlobalDecl CanonGD;
- CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
- CanonGD.Value.setInt(Value.getInt());
-
- return CanonGD;
- }
-
- const Decl *getDecl() const { return Value.getPointer(); }
-
- CXXCtorType getCtorType() const {
- assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
- return static_cast<CXXCtorType>(Value.getInt());
- }
-
- CXXDtorType getDtorType() const {
- assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
- return static_cast<CXXDtorType>(Value.getInt());
- }
-
- friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
- return LHS.Value == RHS.Value;
- }
-
- void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
-
- static GlobalDecl getFromOpaquePtr(void *P) {
- GlobalDecl GD;
- GD.Value.setFromOpaqueValue(P);
- return GD;
- }
-
- GlobalDecl getWithDecl(const Decl *D) {
- GlobalDecl Result(*this);
- Result.Value.setPointer(D);
- return Result;
- }
-};
-
-} // end namespace CodeGen
-} // end namespace clang
-
-namespace llvm {
- template<class> struct DenseMapInfo;
-
- template<> struct DenseMapInfo<clang::CodeGen::GlobalDecl> {
- static inline clang::CodeGen::GlobalDecl getEmptyKey() {
- return clang::CodeGen::GlobalDecl();
- }
-
- static inline clang::CodeGen::GlobalDecl getTombstoneKey() {
- return clang::CodeGen::GlobalDecl::
- getFromOpaquePtr(reinterpret_cast<void*>(-1));
- }
-
- static unsigned getHashValue(clang::CodeGen::GlobalDecl GD) {
- return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
- }
-
- static bool isEqual(clang::CodeGen::GlobalDecl LHS,
- clang::CodeGen::GlobalDecl RHS) {
- return LHS == RHS;
- }
-
- };
-
- // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
- // copy assignment operator, and destructor are all trivial.
- template <>
- struct isPodLike<clang::CodeGen::GlobalDecl> {
- static const bool value = true;
- };
-} // end namespace llvm
-
-#endif
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 12ef9bd030bd..0c86080fa80e 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -24,6 +24,7 @@
#include "CodeGenModule.h"
#include <clang/AST/Mangle.h>
#include <clang/AST/Type.h>
+#include <llvm/Intrinsics.h>
#include <llvm/Target/TargetData.h>
#include <llvm/Value.h>
@@ -33,15 +34,15 @@ using namespace CodeGen;
namespace {
class ItaniumCXXABI : public CodeGen::CGCXXABI {
private:
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *PtrDiffTy;
protected:
bool IsARM;
// It's a little silly for us to cache this.
- const llvm::IntegerType *getPtrDiffTy() {
+ llvm::IntegerType *getPtrDiffTy() {
if (!PtrDiffTy) {
QualType T = getContext().getPointerDiffType();
- const llvm::Type *Ty = CGM.getTypes().ConvertTypeRecursive(T);
+ llvm::Type *Ty = CGM.getTypes().ConvertType(T);
PtrDiffTy = cast<llvm::IntegerType>(Ty);
}
return PtrDiffTy;
@@ -57,7 +58,7 @@ public:
bool isZeroInitializable(const MemberPointerType *MPT);
- const llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
llvm::Value *&This,
@@ -175,13 +176,11 @@ CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
return new ARMCXXABI(CGM);
}
-const llvm::Type *
+llvm::Type *
ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
if (MPT->isMemberDataPointer())
return getPtrDiffTy();
- else
- return llvm::StructType::get(CGM.getLLVMContext(),
- getPtrDiffTy(), getPtrDiffTy(), NULL);
+ return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
}
/// In the Itanium and ARM ABIs, method pointers have the form:
@@ -473,8 +472,7 @@ ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
else
Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
- return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
- /*Packed=*/false);
+ return llvm::ConstantStruct::get(CS->getType(), Values);
}
@@ -489,8 +487,7 @@ ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
llvm::Constant *Values[2] = { Zero, Zero };
- return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
- /*Packed=*/false);
+ return llvm::ConstantStruct::getAnon(Values);
}
llvm::Constant *
@@ -540,7 +537,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
const llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
- if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
Ty = Types.GetFunctionType(Types.getFunctionInfo(MD),
FPT->isVariadic());
@@ -555,8 +552,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
}
- return llvm::ConstantStruct::get(CGM.getLLVMContext(),
- MemPtr, 2, /*Packed=*/false);
+ return llvm::ConstantStruct::getAnon(MemPtr);
}
/// The comparison algorithm is pretty easy: the member pointers are
@@ -802,10 +798,27 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ QualType AllocatedType = expr->getAllocatedType();
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ AllocatedType->isObjCLifetimeType()) {
+ switch (AllocatedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
- expr->getAllocatedType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
return (record && !record->hasTrivialDestructor());
}
@@ -816,6 +829,22 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
if (expr->doesUsualArrayDeleteWantSize())
return true;
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ if (getContext().getLangOptions().ObjCAutoRefCount &&
+ elementType->isObjCLifetimeType()) {
+ switch (elementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
// Otherwise, if the class has a non-trivial destructor, it always
// needs a cookie.
const CXXRecordDecl *record =
@@ -1005,31 +1034,34 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
/*********************** Static local initialization **************************/
static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
}
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
}
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
- const llvm::PointerType *GuardPtrTy) {
+ llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
+ llvm::Type *ArgTys[] = { GuardPtrTy };
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- GuardPtrTy, /*isVarArg=*/false);
+ ArgTys, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
}
@@ -1039,7 +1071,7 @@ namespace {
llvm::GlobalVariable *Guard;
CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
- void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
->setDoesNotThrow();
}
@@ -1055,21 +1087,21 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// We only need to use thread-safe statics for local variables;
// global initialization is always single-threaded.
- bool ThreadsafeStatics = (getContext().getLangOptions().ThreadsafeStatics &&
- D.isLocalVarDecl());
+ bool threadsafe =
+ (getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
const llvm::IntegerType *GuardTy;
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
- bool UseInt8GuardVariable = !ThreadsafeStatics && GV->hasInternalLinkage();
- if (UseInt8GuardVariable)
- GuardTy = Builder.getInt8Ty();
- else {
+ bool useInt8GuardVariable = !threadsafe && GV->hasInternalLinkage();
+ if (useInt8GuardVariable) {
+ GuardTy = CGF.Int8Ty;
+ } else {
// Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
- GuardTy = (IsARM ? Builder.getInt32Ty() : Builder.getInt64Ty());
+ GuardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
}
- const llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
+ llvm::PointerType *GuardPtrTy = GuardTy->getPointerTo();
// Create the guard variable.
llvm::SmallString<256> GuardVName;
@@ -1099,7 +1131,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// if (__cxa_guard_acquire(&obj_guard))
// ...
// }
- if (IsARM && !UseInt8GuardVariable) {
+ if (IsARM && !useInt8GuardVariable) {
llvm::Value *V = Builder.CreateLoad(GuardVariable);
V = Builder.CreateAnd(V, Builder.getInt32(1));
IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
@@ -1130,13 +1162,16 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ llvm::BasicBlock *NoCheckBlock = EndBlock;
+ if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");
+
// Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
+ Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
CGF.EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
- if (ThreadsafeStatics) {
+ if (threadsafe) {
// Call __cxa_guard_acquire.
llvm::Value *V
= Builder.CreateCall(getGuardAcquireFn(CGM, GuardPtrTy), GuardVariable);
@@ -1155,7 +1190,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Emit the initializer and add a global destructor if appropriate.
CGF.EmitCXXGlobalVarDeclInit(D, GV);
- if (ThreadsafeStatics) {
+ if (threadsafe) {
// Pop the guard-abort cleanup if we pushed one.
CGF.PopCleanupBlock();
@@ -1165,5 +1200,23 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
}
+ // Emit an acquire memory barrier if using thread-safe statics:
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ if (threadsafe) {
+ Builder.CreateBr(EndBlock);
+ CGF.EmitBlock(NoCheckBlock);
+
+ llvm::Value *_false = Builder.getFalse();
+ llvm::Value *_true = Builder.getTrue();
+
+ Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
+ /* load-load, load-store */ _true, _true,
+ /* store-load, store-store */ _false, _false,
+ /* device or I/O */ _false);
+ }
+
CGF.EmitBlock(EndBlock);
}
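For context, a minimal hypothetical function-local static of the kind this code guards; with thread-safe statics enabled (the default), its initialization is wrapped in __cxa_guard_acquire/__cxa_guard_release, and after this change the already-initialized fast path also issues the acquire barrier.

struct Expensive { Expensive(); };
Expensive &instance() {
  static Expensive cached;   // guarded, one-time initialization
  return cached;
}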
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 043ead7e62d9..df2c1bd98cca 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -75,7 +75,7 @@ void ABIArgInfo::dump() const {
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
- << " Byal=" << getIndirectByVal()
+ << " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
case Expand:
@@ -356,9 +356,9 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
IRType->getScalarSizeInBits() != 64;
}
-static const llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) {
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) {
if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
return Ty;
@@ -374,6 +374,7 @@ class X86_32ABIInfo : public ABIInfo {
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
+ bool IsMMXDisabled;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
@@ -403,14 +404,15 @@ public:
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
+ IsMMXDisabled(m) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
- :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -425,9 +427,9 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -562,7 +564,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
return ABIArgInfo::getDirect(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
@@ -699,8 +701,11 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
Size));
}
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+ llvm::Type *IRType = CGT.ConvertType(Ty);
if (UseX86_MMXType(IRType)) {
+ if (IsMMXDisabled)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ 64));
ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
return AAI;
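A hypothetical 64-bit vector parameter (invented names) of the kind affected by the new IsMMXDisabled flag: by default it is coerced to the x86_mmx IR type, while under the "no-mmx" ABI it is passed as a plain i64.

typedef int v2si __attribute__((vector_size(8)));   // 64-bit vector type
v2si pass_through(v2si v) { return v; }             // parameter takes the MMX path above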
@@ -820,6 +825,22 @@ class X86_64ABIInfo : public ABIInfo {
/// should just return Memory for the aggregate).
static Class merge(Class Accum, Class Field);
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
///
@@ -843,13 +864,13 @@ class X86_64ABIInfo : public ABIInfo {
/// also be ComplexX87.
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
- const llvm::Type *Get16ByteVectorType(QualType Ty) const;
- const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
- const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
/// getIndirectResult - Give a source type \arg Ty, return a suitable result
/// such that the argument will be returned in memory.
@@ -921,9 +942,9 @@ public:
return false;
}
- const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -956,6 +977,39 @@ public:
}
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
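A minimal standalone sketch of the post-merge rules above, using a local Class enum and a honorsRev098 flag in place of clang's members; it simply restates rules (a) through (d) as a free function.

enum Class { NoClass, Integer, SSE, SSEUp, X87, X87Up, ComplexX87, Memory };

static void postMergeSketch(unsigned AggregateSizeBits, Class &Lo, Class &Hi,
                            bool honorsRev098) {
  if (Hi == Memory)                                          // rule (a)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRev098)              // rule (b)
    Lo = Memory;
  if (AggregateSizeBits > 128 && (Lo != SSE || Hi != SSEUp)) // rule (c)
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)                              // rule (d)
    Hi = SSE;
}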
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
@@ -1082,7 +1136,14 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
- } else if (Size == 128) {
+ } else if (Size == 128 || Size == 256) {
+ // Arguments of 256-bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256 bits, but since there are no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
Lo = SSE;
Hi = SSEUp;
}
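A hypothetical 256-bit vector argument (invented names): the low eightbyte is classified SSE and the remaining ones SSEUP, so the whole value travels in one YMM register on AVX-capable targets.

typedef double v4df __attribute__((vector_size(32)));   // 256-bit vector
v4df twice(v4df v) { return v + v; }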
@@ -1121,8 +1182,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
@@ -1137,6 +1198,13 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
classify(AT->getElementType(), Offset, FieldLo, FieldHi);
@@ -1146,9 +1214,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // Do post merger cleanup (see below). Only case we worry about is Memory.
- if (Hi == Memory)
- Lo = Memory;
+ postMerge(Size, Lo, Hi);
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
return;
}
@@ -1157,8 +1223,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than two eightbytes, ..., it has class MEMORY.
- if (Size > 128)
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
@@ -1209,9 +1275,17 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
//
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ return;
+ }
// Note, skip this test for bit-fields, see below.
if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Lo = Memory;
@@ -1257,31 +1331,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
break;
}
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is MEMORY, the whole argument is
- // passed in memory.
- //
- // (b) If X87UP is not preceded by X87, the whole argument is
- // passed in memory.
- //
- // (c) If the size of the aggregate exceeds two eightbytes and the first
- // eight-byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole
- // argument is passed in memory.
- //
- // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
- //
- // Some of these are enforced by the merging logic. Others can arise
- // only with unions; for example:
- // union { _Complex double; unsigned; }
- //
- // Note that clauses (b) and (c) were added in 0.98.
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
+ postMerge(Size, Lo, Hi);
}
}
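Two hypothetical aggregates (invented names) under the widened size rules: a struct whose only member is a 256-bit vector still qualifies for SSE/SSEUP, while any other aggregate larger than two eightbytes is sent to memory.

typedef float v8sf __attribute__((vector_size(32)));
struct JustVec { v8sf v; };            // single 256-bit element: SSE + SSEUP
struct VecPlus { v8sf v; int tag; };   // > 16 bytes, not a lone 256-bit element: MEMORY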
@@ -1321,24 +1371,25 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
return ABIArgInfo::getIndirect(Align);
}
-/// Get16ByteVectorType - The ABI specifies that a value should be passed in an
-/// full vector XMM register. Pick an LLVM IR type that will be passed as a
+/// GetByteVectorType - The ABI specifies that a value should be passed in an
+/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
-const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
// Wrapper structs that just contain vectors are passed just like vectors,
// strip them off if present.
- const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
while (STy && STy->getNumElements() == 1) {
IRType = STy->getElementType(0);
STy = dyn_cast<llvm::StructType>(IRType);
}
// If the preferred type is a 16-byte vector, prefer to pass it.
- if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
- const llvm::Type *EltTy = VT->getElementType();
- if (VT->getBitWidth() == 128 &&
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ llvm::Type *EltTy = VT->getElementType();
+ unsigned BitWidth = VT->getBitWidth();
+ if ((BitWidth == 128 || BitWidth == 256) &&
(EltTy->isFloatTy() || EltTy->isDoubleTy() ||
EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
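A hypothetical single-element wrapper (invented names) showing the stripping in GetByteVectorType above: the struct layer is peeled away, so W is passed exactly like the 128-bit vector it wraps.

typedef float v4sf __attribute__((vector_size(16)));
struct W { v4sf v; };
v4sf unwrap(W w) { return w.v; }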
@@ -1466,8 +1517,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
-const llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// The only three choices we have are either double, <2 x float>, or float. We
// pass as float if the last 4 bytes is just padding. This happens for
@@ -1501,8 +1552,8 @@ GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// SourceTy is the source level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
-const llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// If we're dealing with an un-offset LLVM IR type, then it means that we're
// returning an 8-byte unit starting with it. See if we can safely use it.
@@ -1540,7 +1591,7 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
}
if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- const llvm::Type *EltTy = ATy->getElementType();
+ llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
@@ -1566,8 +1617,8 @@ GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
-static const llvm::Type *
-GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
const llvm::TargetData &TD) {
// In order to correctly satisfy the ABI, we need to the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
@@ -1594,8 +1645,7 @@ GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
}
}
- const llvm::StructType *Result =
- llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
// Verify that the second element is at an 8-byte offset.
@@ -1615,7 +1665,7 @@ classifyReturnType(QualType RetTy) const {
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1638,8 +1688,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
- RetTy, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1657,7 +1706,7 @@ classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
- ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
@@ -1671,14 +1720,13 @@ classifyReturnType(QualType RetTy) const {
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(getVMContext(),
- llvm::Type::getX86_FP80Ty(getVMContext()),
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
llvm::Type::getX86_FP80Ty(getVMContext()),
NULL);
break;
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
@@ -1691,24 +1739,24 @@ classifyReturnType(QualType RetTy) const {
break;
case Integer:
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the upper half of the last used SSE register.
+ // is passed in the next available eightbyte chunk of the last used
+ // vector register.
//
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = Get16ByteVectorType(RetTy);
+ ResType = GetByteVectorType(RetTy);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
@@ -1719,8 +1767,7 @@ classifyReturnType(QualType RetTy) const {
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87) {
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
- 8, RetTy, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
}
@@ -1748,7 +1795,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
neededInt = 0;
neededSSE = 0;
- const llvm::Type *ResType = 0;
+ llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
@@ -1767,6 +1814,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ ++neededInt;
return getIndirectResult(Ty);
case SSEUp:
@@ -1780,7 +1829,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
++neededInt;
// Pick an 8-byte type based on the preferred type.
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
@@ -1800,19 +1849,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
case SSE: {
- const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
- if (Hi != NoClass || !UseX86_MMXType(IRType))
- ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
- else
- // This is an MMX type. Treat it as such.
- ResType = llvm::Type::getX86_MMXTy(getVMContext());
-
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
++neededSSE;
break;
}
}
- const llvm::Type *HighPart = 0;
+ llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
// never occur as hi classes, and X87Up must be preceded by X87,
@@ -1828,7 +1872,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case Integer:
++neededInt;
// Pick an 8-byte type based on the preferred type.
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1838,7 +1882,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// memory), except in situations involving unions.
case X87Up:
case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
@@ -1851,7 +1895,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
// register. This only happens when 128-bit vectors are passed.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification");
- ResType = Get16ByteVectorType(Ty);
+ ResType = GetByteVectorType(Ty);
break;
}
@@ -2059,10 +2103,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// area, we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
const llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
DoubleTy, NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2277,6 +2321,10 @@ public:
return 13;
}
+ llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
@@ -2290,8 +2338,6 @@ public:
return false;
}
-
-
};
}
@@ -2371,9 +2417,8 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
- const llvm::Type *STy =
- llvm::StructType::get(getVMContext(),
- llvm::ArrayType::get(ElemTy, SizeRegs), NULL, NULL);
+ llvm::Type *STy =
+ llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
return ABIArgInfo::getDirect(STy);
}
@@ -3010,10 +3055,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
- case llvm::Triple::x86:
+ case llvm::Triple::x86: {
+ bool DisableMMX = strcmp(getContext().Target.getABI(), "no-mmx") == 0;
+
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, true, true));
+ new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));
switch (Triple.getOS()) {
case llvm::Triple::Cygwin:
@@ -3024,12 +3071,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::OpenBSD:
case llvm::Triple::NetBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, true));
+ new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Types, false, false));
+ new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
}
+ }
case llvm::Triple::x86_64:
switch (Triple.getOS()) {
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 4f59eb619e9d..d5e8884cdffe 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -106,11 +106,25 @@ namespace clang {
return Address;
}
- virtual const llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
- const llvm::Type* Ty) const {
+ virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ llvm::StringRef Constraint,
+ llvm::Type* Ty) const {
return Ty;
}
+
+ /// Retrieve the address of a function to call immediately before
+ /// calling objc_retainAutoreleasedReturnValue. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+ /// a particular instruction sequence. This functions returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
};
}
diff --git a/lib/Driver/Arg.cpp b/lib/Driver/Arg.cpp
index f1177cf97e28..39b7e5556494 100644
--- a/lib/Driver/Arg.cpp
+++ b/lib/Driver/Arg.cpp
@@ -113,7 +113,7 @@ void Arg::render(const ArgList &Args, ArgStringList &Output) const {
break;
case Option::RenderSeparateStyle:
- Output.push_back(getOption().getName());
+ Output.push_back(getOption().getName().data());
for (unsigned i = 0, e = getNumValues(); i != e; ++i)
Output.push_back(getValue(Args, i));
break;
diff --git a/lib/Driver/ArgList.cpp b/lib/Driver/ArgList.cpp
index 596e2a7529cb..b8af9cc47e09 100644
--- a/lib/Driver/ArgList.cpp
+++ b/lib/Driver/ArgList.cpp
@@ -287,9 +287,9 @@ Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
llvm::StringRef Value) const {
- unsigned Index = BaseArgs.MakeIndex(Opt->getName() + Value.str());
+ unsigned Index = BaseArgs.MakeIndex(Opt->getName().str() + Value.str());
Arg *A = new Arg(Opt, Index,
- BaseArgs.getArgString(Index) + strlen(Opt->getName()),
+ BaseArgs.getArgString(Index) + Opt->getName().size(),
BaseArg);
SynthesizedArgs.push_back(A);
return A;
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index eb878a3f1c5e..789f6f7ae9ce 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -458,11 +458,20 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
llvm::outs() << "\n";
llvm::outs() << "libraries: =";
+
+ std::string sysroot;
+ if (Arg *A = C.getArgs().getLastArg(options::OPT__sysroot_EQ))
+ sysroot = A->getValue(C.getArgs());
+
for (ToolChain::path_list::const_iterator it = TC.getFilePaths().begin(),
ie = TC.getFilePaths().end(); it != ie; ++it) {
if (it != TC.getFilePaths().begin())
llvm::outs() << ':';
- llvm::outs() << *it;
+ const char *path = it->c_str();
+ if (path[0] == '=')
+ llvm::outs() << sysroot << path + 1;
+ else
+ llvm::outs() << path;
}
llvm::outs() << "\n";
return false;
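The '=' handling in the "libraries: =" printout follows the GCC convention that a configured search path beginning with '=' is interpreted relative to the sysroot. A minimal, self-contained illustration of just that substitution (plain strings, no driver types):

#include <cstdio>
#include <string>

// Replace a leading '=' with the sysroot; otherwise return the path unchanged.
static std::string withSysroot(const std::string &path,
                               const std::string &sysroot) {
  if (!path.empty() && path[0] == '=')
    return sysroot + path.substr(1);
  return path;
}

int main() {
  std::printf("%s\n", withSysroot("=/usr/lib", "/opt/sdk").c_str()); // /opt/sdk/usr/lib
  std::printf("%s\n", withSysroot("/lib", "/opt/sdk").c_str());      // /lib
  return 0;
}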
@@ -942,10 +951,6 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
case phases::Precompile:
return new PrecompileJobAction(Input, types::TY_PCH);
case phases::Compile: {
- bool HasO4 = false;
- if (const Arg *A = Args.getLastArg(options::OPT_O_Group))
- HasO4 = A->getOption().matches(options::OPT_O4);
-
if (Args.hasArg(options::OPT_fsyntax_only)) {
return new CompileJobAction(Input, types::TY_Nothing);
} else if (Args.hasArg(options::OPT_rewrite_objc)) {
@@ -954,9 +959,7 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
return new AnalyzeJobAction(Input, types::TY_Plist);
} else if (Args.hasArg(options::OPT_emit_ast)) {
return new CompileJobAction(Input, types::TY_AST);
- } else if (Args.hasArg(options::OPT_emit_llvm) ||
- Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false) ||
- HasO4) {
+ } else if (IsUsingLTO(Args)) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return new CompileJobAction(Input, Output);
@@ -972,6 +975,19 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
return 0;
}
+bool Driver::IsUsingLTO(const ArgList &Args) const {
+ // Check for -emit-llvm or -flto.
+ if (Args.hasArg(options::OPT_emit_llvm) ||
+ Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false))
+ return true;
+
+ // Check for -O4.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group))
+ return A->getOption().matches(options::OPT_O4);
+
+ return false;
+}
+
void Driver::BuildJobs(Compilation &C) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
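IsUsingLTO() centralizes the three triggers that previously lived inline in ConstructPhaseAction: -emit-llvm, a -flto not overridden by a later -fno-lto, or a last -O group argument of -O4. The sketch below re-implements the same precedence over plain strings; it is not the driver's ArgList API, only an illustration:

#include <cstdio>
#include <string>
#include <vector>

static bool IsUsingLTOSketch(const std::vector<std::string> &Args) {
  bool EmitLLVM = false, FLTO = false;
  std::string LastOGroup;
  for (size_t i = 0; i != Args.size(); ++i) {
    if (Args[i] == "-emit-llvm")      EmitLLVM = true;
    else if (Args[i] == "-flto")      FLTO = true;   // last of -flto/-fno-lto wins
    else if (Args[i] == "-fno-lto")   FLTO = false;
    else if (Args[i].compare(0, 2, "-O") == 0)
      LastOGroup = Args[i];                          // only the last -O* counts
  }
  if (EmitLLVM || FLTO)
    return true;
  return LastOGroup == "-O4";
}

int main() {
  std::vector<std::string> A;
  A.push_back("-O4");
  A.push_back("-O2");                                // later -O2 overrides -O4: no LTO
  std::printf("%d\n", IsUsingLTOSketch(A));          // 0
  A.push_back("-flto");
  std::printf("%d\n", IsUsingLTOSketch(A));          // 1
  return 0;
}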
@@ -1245,6 +1261,15 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
}
+ // If we're saving temps and the temp filename conflicts with the input
+ // filename, then avoid overwriting input file.
+ if (!AtTopLevel && C.getArgs().hasArg(options::OPT_save_temps) &&
+ NamedOutput == BaseName) {
+ std::string TmpName =
+ GetTemporaryPath(types::getTypeTempSuffix(JA.getType()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+
// As an annoying special case, PCH generation doesn't strip the pathname.
if (JA.getType() == types::TY_PCH) {
llvm::sys::path::remove_filename(BasePath);
diff --git a/lib/Driver/Option.cpp b/lib/Driver/Option.cpp
index a992cef3d239..90d21a3d0b17 100644
--- a/lib/Driver/Option.cpp
+++ b/lib/Driver/Option.cpp
@@ -144,7 +144,7 @@ FlagOption::FlagOption(OptSpecifier ID, const char *Name,
Arg *FlagOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
- if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ if (getName().size() != strlen(Args.getArgString(Index)))
return 0;
return new Arg(getUnaliasedOption(), Index++);
@@ -157,7 +157,7 @@ JoinedOption::JoinedOption(OptSpecifier ID, const char *Name,
Arg *JoinedOption::accept(const ArgList &Args, unsigned &Index) const {
// Always matches.
- const char *Value = Args.getArgString(Index) + strlen(getName());
+ const char *Value = Args.getArgString(Index) + getName().size();
return new Arg(getUnaliasedOption(), Index++, Value);
}
@@ -170,7 +170,7 @@ CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name,
Arg *CommaJoinedOption::accept(const ArgList &Args,
unsigned &Index) const {
// Always matches.
- const char *Str = Args.getArgString(Index) + strlen(getName());
+ const char *Str = Args.getArgString(Index) + getName().size();
Arg *A = new Arg(getUnaliasedOption(), Index++);
// Parse out the comma separated values.
@@ -205,7 +205,7 @@ SeparateOption::SeparateOption(OptSpecifier ID, const char *Name,
Arg *SeparateOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
- if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ if (getName().size() != strlen(Args.getArgString(Index)))
return 0;
Index += 2;
@@ -225,7 +225,7 @@ MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name,
Arg *MultiArgOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
- if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ if (getName().size() != strlen(Args.getArgString(Index)))
return 0;
Index += 1 + NumArgs;
@@ -250,8 +250,8 @@ Arg *JoinedOrSeparateOption::accept(const ArgList &Args,
unsigned &Index) const {
// If this is not an exact match, it is a joined arg.
// FIXME: Avoid strlen.
- if (strlen(getName()) != strlen(Args.getArgString(Index))) {
- const char *Value = Args.getArgString(Index) + strlen(getName());
+ if (getName().size() != strlen(Args.getArgString(Index))) {
+ const char *Value = Args.getArgString(Index) + getName().size();
return new Arg(this, Index++, Value);
}
@@ -279,6 +279,6 @@ Arg *JoinedAndSeparateOption::accept(const ArgList &Args,
return 0;
return new Arg(getUnaliasedOption(), Index - 2,
- Args.getArgString(Index-2)+strlen(getName()),
+ Args.getArgString(Index-2)+getName().size(),
Args.getArgString(Index-1));
}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index d9199154104a..74b65918f34f 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -15,7 +15,9 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Options.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang::driver;
@@ -47,6 +49,25 @@ bool ToolChain::HasNativeLLVMSupport() const {
return false;
}
+void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
+ switch (runtime.getKind()) {
+ case ObjCRuntime::NeXT:
+ // Assume a minimal NeXT runtime.
+ runtime.HasARC = false;
+ runtime.HasWeak = false;
+ runtime.HasTerminate = false;
+ return;
+
+ case ObjCRuntime::GNU:
+ // Assume a maximal GNU runtime.
+ runtime.HasARC = true;
+ runtime.HasWeak = true;
+ runtime.HasTerminate = false; // to be added
+ return;
+ }
+ llvm_unreachable("invalid runtime kind!");
+}
+
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
//
// FIXME: tblgen this.
@@ -201,18 +222,21 @@ ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
}
void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
+ ArgStringList &CmdArgs,
+ bool ObjCXXAutoRefCount) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
+ // Header search paths are handled by the mass of goop in InitHeaderSearch.
+
switch (Type) {
case ToolChain::CST_Libcxx:
- CmdArgs.push_back("-nostdinc++");
- CmdArgs.push_back("-cxx-isystem");
- CmdArgs.push_back("/usr/include/c++/v1");
+ if (ObjCXXAutoRefCount)
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libc++");
break;
case ToolChain::CST_Libstdcxx:
- // Currently handled by the mass of goop in InitHeaderSearch.
+ if (ObjCXXAutoRefCount)
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libstdc++");
break;
}
}
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index ca613e3d6c84..1619ef8f84a9 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -19,6 +19,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/OptTable.h"
#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
@@ -26,6 +27,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -35,13 +37,16 @@
#include <cstdlib> // ::getenv
+#include "llvm/Config/config.h" // for CXX_INCLUDE_ROOT
+
using namespace clang::driver;
using namespace clang::driver::toolchains;
/// Darwin - Darwin tool chain for i386 and x86_64.
Darwin::Darwin(const HostInfo &Host, const llvm::Triple& Triple)
- : ToolChain(Host, Triple), TargetInitialized(false)
+ : ToolChain(Host, Triple), TargetInitialized(false),
+ ARCRuntimeForSimulator(ARCSimulator_None)
{
// Compute the initial Darwin version based on the host.
bool HadExtra;
@@ -70,6 +75,39 @@ bool Darwin::HasNativeLLVMSupport() const {
return true;
}
+bool Darwin::hasARCRuntime() const {
+ // FIXME: Remove this once there is a proper way to detect an ARC runtime
+ // for the simulator.
+ switch (ARCRuntimeForSimulator) {
+ case ARCSimulator_None:
+ break;
+ case ARCSimulator_HasARCRuntime:
+ return true;
+ case ARCSimulator_NoARCRuntime:
+ return false;
+ }
+
+ if (isTargetIPhoneOS())
+ return !isIPhoneOSVersionLT(5);
+ else
+ return !isMacosxVersionLT(10, 7);
+}
+
+/// Darwin provides an ARC runtime starting in Mac OS X 10.7 and iOS 5.0.
+void Darwin::configureObjCRuntime(ObjCRuntime &runtime) const {
+ if (runtime.getKind() != ObjCRuntime::NeXT)
+ return ToolChain::configureObjCRuntime(runtime);
+
+ runtime.HasARC = runtime.HasWeak = hasARCRuntime();
+
+ // So far, objc_terminate is only available in iOS 5.
+ // FIXME: do the simulator logic properly.
+ if (!ARCRuntimeForSimulator && isTargetIPhoneOS())
+ runtime.HasTerminate = !isIPhoneOSVersionLT(5);
+ else
+ runtime.HasTerminate = false;
+}
+
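Ignoring the simulator special case, configureObjCRuntime() for Darwin reduces to a small decision table: ARC and __weak are available from iOS 5 / Mac OS X 10.7 on, and objc_terminate only from iOS 5. The hedged sketch below encodes that table with plain integers; RuntimeCaps and darwinCaps are invented names, not toolchain types:

#include <cstdio>

struct RuntimeCaps { bool HasARC, HasWeak, HasTerminate; };

static RuntimeCaps darwinCaps(bool isIPhoneOS, unsigned Major, unsigned Minor) {
  RuntimeCaps C;
  bool HasARC = isIPhoneOS ? (Major >= 5)
                           : (Major > 10 || (Major == 10 && Minor >= 7));
  C.HasARC = C.HasWeak = HasARC;
  C.HasTerminate = isIPhoneOS && Major >= 5;   // so far iOS-only
  return C;
}

int main() {
  RuntimeCaps Lion = darwinCaps(false, 10, 7);
  RuntimeCaps iOS4 = darwinCaps(true, 4, 3);
  std::printf("10.7:    arc=%d weak=%d terminate=%d\n",
              Lion.HasARC, Lion.HasWeak, Lion.HasTerminate);
  std::printf("iOS 4.3: arc=%d weak=%d terminate=%d\n",
              iOS4.HasARC, iOS4.HasWeak, iOS4.HasTerminate);
  return 0;
}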
// FIXME: Can we tablegen this?
static const char *GetArmArchForMArch(llvm::StringRef Value) {
if (Value == "armv6k")
@@ -320,6 +358,45 @@ void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
}
+void DarwinClang::AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+
+ CmdArgs.push_back("-force_load");
+ llvm::sys::Path P(getDriver().ClangExecutable);
+ P.eraseComponent(); // 'clang'
+ P.eraseComponent(); // 'bin'
+ P.appendComponent("lib");
+ P.appendComponent("arc");
+ P.appendComponent("libarclite_");
+ std::string s = P.str();
+ // Mash in the platform.
+ if (isTargetIPhoneOS())
+ s += "iphoneos";
+ // FIXME: isTargetIphoneOSSimulator() is not returning true.
+ else if (ARCRuntimeForSimulator != ARCSimulator_None)
+ s += "iphonesimulator";
+ else
+ s += "macosx";
+ s += ".a";
+
+ CmdArgs.push_back(Args.MakeArgString(s));
+}
+
+void DarwinClang::AddLinkRuntimeLib(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *DarwinStaticLib) const {
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("lib");
+ P.appendComponent("darwin");
+ P.appendComponent(DarwinStaticLib);
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build.
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+}
+
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
// Darwin doesn't support real static executables, don't link any runtime
@@ -341,7 +418,6 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
CmdArgs.push_back("-lSystem");
// Select the dynamic runtime library and the target specific static library.
- const char *DarwinStaticLib = 0;
if (isTargetIPhoneOS()) {
// If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
// it never went into the SDK.
@@ -349,7 +425,7 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
CmdArgs.push_back("-lgcc_s.1");
// We currently always need a static runtime library for iOS.
- DarwinStaticLib = "libclang_rt.ios.a";
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ios.a");
} else {
// The dynamic runtime library was merged with libSystem for 10.6 and
// beyond; only 10.4 and 10.5 need an additional runtime library.
@@ -367,26 +443,42 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
// libSystem. Therefore, we still must provide a runtime library just for
// the tiny tiny handful of projects that *might* use that symbol.
if (isMacosxVersionLT(10, 5)) {
- DarwinStaticLib = "libclang_rt.10.4.a";
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.10.4.a");
} else {
if (getTriple().getArch() == llvm::Triple::x86)
- DarwinStaticLib = "libclang_rt.eprintf.a";
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.eprintf.a");
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.osx.a");
}
}
+}
- /// Add the target specific static library, if needed.
- if (DarwinStaticLib) {
- llvm::sys::Path P(getDriver().ResourceDir);
- P.appendComponent("lib");
- P.appendComponent("darwin");
- P.appendComponent(DarwinStaticLib);
-
- // For now, allow missing resource libraries to support developers who may
- // not have compiler-rt checked out or integrated into their build.
- bool Exists;
- if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
- CmdArgs.push_back(Args.MakeArgString(P.str()));
- }
+static inline llvm::StringRef SimulatorVersionDefineName() {
+ return "__IPHONE_OS_VERSION_MIN_REQUIRED";
+}
+
+/// \brief Parse the simulator version define:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
+/// and return the grouped values as integers, e.g.:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=40201
+/// will return Major=4, Minor=2, Micro=1.
+static bool GetVersionFromSimulatorDefine(llvm::StringRef define,
+ unsigned &Major, unsigned &Minor,
+ unsigned &Micro) {
+ assert(define.startswith(SimulatorVersionDefineName()));
+ llvm::StringRef name, version;
+ llvm::tie(name, version) = define.split('=');
+ if (version.empty())
+ return false;
+ std::string verstr = version.str();
+ char *end;
+ unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
+ if (*end != '\0')
+ return false;
+ Major = num / 10000;
+ num = num % 10000;
+ Minor = num / 100;
+ Micro = num % 100;
+ return true;
}
void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
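GetVersionFromSimulatorDefine() relies on the packed MMmmuu layout of the define's value, so the split is plain base-10 arithmetic. A short worked check of the 40201 example from the comment above, as a standalone program:

#include <cassert>
#include <cstdio>

// 40201 -> major 4, minor 02, micro 01.
static void split(unsigned Packed, unsigned &Major, unsigned &Minor,
                  unsigned &Micro) {
  Major = Packed / 10000;
  Packed %= 10000;
  Minor = Packed / 100;
  Micro = Packed % 100;
}

int main() {
  unsigned Major, Minor, Micro;
  split(40201, Major, Minor, Micro);
  assert(Major == 4 && Minor == 2 && Micro == 1);
  std::printf("%u.%u.%u\n", Major, Minor, Micro);
  return 0;
}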
@@ -396,6 +488,27 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
Arg *iOSSimVersion = Args.getLastArg(
options::OPT_mios_simulator_version_min_EQ);
+
+ // FIXME: HACK! When compiling for the simulator we don't get a
+ // '-miphoneos-version-min' to help us know whether there is an ARC runtime
+ // or not; try to parse a __IPHONE_OS_VERSION_MIN_REQUIRED
+ // define passed in command-line.
+ if (!iOSVersion) {
+ for (arg_iterator it = Args.filtered_begin(options::OPT_D),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ llvm::StringRef define = (*it)->getValue(Args);
+ if (define.startswith(SimulatorVersionDefineName())) {
+ unsigned Major, Minor, Micro;
+ if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
+ Major < 10 && Minor < 100 && Micro < 100) {
+ ARCRuntimeForSimulator = Major < 5 ? ARCSimulator_NoARCRuntime
+ : ARCSimulator_HasARCRuntime;
+ }
+ break;
+ }
+ }
+ }
+
if (OSXVersion && (iOSVersion || iOSSimVersion)) {
getDriver().Diag(clang::diag::err_drv_argument_not_allowed_with)
<< OSXVersion->getAsString(Args)
@@ -587,8 +700,13 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
Arg *A = *it;
if (A->getOption().matches(options::OPT_Xarch__)) {
+ // Skip this argument unless the architecture matches either the toolchain
+ // triple arch, or the arch being bound.
+ //
// FIXME: Canonicalize name.
- if (getArchName() != A->getValue(Args, 0))
+ llvm::StringRef XarchArch = A->getValue(Args, 0);
+ if (!(XarchArch == getArchName() ||
+ (BoundArch && XarchArch == BoundArch)))
continue;
Arg *OriginalArg = A;
@@ -1359,7 +1477,7 @@ static std::string findGCCBaseLibDir(const std::string &GccTriple) {
ret.append(Version);
return ret;
}
- static const char* GccVersions[] = {"4.6.0", "4.6",
+ static const char* GccVersions[] = {"4.6.1", "4.6.0", "4.6",
"4.5.2", "4.5.1", "4.5",
"4.4.5", "4.4.4", "4.4.3", "4.4",
"4.3.4", "4.3.3", "4.3.2", "4.3",
@@ -1421,6 +1539,9 @@ Linux::Linux(const HostInfo &Host, const llvm::Triple &Triple)
else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-pc-linux-gnu",
Exists) && Exists)
GccTriple = "x86_64-pc-linux-gnu";
+ else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-redhat-linux6E",
+ Exists) && Exists)
+ GccTriple = "x86_64-redhat-linux6E";
else if (!llvm::sys::fs::exists("/usr/lib/gcc/x86_64-redhat-linux",
Exists) && Exists)
GccTriple = "x86_64-redhat-linux";
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index ace9b847919f..d68016b3a240 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -57,6 +57,16 @@ private:
// the argument translation business.
mutable bool TargetInitialized;
+ // FIXME: Remove this once there is a proper way to detect an ARC runtime
+ // for the simulator.
+ public:
+ mutable enum {
+ ARCSimulator_None,
+ ARCSimulator_HasARCRuntime,
+ ARCSimulator_NoARCRuntime
+ } ARCRuntimeForSimulator;
+
+private:
/// Whether we are targeting iPhoneOS target.
mutable bool TargetIsIPhoneOS;
@@ -70,6 +80,8 @@ private:
/// initialized.
std::string MacosxVersionMin;
+ bool hasARCRuntime() const;
+
private:
void AddDeploymentTarget(DerivedArgList &Args) const;
@@ -157,11 +169,15 @@ public:
virtual void AddLinkSearchPathArgs(const ArgList &Args,
ArgStringList &CmdArgs) const = 0;
+ /// AddLinkARCArgs - Add the linker arguments to link the ARC runtime library.
+ virtual void AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const = 0;
+
/// AddLinkRuntimeLibArgs - Add the linker arguments to link the compiler
/// runtime library.
virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const = 0;
-
+
/// }
/// @name ToolChain Implementation
/// {
@@ -170,6 +186,8 @@ public:
virtual bool HasNativeLLVMSupport() const;
+ virtual void configureObjCRuntime(ObjCRuntime &runtime) const;
+
virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
const char *BoundArch) const;
@@ -250,13 +268,17 @@ public:
virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
-
+ void AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
+ const char *DarwinStaticLib) const;
+
virtual void AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
virtual void AddCCKextLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const;
+ virtual void AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
/// }
};
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 8cd7adc42e84..eb17609d6c34 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -17,6 +17,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/ObjCRuntime.h"
#include "clang/Driver/Option.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
@@ -30,6 +31,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/ErrorHandling.h"
#include "InputInfo.h"
#include "ToolChains.h"
@@ -147,20 +149,37 @@ static void AddLinkerInputs(const ToolChain &TC,
}
}
+/// \brief Determine whether Objective-C automated reference counting is
+/// enabled.
+static bool isObjCAutoRefCount(const ArgList &Args) {
+ return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
+}
+
static void addProfileRT(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- if (Args.hasArg(options::OPT_fprofile_arcs) ||
- Args.hasArg(options::OPT_fprofile_generate) ||
- Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage)) {
- // GCC links libgcov.a by adding -L<inst>/gcc/lib/gcc/<triple>/<ver> -lgcov
- // to the link line. We cannot do the same thing because unlike gcov
- // there is a libprofile_rt.so. We used to use the -l:libprofile_rt.a
- // syntax, but that is not supported by old linkers.
- const char *lib = Args.MakeArgString(TC.getDriver().Dir + "/../lib/" +
- "libprofile_rt.a");
- CmdArgs.push_back(lib);
+ ArgStringList &CmdArgs,
+ llvm::Triple Triple) {
+ if (!(Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)))
+ return;
+
+ // GCC links libgcov.a by adding -L<inst>/gcc/lib/gcc/<triple>/<ver> -lgcov to
+ // the link line. We cannot do the same thing because unlike gcov there is a
+ // libprofile_rt.so. We used to use the -l:libprofile_rt.a syntax, but that is
+ // not supported by old linkers.
+ llvm::Twine ProfileRT =
+ llvm::Twine(TC.getDriver().Dir) + "/../lib/" + "libprofile_rt.a";
+
+ if (Triple.getOS() == llvm::Triple::Darwin) {
+ // On Darwin, if the static library doesn't exist try the dylib.
+ bool Exists;
+ if (llvm::sys::fs::exists(ProfileRT.str(), Exists) || !Exists)
+ ProfileRT =
+ llvm::Twine(TC.getDriver().Dir) + "/../lib/" + "libprofile_rt.dylib";
}
+
+ CmdArgs.push_back(Args.MakeArgString(ProfileRT));
}
void Clang::AddPreprocessingOptions(const Driver &D,
@@ -223,6 +242,13 @@ void Clang::AddPreprocessingOptions(const Driver &D,
CmdArgs.push_back("-sys-header-deps");
}
+ if (Args.hasArg(options::OPT_MG)) {
+ if (!A || A->getOption().matches(options::OPT_MD) ||
+ A->getOption().matches(options::OPT_MMD))
+ D.Diag(clang::diag::err_drv_mg_requires_m_or_mm);
+ CmdArgs.push_back("-MG");
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_MP);
// Convert all -MQ <target> args to -MT <quoted target>
@@ -318,8 +344,13 @@ void Clang::AddPreprocessingOptions(const Driver &D,
// Add C++ include arguments, if needed.
types::ID InputType = Inputs[0].getType();
- if (types::isCXX(InputType))
- getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ if (types::isCXX(InputType)) {
+ bool ObjCXXAutoRefCount
+ = types::isObjC(InputType) && isObjCAutoRefCount(Args);
+ getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs,
+ ObjCXXAutoRefCount);
+ Args.AddAllArgs(CmdArgs, options::OPT_stdlib_EQ);
+ }
// Add -Wp, and -Xassembler if using the preprocessor.
@@ -737,7 +768,6 @@ void Clang::AddSparcTargetArgs(const ArgList &Args,
//
// FIXME: This changes CPP defines, we need -target-soft-float.
CmdArgs.push_back("-msoft-float");
- CmdArgs.push_back("soft");
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+soft-float");
} else {
@@ -816,6 +846,14 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
CmdArgs.push_back(CPUName);
}
+ // The required algorithm here is slightly strange: the options are applied
+ // in order (so -mno-sse -msse2 disables SSE3), but any option that gets
+ // directly overridden later is ignored (so "-mno-sse -msse2 -mno-sse2 -msse"
+ // is equivalent to "-mno-sse2 -msse"). The -cc1 handling deals with the
+ // former correctly, but not the latter; handle directly-overridden
+ // attributes here.
+ llvm::StringMap<unsigned> PrevFeature;
+ std::vector<const char*> Features;
for (arg_iterator it = Args.filtered_begin(options::OPT_m_x86_Features_Group),
ie = Args.filtered_end(); it != ie; ++it) {
llvm::StringRef Name = (*it)->getOption().getName();
@@ -829,25 +867,34 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
if (IsNegative)
Name = Name.substr(3);
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ unsigned& Prev = PrevFeature[Name];
+ if (Prev)
+ Features[Prev - 1] = 0;
+ Prev = Features.size() + 1;
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
+ for (unsigned i = 0; i < Features.size(); i++) {
+ if (Features[i]) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back(Features[i]);
+ }
}
}
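The PrevFeature map above keeps the -m<feature> flags in order but blanks out any occurrence that the same feature directly overrides later. Run standalone over plain strings (this is not the ArgList code, only the same bookkeeping), "-mno-sse -msse2 -mno-sse2 -msse" collapses to the equivalent of "-mno-sse2 -msse":

#include <cstdio>
#include <map>
#include <string>
#include <vector>

static std::vector<std::string> dedup(const std::vector<std::string> &Opts) {
  std::map<std::string, size_t> Prev;          // feature name -> index + 1
  std::vector<std::string> Out(Opts.size());
  for (size_t i = 0; i != Opts.size(); ++i) {
    std::string Name = Opts[i].substr(2);      // strip "-m"
    bool Neg = Name.compare(0, 3, "no-") == 0;
    if (Neg) Name = Name.substr(3);
    size_t &Slot = Prev[Name];
    if (Slot) Out[Slot - 1].clear();           // directly overridden: drop it
    Slot = i + 1;
    Out[i] = (Neg ? "-" : "+") + Name;         // -cc1 style +feature / -feature
  }
  std::vector<std::string> Kept;
  for (size_t i = 0; i != Out.size(); ++i)
    if (!Out[i].empty()) Kept.push_back(Out[i]);
  return Kept;
}

int main() {
  const char *Case[] = { "-mno-sse", "-msse2", "-mno-sse2", "-msse" };
  std::vector<std::string> Kept = dedup(std::vector<std::string>(Case, Case + 4));
  for (size_t i = 0; i != Kept.size(); ++i)
    std::printf("%s ", Kept[i].c_str());       // prints: -sse2 +sse
  std::printf("\n");
  return 0;
}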
static bool
-shouldUseExceptionTablesForObjCExceptions(const ArgList &Args,
+shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
const llvm::Triple &Triple) {
// We use the zero-cost exception tables for Objective-C if the non-fragile
// ABI is enabled or when compiling for x86_64 and ARM on Snow Leopard and
// later.
- if (Args.hasArg(options::OPT_fobjc_nonfragile_abi))
+ if (objcABIVersion >= 2)
return true;
if (Triple.getOS() != llvm::Triple::Darwin)
return false;
- return (Triple.getDarwinMajorNumber() >= 9 &&
+ return (!Triple.isMacOSXVersionLT(10,5) &&
(Triple.getArch() == llvm::Triple::x86_64 ||
Triple.getArch() == llvm::Triple::arm));
}
@@ -860,6 +907,7 @@ shouldUseExceptionTablesForObjCExceptions(const ArgList &Args,
static void addExceptionArgs(const ArgList &Args, types::ID InputType,
const llvm::Triple &Triple,
bool KernelOrKext, bool IsRewriter,
+ unsigned objcABIVersion,
ArgStringList &CmdArgs) {
if (KernelOrKext)
return;
@@ -896,7 +944,7 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fobjc-exceptions");
ShouldUseExceptionTables |=
- shouldUseExceptionTablesForObjCExceptions(Args, Triple);
+ shouldUseExceptionTablesForObjCExceptions(objcABIVersion, Triple);
}
if (types::isCXX(InputType)) {
@@ -1040,6 +1088,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (Value == "--fatal-warnings") {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-fatal-assembler-warnings");
+ } else if (Value == "--noexecstack") {
+ CmdArgs.push_back("-mnoexecstack");
} else {
D.Diag(clang::diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
@@ -1311,8 +1361,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
types::ID InputType = Inputs[0].getType();
if (!Args.hasArg(options::OPT_fallow_unsupported)) {
Arg *Unsupported;
- if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
- (Unsupported = Args.getLastArg(options::OPT_iframework)))
+ if ((Unsupported = Args.getLastArg(options::OPT_iframework)))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< Unsupported->getOption().getName();
@@ -1378,6 +1427,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_working_directory);
+ if (!Args.hasArg(options::OPT_fno_objc_arc)) {
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_arcmt_check,
+ options::OPT_ccc_arcmt_modify,
+ options::OPT_ccc_arcmt_migrate)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case options::OPT_ccc_arcmt_check:
+ CmdArgs.push_back("-arcmt-check");
+ break;
+ case options::OPT_ccc_arcmt_modify:
+ CmdArgs.push_back("-arcmt-modify");
+ break;
+ case options::OPT_ccc_arcmt_migrate:
+ CmdArgs.push_back("-arcmt-migrate");
+ CmdArgs.push_back("-arcmt-migrate-directory");
+ CmdArgs.push_back(A->getValue(Args));
+ break;
+ }
+ }
+ }
+
// Add preprocessing options like -I, -D, etc. if we are using the
// preprocessor.
//
@@ -1542,27 +1613,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_lax_vector_conversions))
CmdArgs.push_back("-fno-lax-vector-conversions");
- // -fobjc-infer-related-result-type is the default.
- if (Args.hasFlag(options::OPT_fobjc_infer_related_result_type,
- options::OPT_fno_objc_infer_related_result_type,
- /*Default=*/true))
- CmdArgs.push_back("-fobjc-infer-related-result-type");
-
- // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
- // takes precedence.
- const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only);
- if (!GCArg)
- GCArg = Args.getLastArg(options::OPT_fobjc_gc);
- if (GCArg) {
- if (getToolChain().SupportsObjCGC()) {
- GCArg->render(Args, CmdArgs);
- } else {
- // FIXME: We should move this to a hard error.
- D.Diag(clang::diag::warn_drv_objc_gc_unsupported)
- << GCArg->getAsString(Args);
- }
- }
-
if (Args.getLastArg(options::OPT_fapple_kext))
CmdArgs.push_back("-fapple-kext");
@@ -1660,13 +1710,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fno-elide-constructors");
- // Add exception args.
- addExceptionArgs(Args, InputType, getToolChain().getTriple(),
- KernelOrKext, IsRewriter, CmdArgs);
-
- if (getToolChain().UseSjLjExceptions())
- CmdArgs.push_back("-fsjlj-exceptions");
-
// -frtti is default.
if (KernelOrKext ||
!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti))
@@ -1734,30 +1777,41 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fgnu89-inline");
- // -fnext-runtime defaults to on Darwin and when rewriting Objective-C, and is
- // the -cc1 default.
- bool NeXTRuntimeIsDefault =
- IsRewriter || getToolChain().getTriple().getOS() == llvm::Triple::Darwin;
- if (!Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
- NeXTRuntimeIsDefault))
- CmdArgs.push_back("-fgnu-runtime");
-
// -fobjc-nonfragile-abi=0 is default.
+ ObjCRuntime objCRuntime;
+ unsigned objcABIVersion = 0;
if (types::isObjC(InputType)) {
+ bool NeXTRuntimeIsDefault
+ = (IsRewriter || getToolChain().getTriple().isOSDarwin());
+ if (Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
+ NeXTRuntimeIsDefault)) {
+ objCRuntime.setKind(ObjCRuntime::NeXT);
+ } else {
+ CmdArgs.push_back("-fgnu-runtime");
+ objCRuntime.setKind(ObjCRuntime::GNU);
+ }
+ getToolChain().configureObjCRuntime(objCRuntime);
+ if (objCRuntime.HasARC)
+ CmdArgs.push_back("-fobjc-runtime-has-arc");
+ if (objCRuntime.HasWeak)
+ CmdArgs.push_back("-fobjc-runtime-has-weak");
+ if (objCRuntime.HasTerminate)
+ CmdArgs.push_back("-fobjc-runtime-has-terminate");
+
// Compute the Objective-C ABI "version" to use. Version numbers are
// slightly confusing for historical reasons:
// 1 - Traditional "fragile" ABI
// 2 - Non-fragile ABI, version 1
// 3 - Non-fragile ABI, version 2
- unsigned Version = 1;
+ objcABIVersion = 1;
// If -fobjc-abi-version= is present, use that to set the version.
if (Arg *A = Args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
if (llvm::StringRef(A->getValue(Args)) == "1")
- Version = 1;
+ objcABIVersion = 1;
else if (llvm::StringRef(A->getValue(Args)) == "2")
- Version = 2;
+ objcABIVersion = 2;
else if (llvm::StringRef(A->getValue(Args)) == "3")
- Version = 3;
+ objcABIVersion = 3;
else
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
} else {
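The objcABIVersion value threaded through this hunk and the next follows the numbering spelled out in the comment: 1 is the fragile ABI, 2 and 3 are non-fragile ABI versions 1 and 2, and only 2 or 3 cause -fobjc-nonfragile-abi to be passed to -cc1. A hedged sketch of just the selection logic; the function and parameter names here are illustrative, not the driver's:

#include <cassert>
#include <string>

// abiVersionArg: value of -fobjc-abi-version= ("" if absent);
// nonFragile / nonFragileVersion: model -fobjc-nonfragile-abi[-version=].
static unsigned pickObjCABIVersion(const std::string &abiVersionArg,
                                   bool nonFragile,
                                   unsigned nonFragileVersion) {
  if (abiVersionArg == "1") return 1;
  if (abiVersionArg == "2") return 2;
  if (abiVersionArg == "3") return 3;
  return nonFragile ? 1 + nonFragileVersion : 1;
}

int main() {
  assert(pickObjCABIVersion("", false, 0) == 1);  // fragile ABI by default
  assert(pickObjCABIVersion("", true, 2) == 3);   // non-fragile ABI, version 2
  assert(pickObjCABIVersion("3", false, 0) == 3); // explicit -fobjc-abi-version=3
  return 0;                                       // 2 and 3 imply -fobjc-nonfragile-abi
}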
@@ -1783,13 +1837,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args);
}
- Version = 1 + NonFragileABIVersion;
+ objcABIVersion = 1 + NonFragileABIVersion;
} else {
- Version = 1;
+ objcABIVersion = 1;
}
}
- if (Version == 2 || Version == 3) {
+ if (objcABIVersion == 2 || objcABIVersion == 3) {
CmdArgs.push_back("-fobjc-nonfragile-abi");
// -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
@@ -1818,6 +1872,51 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
#endif
}
+ // Allow -fno-objc-arr to trump -fobjc-arr/-fobjc-arc.
+ // NOTE: This logic is duplicated in ToolChains.cpp.
+ bool ARC = isObjCAutoRefCount(Args);
+ if (ARC) {
+ CmdArgs.push_back("-fobjc-arc");
+
+ // Allow the user to enable full exceptions code emission.
+ // We default to off for Objective-C, on for Objective-C++.
+ if (Args.hasFlag(options::OPT_fobjc_arc_exceptions,
+ options::OPT_fno_objc_arc_exceptions,
+ /*default*/ types::isCXX(InputType)))
+ CmdArgs.push_back("-fobjc-arc-exceptions");
+ }
+
+ // -fobjc-infer-related-result-type is the default, except in the Objective-C
+ // rewriter.
+ if (IsRewriter)
+ CmdArgs.push_back("-fno-objc-infer-related-result-type");
+
+ // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
+ // takes precedence.
+ const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only);
+ if (!GCArg)
+ GCArg = Args.getLastArg(options::OPT_fobjc_gc);
+ if (GCArg) {
+ if (ARC) {
+ D.Diag(clang::diag::err_drv_objc_gc_arr)
+ << GCArg->getAsString(Args);
+ } else if (getToolChain().SupportsObjCGC()) {
+ GCArg->render(Args, CmdArgs);
+ } else {
+ // FIXME: We should move this to a hard error.
+ D.Diag(clang::diag::warn_drv_objc_gc_unsupported)
+ << GCArg->getAsString(Args);
+ }
+ }
+
+ // Add exception args.
+ addExceptionArgs(Args, InputType, getToolChain().getTriple(),
+ KernelOrKext, IsRewriter, objcABIVersion, CmdArgs);
+
+ if (getToolChain().UseSjLjExceptions())
+ CmdArgs.push_back("-fsjlj-exceptions");
+
+ // C++ "sane" operator new.
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
options::OPT_fno_assume_sane_operator_new))
CmdArgs.push_back("-fno-assume-sane-operator-new");
@@ -2858,6 +2957,17 @@ void darwin::Link::AddLinkArgs(Compilation &C,
CmdArgs.push_back("-demangle");
}
+ // If we are using LTO, then automatically create a temporary file path for
+ // the linker to use, so that its lifetime will extend past a possible
+ // dsymutil step.
+ if (Version[0] >= 116 && D.IsUsingLTO(Args)) {
+ const char *TmpPath = C.getArgs().MakeArgString(
+ D.GetTemporaryPath(types::getTypeTempSuffix(types::TY_Object)));
+ C.addTempFile(TmpPath);
+ CmdArgs.push_back("-object_path_lto");
+ CmdArgs.push_back(TmpPath);
+ }
+
// Derived from the "link" spec.
Args.AddAllArgs(CmdArgs, options::OPT_static);
if (!Args.hasArg(options::OPT_static))
@@ -2921,6 +3031,7 @@ void darwin::Link::AddLinkArgs(Compilation &C,
Args.AddLastArg(CmdArgs, options::OPT_dynamic);
Args.AddAllArgs(CmdArgs, options::OPT_exported__symbols__list);
Args.AddLastArg(CmdArgs, options::OPT_flat__namespace);
+ Args.AddAllArgs(CmdArgs, options::OPT_force__load);
Args.AddAllArgs(CmdArgs, options::OPT_headerpad__max__install__names);
Args.AddAllArgs(CmdArgs, options::OPT_image__base);
Args.AddAllArgs(CmdArgs, options::OPT_init);
@@ -3142,6 +3253,16 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
getDarwinToolChain().AddLinkSearchPathArgs(Args, CmdArgs);
+ // In ARC, if we don't have runtime support, link in the runtime
+ // stubs. We have to do this *before* adding any of the normal
+ // linker inputs so that its initializer gets run first.
+ if (isObjCAutoRefCount(Args)) {
+ ObjCRuntime runtime;
+ getDarwinToolChain().configureObjCRuntime(runtime);
+ if (!runtime.HasARC)
+ getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
+ }
+
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
if (LinkingOutput) {
@@ -3170,7 +3291,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
// endfile_spec is empty.
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
Args.AddAllArgs(CmdArgs, options::OPT_F);
@@ -3329,7 +3450,7 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().GetFilePath("crtend.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("ld"));
@@ -3637,7 +3758,7 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
"crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("ld"));
@@ -3792,7 +3913,7 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
"crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
const char *Exec = Args.MakeArgString(FindTargetProgramPath(getToolChain(),
ToolTriple.getTriple(),
@@ -4016,7 +4137,7 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
if (Args.hasArg(options::OPT_use_gold_plugin)) {
CmdArgs.push_back("-plugin");
@@ -4100,7 +4221,7 @@ void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
"/usr/gnu/lib/libend.a")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("/usr/gnu/bin/gld"));
@@ -4257,7 +4378,7 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().GetFilePath("crtn.o")));
}
- addProfileRT(getToolChain(), Args, CmdArgs);
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("ld"));
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 4a5a7e474526..1741d051ce9f 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -360,7 +360,7 @@ namespace netbsd {
public:
Link(const ToolChain &TC, const llvm::Triple &ToolTriple)
- : Tool("netbsd::Ling", "linker", TC), ToolTriple(ToolTriple) {}
+ : Tool("netbsd::Link", "linker", TC), ToolTriple(ToolTriple) {}
virtual bool hasIntegratedCPP() const { return false; }
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 8827116f2e40..5b0a52c65b84 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -102,7 +102,7 @@ ASTUnit::ASTUnit(bool _MainFileIsAST)
ConcurrencyCheckValue(CheckUnlocked),
PreambleRebuildCounter(0), SavedMainFileBuffer(0), PreambleBuffer(0),
ShouldCacheCodeCompletionResults(false),
- NestedMacroInstantiations(true),
+ NestedMacroExpansions(true),
CompletionCacheTopLevelHashValue(0),
PreambleTopLevelHashValue(0),
CurrentTopLevelHashValue(0),
@@ -182,6 +182,10 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
// all types are available due to functional casts.
if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
Contexts |= (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
+
+ // In Objective-C, you can only be a subclass of another Objective-C class
+ if (isa<ObjCInterfaceDecl>(ND))
+ Contexts |= (1 << (CodeCompletionContext::CCC_ObjCSuperclass - 1));
// Deal with tag names.
if (isa<EnumDecl>(ND)) {
@@ -208,6 +212,8 @@ static unsigned getDeclShowContexts(NamedDecl *ND,
| (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
} else if (isa<ObjCProtocolDecl>(ND)) {
Contexts = (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1));
+ } else if (isa<ObjCCategoryDecl>(ND)) {
+ Contexts = (1 << (CodeCompletionContext::CCC_ObjCCategoryName - 1));
} else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
Contexts = (1 << (CodeCompletionContext::CCC_Namespace - 1));
@@ -928,8 +934,8 @@ bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
// If the main file has been overridden due to the use of a preamble,
// make that override happen and introduce the preamble.
PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
- PreprocessorOpts.DetailedRecordIncludesNestedMacroInstantiations
- = NestedMacroInstantiations;
+ PreprocessorOpts.DetailedRecordIncludesNestedMacroExpansions
+ = NestedMacroExpansions;
std::string PriorImplicitPCHInclude;
if (OverrideMainBuffer) {
PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
@@ -1150,16 +1156,19 @@ static llvm::MemoryBuffer *CreatePaddedMainFileBuffer(llvm::MemoryBuffer *Old,
/// buffer that should be used in place of the main file when doing so.
/// Otherwise, returns a NULL pointer.
llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
- CompilerInvocation PreambleInvocation,
+ const CompilerInvocation &PreambleInvocationIn,
bool AllowRebuild,
unsigned MaxLines) {
- FrontendOptions &FrontendOpts = PreambleInvocation.getFrontendOpts();
+
+ llvm::IntrusiveRefCntPtr<CompilerInvocation>
+ PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
+ FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
PreprocessorOptions &PreprocessorOpts
- = PreambleInvocation.getPreprocessorOpts();
+ = PreambleInvocation->getPreprocessorOpts();
bool CreatedPreambleBuffer = false;
std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> > NewPreamble
- = ComputePreamble(PreambleInvocation, MaxLines, CreatedPreambleBuffer);
+ = ComputePreamble(*PreambleInvocation, MaxLines, CreatedPreambleBuffer);
// If ComputePreamble() created the preamble buffer, take ownership of it.
llvm::OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
@@ -1260,7 +1269,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
// have occurred in the preamble.
getDiagnostics().Reset();
ProcessWarningOptions(getDiagnostics(),
- PreambleInvocation.getDiagnosticOpts());
+ PreambleInvocation->getDiagnosticOpts());
getDiagnostics().setNumWarnings(NumWarningsInPreamble);
if (StoredDiagnostics.size() > NumStoredDiagnosticsInPreamble)
StoredDiagnostics.erase(
@@ -1357,7 +1366,7 @@ llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
- Clang->setInvocation(&PreambleInvocation);
+ Clang->setInvocation(&*PreambleInvocation);
OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].second;
// Set up diagnostics, capturing all of the diagnostics produced.
@@ -1716,7 +1725,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
bool PrecompilePreamble,
bool CompleteTranslationUnit,
bool CacheCodeCompletionResults,
- bool NestedMacroInstantiations) {
+ bool NestedMacroExpansions) {
// Create the AST unit.
llvm::OwningPtr<ASTUnit> AST;
AST.reset(new ASTUnit(false));
@@ -1727,7 +1736,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
AST->CompleteTranslationUnit = CompleteTranslationUnit;
AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
AST->Invocation = CI;
- AST->NestedMacroInstantiations = NestedMacroInstantiations;
+ AST->NestedMacroExpansions = NestedMacroExpansions;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1753,7 +1762,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
bool CacheCodeCompletionResults,
bool CXXPrecompilePreamble,
bool CXXChainedPCH,
- bool NestedMacroInstantiations) {
+ bool NestedMacroExpansions) {
if (!Diags.getPtr()) {
// No diagnostics engine was provided, so create our own diagnostics object
// with the default options.
@@ -1820,7 +1829,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
AST->NumStoredDiagnosticsInPreamble = StoredDiagnostics.size();
AST->StoredDiagnostics.swap(StoredDiagnostics);
AST->Invocation = CI;
- AST->NestedMacroInstantiations = NestedMacroInstantiations;
+ AST->NestedMacroExpansions = NestedMacroExpansions;
// Recover resources if we crash before exiting this method.
llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
@@ -1899,7 +1908,7 @@ namespace {
/// results from an ASTUnit with the code-completion results provided to it,
/// then passes the result on to the next consumer.
class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
- unsigned NormalContexts;
+ unsigned long long NormalContexts;
ASTUnit &AST;
CodeCompleteConsumer &Next;
@@ -1913,22 +1922,24 @@ namespace {
// Compute the set of contexts in which we will look when we don't have
// any information about the specific context.
NormalContexts
- = (1 << (CodeCompletionContext::CCC_TopLevel - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCInterface - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCImplementation - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
- | (1 << (CodeCompletionContext::CCC_Statement - 1))
- | (1 << (CodeCompletionContext::CCC_Expression - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
- | (1 << (CodeCompletionContext::CCC_MemberAccess - 1))
- | (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1))
- | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
- | (1 << (CodeCompletionContext::CCC_Recovery - 1));
+ = (1LL << (CodeCompletionContext::CCC_TopLevel - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCInterface - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCImplementation - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCIvarList - 1))
+ | (1LL << (CodeCompletionContext::CCC_Statement - 1))
+ | (1LL << (CodeCompletionContext::CCC_Expression - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
+ | (1LL << (CodeCompletionContext::CCC_DotMemberAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ArrowMemberAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCPropertyAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCProtocolName - 1))
+ | (1LL << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1LL << (CodeCompletionContext::CCC_Recovery - 1));
if (AST.getASTContext().getLangOptions().CPlusPlus)
- NormalContexts |= (1 << (CodeCompletionContext::CCC_EnumTag - 1))
- | (1 << (CodeCompletionContext::CCC_UnionTag - 1))
- | (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
+ NormalContexts |= (1LL << (CodeCompletionContext::CCC_EnumTag - 1))
+ | (1LL << (CodeCompletionContext::CCC_UnionTag - 1))
+ | (1LL << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
}
virtual void ProcessCodeCompleteResults(Sema &S,
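The switch from 1 << to 1LL << is needed because CodeCompletionContext kinds can now sit past bit 31, and shifting a plain int by 32 or more is undefined behavior; the accumulated mask therefore has to be 64 bits wide. A tiny demonstration:

#include <cstdio>

int main() {
  unsigned Bit = 35;                       // e.g. a context kind past 31
  unsigned long long Wide = 1LL << Bit;    // well defined: bit 35 set
  // (1 << Bit) would be an int shift, which is undefined here and in practice
  // wraps the shift count on common targets; hence the widened mask.
  std::printf("%llx\n", Wide);             // 800000000
  return 0;
}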
@@ -1966,12 +1977,15 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_Statement:
case CodeCompletionContext::CCC_Expression:
case CodeCompletionContext::CCC_ObjCMessageReceiver:
- case CodeCompletionContext::CCC_MemberAccess:
+ case CodeCompletionContext::CCC_DotMemberAccess:
+ case CodeCompletionContext::CCC_ArrowMemberAccess:
+ case CodeCompletionContext::CCC_ObjCPropertyAccess:
case CodeCompletionContext::CCC_Namespace:
case CodeCompletionContext::CCC_Type:
case CodeCompletionContext::CCC_Name:
case CodeCompletionContext::CCC_PotentiallyQualifiedName:
case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_ObjCSuperclass:
break;
case CodeCompletionContext::CCC_EnumTag:
@@ -1990,6 +2004,9 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_TypeQualifiers:
case CodeCompletionContext::CCC_Other:
case CodeCompletionContext::CCC_OtherWithMacros:
+ case CodeCompletionContext::CCC_ObjCInstanceMessage:
+ case CodeCompletionContext::CCC_ObjCClassMessage:
+ case CodeCompletionContext::CCC_ObjCCategoryName:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -2284,9 +2301,9 @@ void ASTUnit::CodeComplete(llvm::StringRef File, unsigned Line, unsigned Column,
}
}
-bool ASTUnit::Save(llvm::StringRef File) {
- if (getDiagnostics().hasErrorOccurred())
- return true;
+CXSaveError ASTUnit::Save(llvm::StringRef File) {
+ if (getDiagnostics().hasUnrecoverableErrorOccurred())
+ return CXSaveError_TranslationErrors;
// FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
@@ -2294,11 +2311,11 @@ bool ASTUnit::Save(llvm::StringRef File) {
llvm::raw_fd_ostream Out(File.str().c_str(), ErrorInfo,
llvm::raw_fd_ostream::F_Binary);
if (!ErrorInfo.empty() || Out.has_error())
- return true;
+ return CXSaveError_Unknown;
serialize(Out);
Out.close();
- return Out.has_error();
+ return Out.has_error()? CXSaveError_Unknown : CXSaveError_None;
}
bool ASTUnit::serialize(llvm::raw_ostream &OS) {
diff --git a/lib/Frontend/BoostConAction.cpp b/lib/Frontend/BoostConAction.cpp
deleted file mode 100644
index 4a12ff2ebc95..000000000000
--- a/lib/Frontend/BoostConAction.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//===-- BoostConAction.cpp - BoostCon Workshop Action -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-#include "clang/Frontend/FrontendActions.h"
-#include "clang/AST/ASTConsumer.h"
-#include "clang/AST/RecursiveASTVisitor.h"
-#include <cstdio>
-#include <iostream>
-using namespace clang;
-
-namespace {
- class BoostConASTConsumer : public ASTConsumer,
- public RecursiveASTVisitor<BoostConASTConsumer> {
- public:
- /// HandleTranslationUnit - This method is called when the ASTs for entire
- /// translation unit have been parsed.
- virtual void HandleTranslationUnit(ASTContext &Ctx);
-
- bool VisitCXXRecordDecl(CXXRecordDecl *D) {
- std::cout << D->getNameAsString() << std::endl;
- return true;
- }
- };
-}
-
-ASTConsumer *BoostConAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- return new BoostConASTConsumer();
-}
-
-void BoostConASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
- fprintf(stderr, "Welcome to BoostCon!\n");
- TraverseDecl(Ctx.getTranslationUnitDecl());
-}
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 7dcbebff39b8..556b1333c19b 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -12,7 +12,6 @@ add_clang_library(clangFrontend
ASTConsumers.cpp
ASTMerge.cpp
ASTUnit.cpp
- BoostConAction.cpp
CacheTokens.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 06a1fd29838b..20b51893fcc2 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -247,17 +247,16 @@ void PTHWriter::EmitToken(const Token& T) {
} else {
// We cache *un-cleaned* spellings. This gives us 100% fidelity with the
// source code.
- const char* s = T.getLiteralData();
- unsigned len = T.getLength();
+ llvm::StringRef s(T.getLiteralData(), T.getLength());
// Get the string entry.
- llvm::StringMapEntry<OffsetOpt> *E = &CachedStrs.GetOrCreateValue(s, s+len);
+ llvm::StringMapEntry<OffsetOpt> *E = &CachedStrs.GetOrCreateValue(s);
// If this is a new string entry, bump the PTH offset.
if (!E->getValue().hasOffset()) {
E->getValue().setOffset(CurStrOffset);
StrEntries.push_back(E);
- CurStrOffset += len + 1;
+ CurStrOffset += s.size() + 1;
}
// Emit the relative offset into the PTH file for the spelling string.
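The CachedStrs change above is a straightforward string-interning table: each distinct token spelling gets one offset into the PTH string area, and CurStrOffset only advances when a spelling is seen for the first time. A standalone sketch of the same idea, with std::map standing in for llvm::StringMap:

#include <cstdio>
#include <map>
#include <string>

// Return the table offset for s, inserting it (and advancing the running
// offset by length + 1 for the NUL) only on first sight.
static unsigned intern(std::map<std::string, unsigned> &Table,
                       unsigned &CurOffset, const std::string &s) {
  std::map<std::string, unsigned>::iterator it = Table.find(s);
  if (it != Table.end())
    return it->second;
  unsigned Off = CurOffset;
  Table[s] = Off;
  CurOffset += s.size() + 1;
  return Off;
}

int main() {
  std::map<std::string, unsigned> Table;
  unsigned Cur = 0;
  std::printf("%u\n", intern(Table, Cur, "foo"));    // 0
  std::printf("%u\n", intern(Table, Cur, "barbaz")); // 4
  std::printf("%u\n", intern(Table, Cur, "foo"));    // 0 again, offset reused
  return 0;
}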
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 38fcfe3c4711..c58e3af5089b 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -38,6 +38,7 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/system_error.h"
+#include "llvm/Config/config.h"
using namespace clang;
CompilerInstance::CompilerInstance()
@@ -228,7 +229,7 @@ CompilerInstance::createPreprocessor(Diagnostic &Diags,
if (PPOpts.DetailedRecord)
PP->createPreprocessingRecord(
- PPOpts.DetailedRecordIncludesNestedMacroInstantiations);
+ PPOpts.DetailedRecordIncludesNestedMacroExpansions);
InitializePreprocessor(*PP, PPOpts, HSOpts, FEOpts);
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 536512a121e9..06b72602fd18 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -123,6 +123,10 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-dwarf-debug-flags");
Res.push_back(Opts.DwarfDebugFlags);
}
+ if (Opts.ObjCRuntimeHasARC)
+ Res.push_back("-fobjc-runtime-has-arc");
+ if (Opts.ObjCRuntimeHasTerminate)
+ Res.push_back("-fobjc-runtime-has-terminate");
if (Opts.EmitGcovArcs)
Res.push_back("-femit-coverage-data");
if (Opts.EmitGcovNotes)
@@ -133,6 +137,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-fno-common");
if (Opts.ForbidGuardVariables)
Res.push_back("-fforbid-guard-variables");
+ if (Opts.UseRegisterSizedBitfieldAccess)
+ Res.push_back("-fuse-register-sized-bitfield-access");
if (Opts.NoImplicitFloat)
Res.push_back("-no-implicit-float");
if (Opts.OmitLeafFramePointer)
@@ -169,6 +175,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-fno-use-cxa-atexit");
if (Opts.CXXCtorDtorAliases)
Res.push_back("-mconstructor-aliases");
+ if (Opts.ObjCAutoRefCountExceptions)
+ Res.push_back("-fobjc-arc-eh");
if (!Opts.DebugPass.empty()) {
Res.push_back("-mdebug-pass");
Res.push_back(Opts.DebugPass);
@@ -199,6 +207,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-mregparm");
Res.push_back(llvm::utostr(Opts.NumRegisterParameters));
}
+ if (Opts.NoExecStack)
+ Res.push_back("-mnoexecstack");
if (Opts.RelaxAll)
Res.push_back("-mrelax-all");
if (Opts.SaveTempLabels)
@@ -351,7 +361,6 @@ static const char *getActionName(frontend::ActionKind Kind) {
case frontend::ASTDumpXML: return "-ast-dump-xml";
case frontend::ASTPrint: return "-ast-print";
case frontend::ASTView: return "-ast-view";
- case frontend::BoostCon: return "-boostcon";
case frontend::CreateModule: return "-create-module";
case frontend::DumpRawTokens: return "-dump-raw-tokens";
case frontend::DumpTokens: return "-dump-tokens";
@@ -413,6 +422,23 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-version");
if (Opts.FixWhatYouCan)
Res.push_back("-fix-what-you-can");
+ switch (Opts.ARCMTAction) {
+ case FrontendOptions::ARCMT_None:
+ break;
+ case FrontendOptions::ARCMT_Check:
+ Res.push_back("-arcmt-check");
+ break;
+ case FrontendOptions::ARCMT_Modify:
+ Res.push_back("-arcmt-modify");
+ break;
+ case FrontendOptions::ARCMT_Migrate:
+ Res.push_back("-arcmt-migrate");
+ break;
+ }
+ if (!Opts.ARCMTMigrateDir.empty()) {
+ Res.push_back("-arcmt-migrate-directory");
+ Res.push_back(Opts.ARCMTMigrateDir);
+ }
bool NeedLang = false;
for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i)
@@ -537,6 +563,8 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
Res.push_back("-nostdinc");
if (!Opts.UseStandardCXXIncludes)
Res.push_back("-nostdinc++");
+ if (Opts.UseLibcxx)
+ Res.push_back("-stdlib=libc++");
if (Opts.Verbose)
Res.push_back("-v");
}
@@ -670,8 +698,12 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-fobjc-gc-only");
}
}
- if (Opts.ObjCInferRelatedResultType)
- Res.push_back("-fobjc-infer-related-result-type");
+ if (Opts.ObjCAutoRefCount)
+ Res.push_back("-fobjc-arc");
+ if (Opts.ObjCRuntimeHasWeak)
+ Res.push_back("-fobjc-runtime-has-weak");
+ if (!Opts.ObjCInferRelatedResultType)
+ Res.push_back("-fno-objc-infer-related-result-type");
if (Opts.AppleKext)
Res.push_back("-fapple-kext");
@@ -705,6 +737,8 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-ffake-address-space-map");
if (Opts.ParseUnknownAnytype)
Res.push_back("-funknown-anytype");
+ if (Opts.DebuggerSupport)
+ Res.push_back("-fdebugger-support");
if (Opts.DelayedTemplateParsing)
Res.push_back("-fdelayed-template-parsing");
if (Opts.Deprecated)
@@ -938,6 +972,8 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
+ Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
+ OPT_fuse_register_sized_bitfield_access);
Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
@@ -951,6 +987,9 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
(Opts.OptimizationLevel > 1 && !Opts.OptimizeSize);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
+ Opts.ObjCRuntimeHasARC = Args.hasArg(OPT_fobjc_runtime_has_arc);
+ Opts.ObjCRuntimeHasTerminate = Args.hasArg(OPT_fobjc_runtime_has_terminate);
Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.CodeModel = Args.getLastArgValue(OPT_mcode_model);
@@ -965,6 +1004,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
Opts.NumRegisterParameters = Args.getLastArgIntValue(OPT_mregparm, 0, Diags);
+ Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
@@ -1010,6 +1050,7 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
+ Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
}
static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
@@ -1117,8 +1158,6 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::ASTPrint; break;
case OPT_ast_view:
Opts.ProgramAction = frontend::ASTView; break;
- case OPT_boostcon:
- Opts.ProgramAction = frontend::BoostCon; break;
case OPT_dump_raw_tokens:
Opts.ProgramAction = frontend::DumpRawTokens; break;
case OPT_dump_tokens:
@@ -1220,6 +1259,26 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
Opts.Modules = Args.getAllArgValues(OPT_import_module);
+ Opts.ARCMTAction = FrontendOptions::ARCMT_None;
+ if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
+ OPT_arcmt_modify,
+ OPT_arcmt_migrate)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case OPT_arcmt_check:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Check;
+ break;
+ case OPT_arcmt_modify:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Modify;
+ break;
+ case OPT_arcmt_migrate:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Migrate;
+ break;
+ }
+ }
+ Opts.ARCMTMigrateDir = Args.getLastArgValue(OPT_arcmt_migrate_directory);
+
InputKind DashX = IK_None;
if (const Arg *A = Args.getLastArg(OPT_x)) {
DashX = llvm::StringSwitch<InputKind>(A->getValue(Args))
@@ -1291,13 +1350,15 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
Opts.UseStandardIncludes = !Args.hasArg(OPT_nostdinc);
Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
+ if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
+ Opts.UseLibcxx = (strcmp(A->getValue(Args), "libc++") == 0);
Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
// Add -I... and -F... options in order.
for (arg_iterator it = Args.filtered_begin(OPT_I, OPT_F),
ie = Args.filtered_end(); it != ie; ++it)
Opts.AddPath((*it)->getValue(Args), frontend::Angled, true,
- /*IsFramework=*/ (*it)->getOption().matches(OPT_F), true);
+ /*IsFramework=*/ (*it)->getOption().matches(OPT_F), false);
// Add -iprefix/-iwith-prefix/-iwithprefixbefore options.
llvm::StringRef Prefix = ""; // FIXME: This isn't the correct default prefix.
@@ -1309,24 +1370,24 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Prefix = A->getValue(Args);
else if (A->getOption().matches(OPT_iwithprefix))
Opts.AddPath(Prefix.str() + A->getValue(Args),
- frontend::System, false, false, true);
+ frontend::System, false, false, false);
else
Opts.AddPath(Prefix.str() + A->getValue(Args),
- frontend::Angled, false, false, true);
+ frontend::Angled, false, false, false);
}
for (arg_iterator it = Args.filtered_begin(OPT_idirafter),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::After, true, false, true);
+ Opts.AddPath((*it)->getValue(Args), frontend::After, true, false, false);
for (arg_iterator it = Args.filtered_begin(OPT_iquote),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false, true);
+ Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false, false);
for (arg_iterator it = Args.filtered_begin(OPT_cxx_isystem, OPT_isystem,
OPT_iwithsysroot), ie = Args.filtered_end(); it != ie; ++it)
Opts.AddPath((*it)->getValue(Args),
((*it)->getOption().matches(OPT_cxx_isystem) ?
frontend::CXXSystem : frontend::System),
- true, false, (*it)->getOption().matches(OPT_iwithsysroot));
+ true, false, !(*it)->getOption().matches(OPT_iwithsysroot));
// FIXME: Need options for the various environment variables!
}
@@ -1480,17 +1541,27 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (Args.hasArg(OPT_fno_operator_names))
Opts.CXXOperatorNames = 0;
+ if (Opts.ObjC1) {
+ if (Args.hasArg(OPT_fobjc_gc_only))
+ Opts.setGCMode(LangOptions::GCOnly);
+ else if (Args.hasArg(OPT_fobjc_gc))
+ Opts.setGCMode(LangOptions::HybridGC);
+ else if (Args.hasArg(OPT_fobjc_arc)) {
+ Opts.ObjCAutoRefCount = 1;
+ if (!Args.hasArg(OPT_fobjc_nonfragile_abi))
+ Diags.Report(diag::err_arc_nonfragile_abi);
+ }
+
+ if (Args.hasArg(OPT_fobjc_runtime_has_weak))
+ Opts.ObjCRuntimeHasWeak = 1;
+
+ if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
+ Opts.ObjCInferRelatedResultType = 0;
+ }
+
if (Args.hasArg(OPT_fgnu89_inline))
Opts.GNUInline = 1;
- if (Args.hasArg(OPT_fobjc_gc_only))
- Opts.setGCMode(LangOptions::GCOnly);
- else if (Args.hasArg(OPT_fobjc_gc))
- Opts.setGCMode(LangOptions::HybridGC);
-
- if (Args.hasArg(OPT_fobjc_infer_related_result_type))
- Opts.ObjCInferRelatedResultType = 1;
-
if (Args.hasArg(OPT_fapple_kext)) {
if (!Opts.CPlusPlus)
Diags.Report(diag::warn_c_kext);
@@ -1598,6 +1669,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.MRTD = Args.hasArg(OPT_mrtd);
Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
+ Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
// Record whether the __DEPRECATED define was requested.
Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
@@ -1715,6 +1787,19 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.addRemappedFile(Split.first, Split.second);
}
+
+ if (Arg *A = Args.getLastArg(OPT_fobjc_arc_cxxlib_EQ)) {
+ llvm::StringRef Name = A->getValue(Args);
+ unsigned Library = llvm::StringSwitch<unsigned>(Name)
+ .Case("libc++", ARCXX_libcxx)
+ .Case("libstdc++", ARCXX_libstdcxx)
+ .Case("none", ARCXX_nolib)
+ .Default(~0U);
+ if (Library == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ else
+ Opts.ObjCXXARCStandardLibrary = (ObjCXXARCStandardLibraryKind)Library;
+ }
}
static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
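
The ARC migration plumbing above follows the usual CompilerInvocation round trip: the mode is stored as an enum in FrontendOptions, FrontendOptsToArgs serializes it back to its -cc1 spelling, and ParseFrontendArgs honours only the last of the competing -arcmt-* flags. A minimal standalone sketch of that round trip, with invented names rather than clang's real types:

#include <cstdio>
#include <string>
#include <vector>

enum ARCMTAction { ARCMT_None, ARCMT_Check, ARCMT_Modify, ARCMT_Migrate };

// Serialize the enum back to its -cc1 spelling (mirrors FrontendOptsToArgs).
static const char *toFlag(ARCMTAction A) {
  switch (A) {
  case ARCMT_None:    return 0;
  case ARCMT_Check:   return "-arcmt-check";
  case ARCMT_Modify:  return "-arcmt-modify";
  case ARCMT_Migrate: return "-arcmt-migrate";
  }
  return 0;
}

// Parse a flat argument list, keeping only the last -arcmt-* flag, the way
// Args.getLastArg() does in ParseFrontendArgs.
static ARCMTAction parseARCMT(const std::vector<std::string> &Args) {
  ARCMTAction Result = ARCMT_None;
  for (size_t i = 0, e = Args.size(); i != e; ++i) {
    if (Args[i] == "-arcmt-check")   Result = ARCMT_Check;
    if (Args[i] == "-arcmt-modify")  Result = ARCMT_Modify;
    if (Args[i] == "-arcmt-migrate") Result = ARCMT_Migrate;
  }
  return Result;
}

int main() {
  std::vector<std::string> Args;
  Args.push_back("-arcmt-check");
  Args.push_back("-arcmt-migrate");        // the later flag wins
  std::printf("%s\n", toFlag(parseARCMT(Args)));  // prints -arcmt-migrate
  return 0;
}
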
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index 5c3a23128a14..1edd09b06c4c 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -17,9 +17,11 @@
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -33,9 +35,11 @@ class DependencyFileCallback : public PPCallbacks {
llvm::raw_ostream *OS;
bool IncludeSystemHeaders;
bool PhonyTarget;
+ bool AddMissingHeaderDeps;
private:
bool FileMatchesDepCriteria(const char *Filename,
SrcMgr::CharacteristicKind FileType);
+ void AddFilename(llvm::StringRef Filename);
void OutputDependencyFile();
public:
@@ -44,10 +48,19 @@ public:
const DependencyOutputOptions &Opts)
: PP(_PP), Targets(Opts.Targets), OS(_OS),
IncludeSystemHeaders(Opts.IncludeSystemHeaders),
- PhonyTarget(Opts.UsePhonyTargets) {}
+ PhonyTarget(Opts.UsePhonyTargets),
+ AddMissingHeaderDeps(Opts.AddMissingHeaderDeps) {}
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType);
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ llvm::StringRef SearchPath,
+ llvm::StringRef RelativePath);
virtual void EndOfMainFile() {
OutputDependencyFile();
@@ -72,6 +85,16 @@ void clang::AttachDependencyFileGen(Preprocessor &PP,
return;
}
+ // Disable the "file not found" diagnostic if the -MG option was given.
+ // FIXME: Ideally this would live in the driver, but we don't have the ability
+ // to remap individual diagnostics there without creating a DiagGroup, in
+ // which case we would need to prevent the group name from showing up in
+ // diagnostics.
+ if (Opts.AddMissingHeaderDeps) {
+ PP.getDiagnostics().setDiagnosticMapping(diag::warn_pp_file_not_found,
+ diag::MAP_IGNORE, SourceLocation());
+ }
+
PP.addPPCallbacks(new DependencyFileCallback(&PP, OS, Opts));
}
@@ -103,14 +126,34 @@ void DependencyFileCallback::FileChanged(SourceLocation Loc,
SM.getFileEntryForID(SM.getFileID(SM.getInstantiationLoc(Loc)));
if (FE == 0) return;
- const char *Filename = FE->getName();
- if (!FileMatchesDepCriteria(Filename, FileType))
+ llvm::StringRef Filename = FE->getName();
+ if (!FileMatchesDepCriteria(Filename.data(), FileType))
return;
- // Remove leading "./"
- if (Filename[0] == '.' && Filename[1] == '/')
- Filename = &Filename[2];
+ // Remove leading "./" (or ".//" or "././" etc.)
+ while (Filename.size() > 2 && Filename[0] == '.' &&
+ llvm::sys::path::is_separator(Filename[1])) {
+ Filename = Filename.substr(1);
+ while (llvm::sys::path::is_separator(Filename[0]))
+ Filename = Filename.substr(1);
+ }
+
+ AddFilename(Filename);
+}
+
+void DependencyFileCallback::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ llvm::StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ llvm::StringRef SearchPath,
+ llvm::StringRef RelativePath) {
+ if (AddMissingHeaderDeps && !File)
+ AddFilename(FileName);
+}
+void DependencyFileCallback::AddFilename(llvm::StringRef Filename) {
if (FilesSet.insert(Filename))
Files.push_back(Filename);
}
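
The new InclusionDirective override is what implements -MG: when AddMissingHeaderDeps is set and the preprocessor could not find the header (File is null), the name as written is still recorded, and AddFilename deduplicates while keeping first-seen order. A rough standalone sketch of that bookkeeping; DepCollector is a made-up stand-in, not a clang class:

#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct DepCollector {
  std::set<std::string> FilesSet;   // fast duplicate checks
  std::vector<std::string> Files;   // preserves first-seen order for output
  void AddFilename(const std::string &Filename) {
    if (FilesSet.insert(Filename).second)
      Files.push_back(Filename);
  }
};

int main() {
  DepCollector C;
  C.AddFilename("main.c");
  C.AddFilename("config.h");      // header that was found on disk
  C.AddFilename("generated.h");   // missing header, still recorded under -MG
  C.AddFilename("config.h");      // duplicate include, ignored
  for (size_t i = 0, e = C.Files.size(); i != e; ++i)
    std::printf("%s\n", C.Files[i].c_str());
  return 0;
}
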
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 42da44c2c720..0128d6ee0531 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -130,6 +130,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
setCurrentFile(Filename, InputKind);
setCompilerInstance(&CI);
+ if (!BeginInvocation(CI))
+ goto failure;
+
// AST files follow a very different path, since they share objects via the
// AST unit.
if (InputKind == IK_AST) {
@@ -381,3 +384,46 @@ PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
}
+
+ASTConsumer *WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ return WrappedAction->CreateASTConsumer(CI, InFile);
+}
+bool WrapperFrontendAction::BeginInvocation(CompilerInstance &CI) {
+ return WrappedAction->BeginInvocation(CI);
+}
+bool WrapperFrontendAction::BeginSourceFileAction(CompilerInstance &CI,
+ llvm::StringRef Filename) {
+ WrappedAction->setCurrentFile(getCurrentFile(), getCurrentFileKind());
+ WrappedAction->setCompilerInstance(&CI);
+ return WrappedAction->BeginSourceFileAction(CI, Filename);
+}
+void WrapperFrontendAction::ExecuteAction() {
+ WrappedAction->ExecuteAction();
+}
+void WrapperFrontendAction::EndSourceFileAction() {
+ WrappedAction->EndSourceFileAction();
+}
+
+bool WrapperFrontendAction::usesPreprocessorOnly() const {
+ return WrappedAction->usesPreprocessorOnly();
+}
+bool WrapperFrontendAction::usesCompleteTranslationUnit() {
+ return WrappedAction->usesCompleteTranslationUnit();
+}
+bool WrapperFrontendAction::hasPCHSupport() const {
+ return WrappedAction->hasPCHSupport();
+}
+bool WrapperFrontendAction::hasASTFileSupport() const {
+ return WrappedAction->hasASTFileSupport();
+}
+bool WrapperFrontendAction::hasIRSupport() const {
+ return WrappedAction->hasIRSupport();
+}
+bool WrapperFrontendAction::hasCodeCompletionSupport() const {
+ return WrappedAction->hasCodeCompletionSupport();
+}
+
+WrapperFrontendAction::WrapperFrontendAction(FrontendAction *WrappedAction)
+ : WrappedAction(WrappedAction) {}
+
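
WrapperFrontendAction is a plain delegation shim: each virtual hook simply forwards to the action it wraps, which is what later lets the ARC migration tool interpose on an arbitrary frontend action (see ExecuteCompilerInvocation.cpp below) without reimplementing any of it. A toy version of the same pattern, with invented class names and only two of the many hooks:

#include <cstdio>

struct Action {
  virtual ~Action() {}
  virtual bool BeginInvocation() { return true; }
  virtual void Execute() { std::puts("running the wrapped action"); }
};

// Forwards every hook to the action it owns; a subclass overrides only the
// hooks it cares about and inherits the forwarding behaviour for the rest.
struct WrapperAction : Action {
  explicit WrapperAction(Action *A) : Wrapped(A) {}
  ~WrapperAction() { delete Wrapped; }
  virtual bool BeginInvocation() { return Wrapped->BeginInvocation(); }
  virtual void Execute() { Wrapped->Execute(); }
private:
  Action *Wrapped;
  WrapperAction(const WrapperAction &);             // non-copyable
  WrapperAction &operator=(const WrapperAction &);
};

int main() {
  WrapperAction W(new Action());
  if (W.BeginInvocation())
    W.Execute();
  return 0;
}
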
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index ad931166b8fc..e11a415db194 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -45,7 +45,7 @@ class InitHeaderSearch {
std::vector<std::pair<IncludeDirGroup, DirectoryLookup> > IncludePath;
typedef std::vector<std::pair<IncludeDirGroup,
DirectoryLookup> >::const_iterator path_iterator;
- HeaderSearch& Headers;
+ HeaderSearch &Headers;
bool Verbose;
std::string IncludeSysroot;
bool IsNotEmptyOrRoot;
@@ -78,7 +78,8 @@ public:
/// AddMinGW64CXXPaths - Add the necessary paths to support
/// libstdc++ of x86_64-w64-mingw32 aka mingw-w64.
- void AddMinGW64CXXPaths(llvm::StringRef Base);
+ void AddMinGW64CXXPaths(llvm::StringRef Base,
+ llvm::StringRef Version);
/// AddDelimitedPaths - Add a list of paths delimited by the system PATH
/// separator. The processing follows that of the CPATH variable for gcc.
@@ -90,7 +91,8 @@ public:
// AddDefaultCPlusPlusIncludePaths - Add paths that should be searched when
// compiling c++.
- void AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple);
+ void AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
/// AddDefaultSystemIncludePaths - Adds the default system include paths so
/// that e.g. stdio.h is found.
@@ -103,7 +105,7 @@ public:
void Realize(const LangOptions &Lang);
};
-}
+} // end anonymous namespace.
void InitHeaderSearch::AddPath(const llvm::Twine &Path,
IncludeDirGroup Group, bool isCXXAware,
@@ -216,12 +218,16 @@ void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(llvm::StringRef Base,
CXXSystem, true, false, false);
}
-void InitHeaderSearch::AddMinGW64CXXPaths(llvm::StringRef Base) {
- AddPath(Base,
+void InitHeaderSearch::AddMinGW64CXXPaths(llvm::StringRef Base,
+ llvm::StringRef Version) {
+ // Assumes Base is HeaderSearchOpts' ResourceDir
+ AddPath(Base + "/../../../include/c++/" + Version,
+ CXXSystem, true, false, false);
+ AddPath(Base + "/../../../include/c++/" + Version + "/x86_64-w64-mingw32",
CXXSystem, true, false, false);
- AddPath(Base + "/x86_64-w64-mingw32",
+ AddPath(Base + "/../../../include/c++/" + Version + "/i686-w64-mingw32",
CXXSystem, true, false, false);
- AddPath(Base + "/backward",
+ AddPath(Base + "/../../../include/c++/" + Version + "/backward",
CXXSystem, true, false, false);
}
@@ -245,24 +251,23 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
DWORD valueSize = maxLength - 1;
long lResult;
bool returnValue = false;
+
if (strncmp(keyPath, "HKEY_CLASSES_ROOT\\", 18) == 0) {
hRootKey = HKEY_CLASSES_ROOT;
subKey = keyPath + 18;
- }
- else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
+ } else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
hRootKey = HKEY_USERS;
subKey = keyPath + 11;
- }
- else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
+ } else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
hRootKey = HKEY_LOCAL_MACHINE;
subKey = keyPath + 19;
- }
- else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
+ } else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
hRootKey = HKEY_CURRENT_USER;
subKey = keyPath + 18;
}
else
- return(false);
+ return false;
+
const char *placeHolder = strstr(subKey, "$VERSION");
char bestName[256];
bestName[0] = '\0';
@@ -338,7 +343,7 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
RegCloseKey(hKey);
}
}
- return(returnValue);
+ return returnValue;
}
#else // _MSC_VER
// Read registry string.
@@ -351,12 +356,12 @@ static bool getSystemRegistryString(const char*, const char*, char*, size_t) {
static bool getVisualStudioDir(std::string &path) {
// First check the environment variables that vsvars32.bat sets.
const char* vcinstalldir = getenv("VCINSTALLDIR");
- if(vcinstalldir) {
+ if (vcinstalldir) {
char *p = const_cast<char *>(strstr(vcinstalldir, "\\VC"));
if (p)
*p = '\0';
path = vcinstalldir;
- return(true);
+ return true;
}
char vsIDEInstallDir[256];
@@ -374,56 +379,52 @@ static bool getVisualStudioDir(std::string &path) {
if (p)
*p = '\0';
path = vsIDEInstallDir;
- return(true);
+ return true;
}
- else if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
+
+ if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
char *p = (char*)strstr(vsExpressIDEInstallDir, "\\Common7\\IDE");
if (p)
*p = '\0';
path = vsExpressIDEInstallDir;
- return(true);
+ return true;
}
- else {
- // Try the environment.
- const char* vs100comntools = getenv("VS100COMNTOOLS");
- const char* vs90comntools = getenv("VS90COMNTOOLS");
- const char* vs80comntools = getenv("VS80COMNTOOLS");
- const char* vscomntools = NULL;
-
- // Try to find the version that we were compiled with
- if(false) {}
- #if (_MSC_VER >= 1600) // VC100
- else if(vs100comntools) {
- vscomntools = vs100comntools;
- }
- #elif (_MSC_VER == 1500) // VC80
- else if(vs90comntools) {
- vscomntools = vs90comntools;
- }
- #elif (_MSC_VER == 1400) // VC80
- else if(vs80comntools) {
- vscomntools = vs80comntools;
- }
- #endif
- // Otherwise find any version we can
- else if (vs100comntools)
- vscomntools = vs100comntools;
- else if (vs90comntools)
- vscomntools = vs90comntools;
- else if (vs80comntools)
- vscomntools = vs80comntools;
-
- if (vscomntools && *vscomntools) {
- char *p = const_cast<char *>(strstr(vscomntools, "\\Common7\\Tools"));
- if (p)
- *p = '\0';
- path = vscomntools;
- return(true);
- }
- else
- return(false);
+
+ // Try the environment.
+ const char *vs100comntools = getenv("VS100COMNTOOLS");
+ const char *vs90comntools = getenv("VS90COMNTOOLS");
+ const char *vs80comntools = getenv("VS80COMNTOOLS");
+ const char *vscomntools = NULL;
+
+ // Try to find the version that we were compiled with
+ if(false) {}
+ #if (_MSC_VER >= 1600) // VC100
+ else if(vs100comntools) {
+ vscomntools = vs100comntools;
}
- return(false);
+  #elif (_MSC_VER == 1500) // VC90
+ else if(vs90comntools) {
+ vscomntools = vs90comntools;
+ }
+ #elif (_MSC_VER == 1400) // VC80
+ else if(vs80comntools) {
+ vscomntools = vs80comntools;
+ }
+ #endif
+ // Otherwise find any version we can
+ else if (vs100comntools)
+ vscomntools = vs100comntools;
+ else if (vs90comntools)
+ vscomntools = vs90comntools;
+ else if (vs80comntools)
+ vscomntools = vs80comntools;
+
+ if (vscomntools && *vscomntools) {
+ const char *p = strstr(vscomntools, "\\Common7\\Tools");
+ path = p ? std::string(vscomntools, p) : vscomntools;
+ return true;
+ }
+ return false;
}
// Get Windows SDK installation directory.
@@ -432,7 +433,9 @@ static bool getWindowsSDKDir(std::string &path) {
// Try the Windows registry.
bool hasSDKDir = getSystemRegistryString(
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder", windowsSDKInstallDir, sizeof(windowsSDKInstallDir) - 1);
+ "InstallationFolder",
+ windowsSDKInstallDir,
+ sizeof(windowsSDKInstallDir) - 1);
// If we have both vc80 and vc90, pick version we were compiled with.
if (hasSDKDir && windowsSDKInstallDir[0]) {
path = windowsSDKInstallDir;
@@ -548,17 +551,28 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath("/boot/develop/headers/posix", System, true, false, false);
AddPath("/boot/develop/headers", System, true, false, false);
break;
+ case llvm::Triple::RTEMS:
+ break;
case llvm::Triple::Cygwin:
AddPath("/usr/include/w32api", System, true, false, false);
break;
- case llvm::Triple::MinGW32:
- // FIXME: We should be aware of i686-w64-mingw32.
- if (triple.getArch() == llvm::Triple::x86_64)
- AddPath("c:/mingw/x86_64-w64-mingw32/include",
- System, true, false, false);
- AddPath("/mingw/include", System, true, false, false);
- AddPath("c:/mingw/include", System, true, false, false);
+ case llvm::Triple::MinGW32: {
+ // mingw-w64 crt include paths
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ P.appendComponent("../../../i686-w64-mingw32/include"); // <sysroot>/i686-w64-mingw32/include
+ AddPath(P.str(), System, true, false, false);
+ P = llvm::sys::Path(HSOpts.ResourceDir);
+ P.appendComponent("../../../x86_64-w64-mingw32/include"); // <sysroot>/x86_64-w64-mingw32/include
+ AddPath(P.str(), System, true, false, false);
+ // mingw.org crt include paths
+ P = llvm::sys::Path(HSOpts.ResourceDir);
+ P.appendComponent("../../../include"); // <sysroot>/include
+ AddPath(P.str(), System, true, false, false);
+ AddPath("/mingw/include", System, true, false, false);
+ AddPath("c:/mingw/include", System, true, false, false);
+ }
break;
+
case llvm::Triple::Linux:
// Generic Debian multiarch support:
if (triple.getArch() == llvm::Triple::x86_64) {
@@ -576,11 +590,12 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
break;
}
- AddPath("/usr/include", System, false, false, false);
+  if (os != llvm::Triple::RTEMS)
+ AddPath("/usr/include", System, false, false, false);
}
void InitHeaderSearch::
-AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
+AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOptions &HSOpts) {
llvm::Triple::OSType os = triple.getOS();
llvm::StringRef CxxIncludeRoot(CXX_INCLUDE_ROOT);
if (CxxIncludeRoot != "") {
@@ -640,20 +655,19 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "3.4.4");
break;
case llvm::Triple::MinGW32:
- // FIXME: We should be aware of i686-w64-mingw32.
- if (triple.getArch() == llvm::Triple::x86_64) {
- // mingw-w64-20110207
- AddMinGW64CXXPaths("c:/mingw/x86_64-w64-mingw32/include/c++/4.5.3");
- // mingw-w64-20101129
- AddMinGW64CXXPaths("c:/mingw/x86_64-w64-mingw32/include/c++/4.5.2");
- }
- // Try gcc 4.5.2 (MSYS)
- AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2");
- // Try gcc 4.5.0
+ // mingw-w64 C++ include paths (i686-w64-mingw32 and x86_64-w64-mingw32)
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.0");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.1");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.2");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.3");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.0");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.1");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.2");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
+ // mingw.org C++ include paths
+ AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2"); //MSYS
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
- // Try gcc 4.4.0
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
- // Try gcc 4.3.0
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
break;
case llvm::Triple::DragonFly:
@@ -733,6 +747,9 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
"x86_64-redhat-linux", "32", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.5.1",
"i686-redhat-linux", "", "", triple);
+ // RHEL5(gcc44)
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4",
+ "x86_64-redhat-linux6E", "32", "", triple);
// Fedora 13
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4",
"x86_64-redhat-linux", "32", "", triple);
@@ -814,6 +831,10 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
"x86_64-unknown-linux-gnu", "", "", triple);
// Arch Linux gcc 4.6
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.6.1",
+ "i686-pc-linux-gnu", "", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.6.1",
+ "x86_64-unknown-linux-gnu", "", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.6.0",
"i686-pc-linux-gnu", "", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.6.0",
@@ -918,8 +939,12 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
void InitHeaderSearch::AddDefaultSystemIncludePaths(const LangOptions &Lang,
const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
- if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes)
- AddDefaultCPlusPlusIncludePaths(triple);
+ if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes) {
+ if (HSOpts.UseLibcxx)
+ AddPath("/usr/include/c++/v1", CXXSystem, true, false, false);
+ else
+ AddDefaultCPlusPlusIncludePaths(triple, HSOpts);
+ }
AddDefaultCIncludePaths(triple, HSOpts);
@@ -1017,23 +1042,24 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
std::vector<DirectoryLookup> SearchList;
SearchList.reserve(IncludePath.size());
- /* Quoted arguments go first. */
+ // Quoted arguments go first.
for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
it != ie; ++it) {
if (it->first == Quoted)
SearchList.push_back(it->second);
}
- /* Deduplicate and remember index */
+ // Deduplicate and remember index.
RemoveDuplicates(SearchList, 0, Verbose);
- unsigned quoted = SearchList.size();
+ unsigned NumQuoted = SearchList.size();
for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
it != ie; ++it) {
if (it->first == Angled)
SearchList.push_back(it->second);
}
- RemoveDuplicates(SearchList, quoted, Verbose);
- unsigned angled = SearchList.size();
+
+ RemoveDuplicates(SearchList, NumQuoted, Verbose);
+ unsigned NumAngled = SearchList.size();
for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
it != ie; ++it) {
@@ -1047,16 +1073,19 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
SearchList.push_back(it->second);
}
- RemoveDuplicates(SearchList, angled, Verbose);
+ // Remove duplicates across both the Angled and System directories. GCC does
+ // this and failing to remove duplicates across these two groups breaks
+ // #include_next.
+ RemoveDuplicates(SearchList, NumQuoted, Verbose);
bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
- Headers.SetSearchPaths(SearchList, quoted, angled, DontSearchCurDir);
+ Headers.SetSearchPaths(SearchList, NumQuoted, NumAngled, DontSearchCurDir);
// If verbose, print the list of directories that will be searched.
if (Verbose) {
llvm::errs() << "#include \"...\" search starts here:\n";
for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
- if (i == quoted)
+ if (i == NumQuoted)
llvm::errs() << "#include <...> search starts here:\n";
const char *Name = SearchList[i].getName();
const char *Suffix;
@@ -1084,7 +1113,7 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
for (unsigned i = 0, e = HSOpts.UserEntries.size(); i != e; ++i) {
const HeaderSearchOptions::Entry &E = HSOpts.UserEntries[i];
Init.AddPath(E.Path, E.Group, false, E.IsUserSupplied, E.IsFramework,
- !E.IsSysRootRelative);
+ E.IgnoreSysRoot);
}
// Add entries from CPATH and friends.
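
Realize() assembles the final lookup list in three groups, quoted directories first, then angled, then system, removing duplicates as each group is appended; the change above additionally deduplicates across the angled and system groups together, matching GCC so that #include_next still advances to a genuinely different directory. A simplified sketch of the grouping; the real code tracks per-group start offsets, while this version deduplicates globally for brevity:

#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

enum IncludeDirGroup { Quoted, Angled, System };

int main() {
  typedef std::pair<IncludeDirGroup, std::string> Entry;
  std::vector<Entry> IncludePath;
  IncludePath.push_back(Entry(Quoted, "./local"));
  IncludePath.push_back(Entry(Angled, "/opt/include"));
  IncludePath.push_back(Entry(System, "/usr/include"));
  IncludePath.push_back(Entry(System, "/opt/include"));  // dup of an angled dir

  std::vector<std::string> SearchList;
  const IncludeDirGroup Order[] = { Quoted, Angled, System };
  for (unsigned g = 0; g != 3; ++g)
    for (unsigned i = 0, e = IncludePath.size(); i != e; ++i)
      if (IncludePath[i].first == Order[g] &&
          std::find(SearchList.begin(), SearchList.end(),
                    IncludePath[i].second) == SearchList.end())
        SearchList.push_back(IncludePath[i].second);

  // Prints ./local, /opt/include, /usr/include; the duplicate system entry is
  // dropped, so #include_next moves on to a genuinely different directory.
  for (unsigned i = 0, e = SearchList.size(); i != e; ++i)
    std::printf("%s\n", SearchList[i].c_str());
  return 0;
}
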
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 147a8df03760..9428cd5de0f3 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -221,6 +221,125 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
ConstSuffix);
}
+/// \brief Add definitions required for a smooth interaction between
+/// Objective-C++ automatic reference counting and libc++.
+static void AddObjCXXARCLibcxxDefines(const LangOptions &LangOpts,
+ MacroBuilder &Builder) {
+ Builder.defineMacro("_LIBCPP_PREDEFINED_OBJC_ARC_ADDRESSOF");
+
+ std::string Result;
+ {
+ // Provide overloads of the function std::__1::addressof() that accept
+ // references to lifetime-qualified objects. libc++'s (more general)
+ // std::__1::addressof() template fails to instantiate with such types,
+ // because it attempts to convert the object to a char& before
+ // dereferencing.
+ llvm::raw_string_ostream Out(Result);
+
+ Out << "#pragma clang diagnostic push\n"
+ << "#pragma clang diagnostic ignored \"-Wc++0x-extensions\"\n"
+ << "namespace std { inline namespace __1 {\n"
+ << "\n";
+
+ Out << "template <class _Tp>\n"
+ << "inline __attribute__ ((__visibility__(\"hidden\"), "
+ << "__always_inline__))\n"
+ << "__attribute__((objc_ownership(strong))) _Tp*\n"
+ << "addressof(__attribute__((objc_ownership(strong))) _Tp& __x) {\n"
+ << " return &__x;\n"
+ << "}\n"
+ << "\n";
+
+ if (LangOpts.ObjCRuntimeHasWeak) {
+ Out << "template <class _Tp>\n"
+ << "inline __attribute__ ((__visibility__(\"hidden\"),"
+ << "__always_inline__))\n"
+ << "__attribute__((objc_ownership(weak))) _Tp*\n"
+ << "addressof(__attribute__((objc_ownership(weak))) _Tp& __x) {\n"
+ << " return &__x;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ Out << "template <class _Tp>\n"
+ << "inline __attribute__ ((__visibility__(\"hidden\"),"
+ << "__always_inline__))\n"
+ << "__attribute__((objc_ownership(autoreleasing))) _Tp*\n"
+ << "addressof(__attribute__((objc_ownership(autoreleasing))) _Tp& __x) "
+ << "{\n"
+ << " return &__x;\n"
+ << "}\n"
+ << "\n";
+
+ Out << "template <class _Tp>\n"
+ << "inline __attribute__ ((__visibility__(\"hidden\"), "
+ << "__always_inline__))\n"
+ << "__unsafe_unretained _Tp* addressof(__unsafe_unretained _Tp& __x)"
+ << " {\n"
+ << " return &__x;\n"
+ << "}\n";
+
+ Out << "\n"
+ << "} }\n"
+ << "#pragma clang diagnostic pop\n"
+ << "\n";
+ }
+ Builder.append(Result);
+}
+
+/// \brief Add definitions required for a smooth interaction between
+/// Objective-C++ automated reference counting and libstdc++ (4.2).
+static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
+ MacroBuilder &Builder) {
+ Builder.defineMacro("_GLIBCXX_PREDEFINED_OBJC_ARC_IS_SCALAR");
+
+ std::string Result;
+ {
+ // Provide specializations for the __is_scalar type trait so that
+ // lifetime-qualified objects are not considered "scalar" types, which
+ // libstdc++ uses as an indicator of the presence of trivial copy, assign,
+ // default-construct, and destruct semantics (none of which hold for
+ // lifetime-qualified objects in ARC).
+ llvm::raw_string_ostream Out(Result);
+
+ Out << "namespace std {\n"
+ << "\n"
+ << "struct __true_type;\n"
+ << "struct __false_type;\n"
+ << "\n";
+
+ Out << "template<typename _Tp> struct __is_scalar;\n"
+ << "\n";
+
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+
+ if (LangOpts.ObjCRuntimeHasWeak) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
+ << " _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+
+ Out << "}\n";
+ }
+ Builder.append(Result);
+}
+
static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
@@ -240,11 +359,18 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
} else {
if (LangOpts.GNUMode)
Builder.defineMacro("__cplusplus");
- else
- // C++ [cpp.predefined]p1:
+ else {
+ // C++0x [cpp.predefined]p1:
+      // The name __cplusplus is defined to the value 201103L when compiling a
+ // C++ translation unit.
+ if (LangOpts.CPlusPlus0x)
+ Builder.defineMacro("__cplusplus", "201103L");
+ // C++03 [cpp.predefined]p1:
      // The name __cplusplus is defined to the value 199711L when compiling a
// C++ translation unit.
- Builder.defineMacro("__cplusplus", "199711L");
+ else
+ Builder.defineMacro("__cplusplus", "199711L");
+ }
}
if (LangOpts.ObjC1)
@@ -312,7 +438,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// darwin_constant_cfstrings controls this. This is also dependent
// on other things like the runtime I believe. This is set even for C code.
- Builder.defineMacro("__CONSTANT_CFSTRINGS__");
+ if (!LangOpts.NoConstantCFStrings)
+ Builder.defineMacro("__CONSTANT_CFSTRINGS__");
if (LangOpts.ObjC2)
Builder.defineMacro("OBJC_NEW_PROPERTIES");
@@ -487,6 +614,15 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
+ if (LangOpts.ObjCAutoRefCount) {
+ Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
+ Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
+ Builder.defineMacro("__autoreleasing",
+ "__attribute__((objc_ownership(autoreleasing)))");
+ Builder.defineMacro("__unsafe_unretained",
+ "__attribute__((objc_ownership(none)))");
+ }
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
@@ -560,6 +696,7 @@ void clang::InitializePreprocessor(Preprocessor &PP,
const PreprocessorOptions &InitOpts,
const HeaderSearchOptions &HSOpts,
const FrontendOptions &FEOpts) {
+ const LangOptions &LangOpts = PP.getLangOptions();
std::string PredefineBuffer;
PredefineBuffer.reserve(4080);
llvm::raw_string_ostream Predefines(PredefineBuffer);
@@ -575,10 +712,27 @@ void clang::InitializePreprocessor(Preprocessor &PP,
Builder.append("# 1 \"<built-in>\" 3");
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
- if (InitOpts.UsePredefines)
- InitializePredefinedMacros(PP.getTargetInfo(), PP.getLangOptions(),
- FEOpts, Builder);
-
+ if (InitOpts.UsePredefines) {
+ InitializePredefinedMacros(PP.getTargetInfo(), LangOpts, FEOpts, Builder);
+
+ // Install definitions to make Objective-C++ ARC work well with various
+ // C++ Standard Library implementations.
+ if (LangOpts.ObjC1 && LangOpts.CPlusPlus && LangOpts.ObjCAutoRefCount) {
+ switch (InitOpts.ObjCXXARCStandardLibrary) {
+ case ARCXX_nolib:
+ break;
+
+ case ARCXX_libcxx:
+ AddObjCXXARCLibcxxDefines(LangOpts, Builder);
+ break;
+
+ case ARCXX_libstdcxx:
+ AddObjCXXARCLibstdcxxDefines(LangOpts, Builder);
+ break;
+ }
+ }
+ }
+
// Even with predefines off, some macros are still predefined.
// These should all be defined in the preprocessor according to the
// current language configuration.
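
With the predefine change above, a translation unit can distinguish C++0x mode by the value of __cplusplus rather than by feature macros. A small self-contained check:

#include <cstdio>

int main() {
#if __cplusplus >= 201103L
  std::printf("C++0x mode: __cplusplus = %ldL\n", (long)__cplusplus);
#else
  std::printf("C++03 mode: __cplusplus = %ldL\n", (long)__cplusplus);
#endif
  return 0;
}
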
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index b46e04749ba3..c892960a18b9 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/config.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
#include <cstdio>
using namespace clang;
@@ -122,6 +123,12 @@ public:
virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
const std::string &Str);
virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str);
+ virtual void PragmaDiagnosticPush(SourceLocation Loc,
+ llvm::StringRef Namespace);
+ virtual void PragmaDiagnosticPop(SourceLocation Loc,
+ llvm::StringRef Namespace);
+ virtual void PragmaDiagnostic(SourceLocation Loc, llvm::StringRef Namespace,
+ diag::Mapping Map, llvm::StringRef Str);
bool HandleFirstTokOnLine(Token &Tok);
bool MoveToLine(SourceLocation Loc) {
@@ -189,7 +196,7 @@ bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
if (LineNo-CurLine == 1)
OS << '\n';
else if (LineNo == CurLine)
- return false; // Spelling line moved, but instantiation line didn't.
+ return false; // Spelling line moved, but expansion line didn't.
else {
const char *NewLines = "\n\n\n\n\n\n\n\n";
OS.write(NewLines, LineNo-CurLine);
@@ -361,12 +368,49 @@ void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
EmittedTokensOnThisLine = true;
}
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPush(SourceLocation Loc, llvm::StringRef Namespace) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic push";
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPop(SourceLocation Loc, llvm::StringRef Namespace) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic pop";
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnostic(SourceLocation Loc, llvm::StringRef Namespace,
+ diag::Mapping Map, llvm::StringRef Str) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic ";
+ switch (Map) {
+ default: llvm_unreachable("unexpected diagnostic kind");
+ case diag::MAP_WARNING:
+ OS << "warning";
+ break;
+ case diag::MAP_ERROR:
+ OS << "error";
+ break;
+ case diag::MAP_IGNORE:
+ OS << "ignored";
+ break;
+ case diag::MAP_FATAL:
+ OS << "fatal";
+ break;
+ }
+ OS << " \"" << Str << '"';
+ EmittedTokensOnThisLine = true;
+}
/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
/// is called for the first token on each new line. If this really is the start
/// of a new logical line, handle it and return true, otherwise return false.
/// This may not be the start of a logical line because the "start of line"
-/// marker is set for spelling lines, not instantiation ones.
+/// marker is set for spelling lines, not expansion ones.
bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index 1c47bf7bee93..e49e19a17c8d 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -292,8 +292,57 @@ static void SelectInterestingSourceRegion(std::string &SourceLine,
}
}
-void TextDiagnosticPrinter::EmitCaretDiagnostic(Diagnostic::Level Level,
- SourceLocation Loc,
+/// Look through spelling locations for a macro argument expansion, and
+/// if found skip to it so that we can trace the argument rather than the macros
+/// in which that argument is used. If no macro argument expansion is found,
+/// don't skip anything and return the starting location.
+static SourceLocation skipToMacroArgExpansion(const SourceManager &SM,
+ SourceLocation StartLoc) {
+ for (SourceLocation L = StartLoc; L.isMacroID();
+ L = SM.getImmediateSpellingLoc(L)) {
+ if (SM.isMacroArgInstantiation(L))
+ return L;
+ }
+
+ // Otherwise just return initial location, there's nothing to skip.
+ return StartLoc;
+}
+
+/// Gets the location of the immediate macro caller, one level up the stack
+/// toward the initial macro typed into the source.
+static SourceLocation getImmediateMacroCallerLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its spelling
+ // location points to the argument as typed into the macro call, and
+ // therefore is used to locate the macro caller.
+ if (SM.isMacroArgInstantiation(Loc))
+ return SM.getImmediateSpellingLoc(Loc);
+
+ // Otherwise, the caller of the macro is located where this macro is
+ // expanded (while the spelling is part of the macro definition).
+ return SM.getImmediateInstantiationRange(Loc).first;
+}
+
+/// Gets the location of the immediate macro callee, one level down the stack
+/// toward the leaf macro.
+static SourceLocation getImmediateMacroCalleeLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its
+  // expansion location points to the unexpanded parameter reference within
+ // the macro definition (or callee).
+ if (SM.isMacroArgInstantiation(Loc))
+ return SM.getImmediateInstantiationRange(Loc).first;
+
+ // Otherwise, the callee of the macro is located where this location was
+ // spelled inside the macro definition.
+ return SM.getImmediateSpellingLoc(Loc);
+}
+
+void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
CharSourceRange *Ranges,
unsigned NumRanges,
const SourceManager &SM,
@@ -307,38 +356,46 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(Diagnostic::Level Level,
assert(!Loc.isInvalid() && "must have a valid source location here");
// If this is a macro ID, first emit information about where this was
- // instantiated (recursively) then emit information about where the token was
+ // expanded (recursively) then emit information about where the token was
// spelled from.
if (!Loc.isFileID()) {
- // Whether to suppress printing this macro instantiation.
+ // Whether to suppress printing this macro expansion.
bool Suppressed
= OnMacroInst >= MacroSkipStart && OnMacroInst < MacroSkipEnd;
-
- SourceLocation OneLevelUp = SM.getImmediateInstantiationRange(Loc).first;
+ // When processing macros, skip over the expansions leading up to
+ // a macro argument, and trace the argument's expansion stack instead.
+ Loc = skipToMacroArgExpansion(SM, Loc);
+
+ SourceLocation OneLevelUp = getImmediateMacroCallerLoc(SM, Loc);
+
// FIXME: Map ranges?
- EmitCaretDiagnostic(Level, OneLevelUp, Ranges, NumRanges, SM, 0, 0, Columns,
+ EmitCaretDiagnostic(OneLevelUp, Ranges, NumRanges, SM,
+ Hints, NumHints, Columns,
OnMacroInst + 1, MacroSkipStart, MacroSkipEnd);
-
+
// Map the location.
- Loc = SM.getImmediateSpellingLoc(Loc);
+ Loc = getImmediateMacroCalleeLoc(SM, Loc);
// Map the ranges.
for (unsigned i = 0; i != NumRanges; ++i) {
CharSourceRange &R = Ranges[i];
SourceLocation S = R.getBegin(), E = R.getEnd();
if (S.isMacroID())
- R.setBegin(SM.getImmediateSpellingLoc(S));
+ R.setBegin(getImmediateMacroCalleeLoc(SM, S));
if (E.isMacroID())
- R.setEnd(SM.getImmediateSpellingLoc(E));
+ R.setEnd(getImmediateMacroCalleeLoc(SM, E));
}
if (!Suppressed) {
+ // Don't print recursive expansion notes from an expansion note.
+ Loc = SM.getSpellingLoc(Loc);
+
// Get the pretty name, according to #line directives etc.
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
if (PLoc.isInvalid())
return;
-
+
// If this diagnostic is not in the main file, print out the
// "included from" lines.
if (LastWarningLoc != PLoc.getIncludeLoc()) {
@@ -353,9 +410,9 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(Diagnostic::Level Level,
OS << PLoc.getColumn() << ':';
OS << ' ';
}
- OS << "note: instantiated from:\n";
-
- EmitCaretDiagnostic(Level, Loc, Ranges, NumRanges, SM, Hints, NumHints,
+ OS << "note: expanded from:\n";
+
+ EmitCaretDiagnostic(Loc, Ranges, NumRanges, SM, 0, 0,
Columns, OnMacroInst + 1, MacroSkipStart,
MacroSkipEnd);
return;
@@ -364,13 +421,13 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(Diagnostic::Level Level,
if (OnMacroInst == MacroSkipStart) {
// Tell the user that we've skipped contexts.
OS << "note: (skipping " << (MacroSkipEnd - MacroSkipStart)
- << " contexts in backtrace; use -fmacro-backtrace-limit=0 to see "
+ << " expansions in backtrace; use -fmacro-backtrace-limit=0 to see "
"all)\n";
}
return;
}
-
+
// Decompose the location into a FID/Offset pair.
std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
FileID FID = LocInfo.first;
@@ -769,6 +826,20 @@ static bool PrintWordWrapped(llvm::raw_ostream &OS,
return true;
}
+/// Get the presumed location of a diagnostic message. This computes the
+/// presumed location for the top of any macro backtrace when present.
+static PresumedLoc getDiagnosticPresumedLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ // This is a condensed form of the algorithm used by EmitCaretDiagnostic to
+ // walk to the top of the macro call stack.
+ while (Loc.isMacroID()) {
+ Loc = skipToMacroArgExpansion(SM, Loc);
+ Loc = getImmediateMacroCallerLoc(SM, Loc);
+ }
+
+ return SM.getPresumedLoc(Loc);
+}
+
void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info) {
// Default implementation (Warnings/errors count).
@@ -787,7 +858,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// if enabled.
if (Info.getLocation().isValid()) {
const SourceManager &SM = Info.getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(Info.getLocation());
+ PresumedLoc PLoc = getDiagnosticPresumedLoc(SM, Info.getLocation());
if (PLoc.isInvalid()) {
// At least print the file name if available:
FileID FID = SM.getFileID(Info.getLocation());
@@ -1040,15 +1111,17 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
}
}
+ const SourceManager &SM = LastLoc.getManager();
unsigned MacroInstSkipStart = 0, MacroInstSkipEnd = 0;
if (DiagOpts && DiagOpts->MacroBacktraceLimit && !LastLoc.isFileID()) {
- // Compute the length of the macro-instantiation backtrace, so that we
+ // Compute the length of the macro-expansion backtrace, so that we
// can establish which steps in the macro backtrace we'll skip.
SourceLocation Loc = LastLoc;
unsigned Depth = 0;
do {
++Depth;
- Loc = LastLoc.getManager().getImmediateInstantiationRange(Loc).first;
+ Loc = skipToMacroArgExpansion(SM, Loc);
+ Loc = getImmediateMacroCallerLoc(SM, Loc);
} while (!Loc.isFileID());
if (Depth > DiagOpts->MacroBacktraceLimit) {
@@ -1058,7 +1131,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
}
}
- EmitCaretDiagnostic(Level, LastLoc, Ranges, NumRanges, LastLoc.getManager(),
+ EmitCaretDiagnostic(LastLoc, Ranges, NumRanges, LastLoc.getManager(),
Info.getFixItHints(),
Info.getNumFixItHints(),
DiagOpts->MessageLength,
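
The new caller/callee helpers change what the caret notes trace: when a diagnostic points into a macro argument, the printer follows the argument's own expansion stack and labels each step "expanded from" rather than "instantiated from". A small program whose warning (enable -Wsign-compare, e.g. via -Wextra) sits inside such an argument chain:

#define LESS(a, b) ((a) < (b))
#define APPLY(m, x, y) m(x, y)

int main() {
  unsigned u = 1;
  // The comparison itself is well defined (-1 converts to a large unsigned
  // value), but -Wsign-compare fires inside LESS's expansion; the backtrace
  // notes walk from the argument as written here up through APPLY and LESS.
  return APPLY(LESS, -1, u) ? 1 : 0;
}
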
diff --git a/lib/FrontendTool/CMakeLists.txt b/lib/FrontendTool/CMakeLists.txt
index 720ce2adf123..b8e4329c07f9 100644
--- a/lib/FrontendTool/CMakeLists.txt
+++ b/lib/FrontendTool/CMakeLists.txt
@@ -1,5 +1,6 @@
set(LLVM_USED_LIBS clangDriver clangFrontend clangRewrite clangCodeGen
- clangStaticAnalyzerFrontend clangStaticAnalyzerCheckers clangStaticAnalyzerCore)
+ clangStaticAnalyzerFrontend clangStaticAnalyzerCheckers clangStaticAnalyzerCore
+ clangARCMigrate)
add_clang_library(clangFrontendTool
ExecuteCompilerInvocation.cpp
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 664b53351dc1..f2db3ae74191 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -14,6 +14,7 @@
#include "clang/FrontendTool/Utils.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Driver/CC1Options.h"
#include "clang/Driver/OptTable.h"
@@ -38,7 +39,6 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case ASTDumpXML: return new ASTDumpXMLAction();
case ASTPrint: return new ASTPrintAction();
case ASTView: return new ASTViewAction();
- case BoostCon: return new BoostConAction();
case CreateModule: return 0;
case DumpRawTokens: return new DumpRawTokensAction();
case DumpTokens: return new DumpTokensAction();
@@ -89,6 +89,21 @@ static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
if (!Act)
return 0;
+ // Potentially wrap the base FE action in an ARC Migrate Tool action.
+ switch (CI.getFrontendOpts().ARCMTAction) {
+ case FrontendOptions::ARCMT_None:
+ break;
+ case FrontendOptions::ARCMT_Check:
+ Act = new arcmt::CheckAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Modify:
+ Act = new arcmt::ModifyAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Migrate:
+ Act = new arcmt::MigrateAction(Act, CI.getFrontendOpts().ARCMTMigrateDir);
+ break;
+ }
+
// If there are any AST files to merge, create a frontend action
// adaptor to perform the merge.
if (!CI.getFrontendOpts().ASTMergeFiles.empty())
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index 28fb882bd24a..6eede0b6ba47 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -24,6 +24,48 @@
#ifndef __FLOAT_H
#define __FLOAT_H
+/* If we're on MinGW, fall back to the system's float.h, which might have
+ * additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ */
+#if defined(__MINGW32__) && \
+ defined(__has_include_next) && __has_include_next(<float.h>)
+# include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# undef DECIMAL_DIG
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+#endif
+
/* Characteristics of floating point types, C99 5.2.4.2.2 */
#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
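
The MinGW block uses a wrap-and-override pattern: pull in the vendor's header through include_next (guarded by __has_include_next), then undef everything that is about to be redefined so the builtin-based definitions below win. The same idea applied to a hypothetical wrapper header, shown only as an illustrative fragment rather than anything shipped by clang:

/* mywrapper/limits.h -- illustrative fragment only */
#ifndef MY_LIMITS_H
#define MY_LIMITS_H

#if defined(__has_include_next) && __has_include_next(<limits.h>)
# include_next <limits.h>   /* keep any platform-specific extras */
# undef CHAR_BIT            /* ...but make sure our definition below wins */
#endif

#define CHAR_BIT 8

#endif /* MY_LIMITS_H */
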
diff --git a/lib/Headers/stdarg.h b/lib/Headers/stdarg.h
index c36ab12019fe..2957bf058ea8 100644
--- a/lib/Headers/stdarg.h
+++ b/lib/Headers/stdarg.h
@@ -39,7 +39,7 @@ typedef __builtin_va_list va_list;
*/
#define __va_copy(d,s) __builtin_va_copy(d,s)
-#if __STDC_VERSION__ >= 199900L || !defined(__STRICT_ANSI__)
+#if __STDC_VERSION__ >= 199900L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
#define va_copy(dest, src) __builtin_va_copy(dest, src)
#endif
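
With the relaxed guard, va_copy is visible even in a strict -std=c++11 compile (where __STRICT_ANSI__ is defined but __cplusplus >= 201103L), not only in C99 or non-strict modes. A minimal use:

#include <stdarg.h>
#include <stdio.h>

static int sum(int n, ...) {
  va_list ap, copy;
  va_start(ap, n);
  va_copy(copy, ap);          // needs the relaxed guard under -std=c++11
  int total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(copy, int);
  va_end(copy);
  va_end(ap);
  return total;
}

int main() {
  printf("%d\n", sum(3, 1, 2, 3));  // prints 6
  return 0;
}
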
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index bb4388195a32..86ab9564a235 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -123,10 +123,8 @@ const FileEntry *DirectoryLookup::LookupFile(
llvm::SmallString<1024> TmpDir;
if (isNormalDir()) {
// Concatenate the requested file onto the directory.
- // FIXME: Portability. Filename concatenation should be in sys::Path.
- TmpDir += getDir()->getName();
- TmpDir.push_back('/');
- TmpDir.append(Filename.begin(), Filename.end());
+ TmpDir = getDir()->getName();
+ llvm::sys::path::append(TmpDir, Filename);
if (SearchPath != NULL) {
llvm::StringRef SearchPathRef(getDir()->getName());
SearchPath->clear();
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 3b1149c08af3..a28b8f6e7b9f 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -156,7 +156,7 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
-/// instantiation location that indicates where all lexed tokens should be
+/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
@@ -166,8 +166,8 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
- SourceLocation InstantiationLocStart,
- SourceLocation InstantiationLocEnd,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
unsigned TokLen, Preprocessor &PP) {
SourceManager &SM = PP.getSourceManager();
@@ -188,8 +188,8 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
// Set the SourceLocation with the remapping information. This ensures that
// GetMappedTokenLoc will remap the tokens as they are lexed.
L->FileLoc = SM.createInstantiationLoc(SM.getLocForStartOfFile(SpellingFID),
- InstantiationLocStart,
- InstantiationLocEnd, TokLen);
+ ExpansionLocStart,
+ ExpansionLocEnd, TokLen);
// Ensure that the lexer thinks it is inside a directive, so that end \n will
// return an EOD token.
@@ -621,7 +621,7 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
unsigned CharNo,
const SourceManager &SM,
const LangOptions &Features) {
- // Figure out how many physical characters away the specified instantiation
+ // Figure out how many physical characters away the specified expansion
// character is. This needs to take into consideration newlines and
// trigraphs.
bool Invalid = false;
@@ -679,9 +679,17 @@ SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
const SourceManager &SM,
const LangOptions &Features) {
- if (Loc.isInvalid() || !Loc.isFileID())
+ if (Loc.isInvalid())
return SourceLocation();
-
+
+ if (Loc.isMacroID()) {
+ if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, Features))
+ return SourceLocation(); // Points inside the macro expansion.
+
+ // Continue and find the location just after the macro expansion.
+ Loc = SM.getInstantiationRange(Loc).second;
+ }
+
unsigned Len = Lexer::MeasureTokenLength(Loc, SM, Features);
if (Len > Offset)
Len = Len - Offset;
@@ -691,6 +699,58 @@ SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
return Loc.getFileLocWithOffset(Len);
}
+/// \brief Returns true if the given MacroID location points at the first
+/// token of the macro expansion.
+bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ std::pair<FileID, unsigned> infoLoc = SM.getDecomposedLoc(loc);
+ // FIXME: If the token comes from the macro token paste operator ('##')
+ // this function will always return false;
+ if (infoLoc.second > 0)
+ return false; // Does not point at the start of token.
+
+ SourceLocation expansionLoc =
+ SM.getSLocEntry(infoLoc.first)
+ .getInstantiation().getInstantiationLocStart();
+ if (expansionLoc.isFileID())
+ return true; // No other macro expansions, this is the first.
+
+ return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts);
+}
+
+/// \brief Returns true if the given MacroID location points at the last
+/// token of the macro expansion.
+bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ SourceLocation spellLoc = SM.getSpellingLoc(loc);
+ unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
+ if (tokLen == 0)
+ return false;
+
+ FileID FID = SM.getFileID(loc);
+ SourceLocation afterLoc = loc.getFileLocWithOffset(tokLen+1);
+ if (!SM.isBeforeInSourceLocationOffset(afterLoc, SM.getNextOffset()))
+ return true; // We got past the last FileID, this points to the last token.
+
+ // FIXME: If the token comes from the macro token paste operator ('##')
+ // or the stringify operator ('#') this function will always return false;
+ if (FID == SM.getFileID(afterLoc))
+ return false; // Still in the same FileID, does not point to the last token.
+
+ SourceLocation expansionLoc =
+ SM.getSLocEntry(FID).getInstantiation().getInstantiationLocEnd();
+ if (expansionLoc.isFileID())
+ return true; // No other macro expansions.
+
+ return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts);
+}
+
//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//
@@ -829,7 +889,7 @@ static inline bool isNumberBody(unsigned char c) {
//===----------------------------------------------------------------------===//
/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
-/// lexer buffer was all instantiated at a single point, perform the mapping.
+/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method. Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
@@ -837,14 +897,14 @@ static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
SourceLocation FileLoc,
unsigned CharNo, unsigned TokLen) {
- assert(FileLoc.isMacroID() && "Must be an instantiation");
+ assert(FileLoc.isMacroID() && "Must be a macro expansion");
// Otherwise, we're lexing "mapped tokens". This is used for things like
- // _Pragma handling. Combine the instantiation location of FileLoc with the
+ // _Pragma handling. Combine the expansion location of FileLoc with the
// spelling location.
SourceManager &SM = PP.getSourceManager();
- // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose
+ // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
// characters come from spelling(FileLoc)+Offset.
SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);
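
getLocForEndOfToken previously gave up on any macro location; it now succeeds when the location denotes the last token of an expansion, which is what allows a fix-it to be placed immediately after code like the array bound below. Illustrative source only:

#define LEN 10

// A location at the end of LEN's expansion (the "10") can now be mapped to
// the file location just past the expansion, so a fix-it inserting text after
// the macro use no longer has to be dropped.
int buffer[LEN];

int main() {
  return (sizeof(buffer) / sizeof(buffer[0]) == 10) ? 0 : 1;
}
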
diff --git a/lib/Lex/MacroArgs.cpp b/lib/Lex/MacroArgs.cpp
index dee7da38aaa0..968c15e3c27b 100644
--- a/lib/Lex/MacroArgs.cpp
+++ b/lib/Lex/MacroArgs.cpp
@@ -185,7 +185,8 @@ MacroArgs::getPreExpArgument(unsigned Arg, const MacroInfo *MI,
/// a character literal for the Microsoft charize (#@) extension.
///
Token MacroArgs::StringifyArgument(const Token *ArgToks,
- Preprocessor &PP, bool Charify) {
+ Preprocessor &PP, bool Charify,
+ SourceLocation hashInstLoc) {
Token Tok;
Tok.startToken();
Tok.setKind(Charify ? tok::char_constant : tok::string_literal);
@@ -273,14 +274,15 @@ Token MacroArgs::StringifyArgument(const Token *ArgToks,
}
}
- PP.CreateString(&Result[0], Result.size(), Tok);
+ PP.CreateString(&Result[0], Result.size(), Tok, hashInstLoc);
return Tok;
}
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
- Preprocessor &PP) {
+ Preprocessor &PP,
+ SourceLocation hashInstLoc) {
assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
if (StringifiedArgs.empty()) {
StringifiedArgs.resize(getNumArguments());
@@ -288,6 +290,7 @@ const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
sizeof(StringifiedArgs[0])*getNumArguments());
}
if (StringifiedArgs[ArgNo].isNot(tok::string_literal))
- StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP);
+ StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP,
+ /*Charify=*/false, hashInstLoc);
return StringifiedArgs[ArgNo];
}
diff --git a/lib/Lex/MacroArgs.h b/lib/Lex/MacroArgs.h
index 6ff4856b4e1c..a962dacf7c93 100644
--- a/lib/Lex/MacroArgs.h
+++ b/lib/Lex/MacroArgs.h
@@ -20,6 +20,7 @@ namespace clang {
class MacroInfo;
class Preprocessor;
class Token;
+ class SourceLocation;
/// MacroArgs - An instance of this class captures information about
/// the formal arguments specified to a function-like macro invocation.
@@ -86,7 +87,8 @@ public:
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
- const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
+ const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP,
+ SourceLocation hashInstLoc);
/// getNumArguments - Return the number of arguments passed into this macro
/// invocation.
@@ -106,7 +108,8 @@ public:
/// a character literal for the Microsoft charize (#@) extension.
///
static Token StringifyArgument(const Token *ArgToks,
- Preprocessor &PP, bool Charify = false);
+ Preprocessor &PP, bool Charify,
+ SourceLocation hashInstLoc);
/// deallocate - This should only be called by the Preprocessor when managing
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 66d87a19386a..0a16a2567219 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -25,6 +25,7 @@ MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
IsUsed = false;
IsAllowRedefinitionsWithoutWarning = false;
IsWarnIfUnused = false;
+ IsDefinitionLengthCached = false;
ArgumentList = 0;
NumArguments = 0;
@@ -43,11 +44,42 @@ MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator) {
IsUsed = MI.IsUsed;
IsAllowRedefinitionsWithoutWarning = MI.IsAllowRedefinitionsWithoutWarning;
IsWarnIfUnused = MI.IsWarnIfUnused;
+ IsDefinitionLengthCached = MI.IsDefinitionLengthCached;
+ DefinitionLength = MI.DefinitionLength;
ArgumentList = 0;
NumArguments = 0;
setArgumentList(MI.ArgumentList, MI.NumArguments, PPAllocator);
}
+unsigned MacroInfo::getDefinitionLengthSlow(SourceManager &SM) const {
+ assert(!IsDefinitionLengthCached);
+ IsDefinitionLengthCached = true;
+
+ if (ReplacementTokens.empty())
+ return (DefinitionLength = 0);
+
+ const Token &firstToken = ReplacementTokens.front();
+ const Token &lastToken = ReplacementTokens.back();
+ SourceLocation macroStart = firstToken.getLocation();
+ SourceLocation macroEnd = lastToken.getLocation();
+ assert(macroStart.isValid() && macroEnd.isValid());
+ assert((macroStart.isFileID() || firstToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert((macroEnd.isFileID() || lastToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ std::pair<FileID, unsigned>
+ startInfo = SM.getDecomposedInstantiationLoc(macroStart);
+ std::pair<FileID, unsigned>
+ endInfo = SM.getDecomposedInstantiationLoc(macroEnd);
+ assert(startInfo.first == endInfo.first &&
+ "Macro definition spanning multiple FileIDs ?");
+ assert(startInfo.second <= endInfo.second);
+ DefinitionLength = endInfo.second - startInfo.second;
+ DefinitionLength += lastToken.getLength();
+
+ return DefinitionLength;
+}
+
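The new getDefinitionLengthSlow caches the definition length as the offset distance between the first and last replacement tokens plus the last token's length. A minimal standalone sketch of that arithmetic, using a simplified ToyToken type rather than clang's Token/SourceManager API:

#include <cassert>
#include <vector>

// Simplified stand-in for a preprocessor token: an offset into a single
// buffer plus the token's character length.
struct ToyToken {
  unsigned Offset;
  unsigned Length;
};

// Length of a macro definition spanning from the first to the last
// replacement token: (endOffset - startOffset) + length of the last token.
unsigned definitionLength(const std::vector<ToyToken> &ReplacementTokens) {
  if (ReplacementTokens.empty())
    return 0;
  const ToyToken &First = ReplacementTokens.front();
  const ToyToken &Last = ReplacementTokens.back();
  assert(First.Offset <= Last.Offset && "tokens out of order");
  return (Last.Offset - First.Offset) + Last.Length;
}

int main() {
  // #define ADD(a, b) ((a) + (b))   -- replacement list "((a) + (b))"
  // starting at offset 18; the expected definition length is 11.
  std::vector<ToyToken> Toks = {{18, 1}, {19, 1}, {20, 1}, {21, 1}, {23, 1},
                                {25, 1}, {26, 1}, {27, 1}, {28, 1}};
  return definitionLength(Toks) == 11 ? 0 : 1;
}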
/// isIdenticalTo - Return true if the specified macro definition is equal to
/// this macro in spelling, arguments, and whitespace. This is used to emit
/// duplicate definition warnings. This implements the rules in C99 6.10.3.
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 66e44bbc6dd0..4af5fabe5c80 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -784,8 +784,7 @@ void Preprocessor::HandleLineDirective(Token &Tok) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
return DiscardUntilEndOfDirective();
}
- FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString(),
- Literal.GetStringLength());
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
// Verify that there is nothing after the string, other than EOD. Because
// of C99 6.10.4p5, macros that expand to empty tokens are ok.
@@ -918,8 +917,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
return DiscardUntilEndOfDirective();
}
- FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString(),
- Literal.GetStringLength());
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
// If a filename was present, read any flags that are present.
if (ReadLineMarkerFlags(IsFileEntry, IsFileExit,
@@ -1182,16 +1180,17 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
const FileEntry *File = LookupFile(
Filename, isAngled, LookupFrom, CurDir,
Callbacks ? &SearchPath : NULL, Callbacks ? &RelativePath : NULL);
- if (File == 0) {
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
- return;
- }
// Notify the callback object that we've seen an inclusion directive.
if (Callbacks)
Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, File,
End, SearchPath, RelativePath);
+ if (File == 0) {
+ Diag(FilenameTok, diag::warn_pp_file_not_found) << Filename;
+ return;
+ }
+
// The #included file will be considered to be a system header if either it is
// in a system include directory, or if the #includer is a system include
// header.
@@ -1210,10 +1209,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// Look up the file, create a File ID for it.
FileID FID = SourceMgr.createFileID(File, FilenameTok.getLocation(),
FileCharacter);
- if (FID.isInvalid()) {
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
- return;
- }
+ assert(!FID.isInvalid() && "Expected valid file ID");
// Finally, if all is good, enter the new file!
EnterSourceFile(FID, CurDir, FilenameTok.getLocation());
@@ -1599,7 +1595,7 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) {
// If the macro is not defined, this is a noop undef, just return.
if (MI == 0) return;
- if (!MI->isUsed())
+ if (!MI->isUsed() && MI->isWarnIfUnused())
Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
// If the callbacks want to know, tell them about the macro #undef.
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index bf0a7fbfef18..bf28199b888a 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -265,6 +265,10 @@ bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
assert(CurTokenLexer && !CurPPLexer &&
"Ending a macro when currently in a #include file!");
+ if (!MacroExpandingLexersStack.empty() &&
+ MacroExpandingLexersStack.back().first == CurTokenLexer.get())
+ removeCachedMacroExpandedTokensOfLastLexer();
+
// Delete or cache the now-dead macro expander.
if (NumCachedTokenLexers == TokenLexerCacheSize)
CurTokenLexer.reset();
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 01cd75fa8485..ecd4d4cfc68b 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -22,6 +22,7 @@
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -194,9 +195,9 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
/// invocation.
MacroArgs *Args = 0;
- // Remember where the end of the instantiation occurred. For an object-like
+ // Remember where the end of the expansion occurred. For an object-like
// macro, this is the identifier. For a function-like macro, this is the ')'.
- SourceLocation InstantiationEnd = Identifier.getLocation();
+ SourceLocation ExpansionEnd = Identifier.getLocation();
// If this is a function-like macro, read the arguments.
if (MI->isFunctionLike()) {
@@ -209,7 +210,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// Preprocessor directives used inside macro arguments are not portable, and
// this enables the warning.
InMacroArgs = true;
- Args = ReadFunctionLikeMacroArgs(Identifier, MI, InstantiationEnd);
+ Args = ReadFunctionLikeMacroArgs(Identifier, MI, ExpansionEnd);
// Finished parsing args.
InMacroArgs = false;
@@ -229,8 +230,8 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
// If we started lexing a macro, enter the macro expansion body.
- // Remember where the token is instantiated.
- SourceLocation InstantiateLoc = Identifier.getLocation();
+ // Remember where the token is expanded.
+ SourceLocation ExpandLoc = Identifier.getLocation();
// If this macro expands to no tokens, don't bother to push it onto the
// expansion stack, only to take it right back off.
@@ -254,7 +255,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
}
Identifier.setFlag(Token::LeadingEmptyMacro);
- LastEmptyMacroInstantiationLoc = InstantiateLoc;
+ LastEmptyMacroExpansionLoc = ExpandLoc;
++NumFastMacroExpanded;
return false;
@@ -280,11 +281,11 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
- // Update the tokens location to include both its instantiation and physical
+ // Update the tokens location to include both its expansion and physical
// locations.
SourceLocation Loc =
- SourceMgr.createInstantiationLoc(Identifier.getLocation(), InstantiateLoc,
- InstantiationEnd,Identifier.getLength());
+ SourceMgr.createInstantiationLoc(Identifier.getLocation(), ExpandLoc,
+ ExpansionEnd,Identifier.getLength());
Identifier.setLocation(Loc);
// If this is a disabled macro or #define X X, we must mark the result as
@@ -302,7 +303,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
}
// Start expanding the macro.
- EnterMacro(Identifier, InstantiationEnd, Args);
+ EnterMacro(Identifier, ExpansionEnd, Args);
// Now that the macro is at the top of the include stack, ask the
// preprocessor to read the next token from it.
@@ -490,6 +491,46 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
isVarargsElided, *this);
}
+/// \brief Keeps macro-expanded tokens for TokenLexers.
+///
+/// Works like a stack; a TokenLexer adds the macro-expanded tokens that it is
+/// going to lex to the cache, and when it finishes, the tokens are removed
+/// from the end of the cache.
+Token *Preprocessor::cacheMacroExpandedTokens(TokenLexer *tokLexer,
+ llvm::ArrayRef<Token> tokens) {
+ assert(tokLexer);
+ if (tokens.empty())
+ return 0;
+
+ size_t newIndex = MacroExpandedTokens.size();
+ bool cacheNeedsToGrow = tokens.size() >
+ MacroExpandedTokens.capacity()-MacroExpandedTokens.size();
+ MacroExpandedTokens.append(tokens.begin(), tokens.end());
+
+ if (cacheNeedsToGrow) {
+ // Go through all the TokenLexers whose 'Tokens' pointer points into the
+ // buffer and update the pointers to the (potentially) new buffer array.
+ for (unsigned i = 0, e = MacroExpandingLexersStack.size(); i != e; ++i) {
+ TokenLexer *prevLexer;
+ size_t tokIndex;
+ llvm::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
+ prevLexer->Tokens = MacroExpandedTokens.data() + tokIndex;
+ }
+ }
+
+ MacroExpandingLexersStack.push_back(std::make_pair(tokLexer, newIndex));
+ return MacroExpandedTokens.data() + newIndex;
+}
+
+void Preprocessor::removeCachedMacroExpandedTokensOfLastLexer() {
+ assert(!MacroExpandingLexersStack.empty());
+ size_t tokIndex = MacroExpandingLexersStack.back().second;
+ assert(tokIndex < MacroExpandedTokens.size());
+ // Pop the cached macro expanded tokens from the end.
+ MacroExpandedTokens.resize(tokIndex);
+ MacroExpandingLexersStack.pop_back();
+}
+
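cacheMacroExpandedTokens appends each TokenLexer's expanded tokens to one shared vector and records a (lexer, start index) pair; if the append reallocates the buffer, every recorded lexer's raw Tokens pointer is rebased onto the new storage, and the tokens are popped off the end when the lexer finishes. A standalone sketch of that pattern under simplified, illustrative types (ToyToken, ToyLexer, TokenCache are not clang names):

#include <cstddef>
#include <utility>
#include <vector>

struct ToyToken { int Kind; };

struct ToyLexer {
  const ToyToken *Tokens = nullptr;  // points into the shared cache
  std::size_t NumTokens = 0;
};

class TokenCache {
  std::vector<ToyToken> Storage;
  std::vector<std::pair<ToyLexer *, std::size_t>> Active;  // lexer + start index

public:
  // Append the lexer's tokens; rebase earlier lexers if the buffer moved.
  const ToyToken *push(ToyLexer *L, const std::vector<ToyToken> &Toks) {
    std::size_t Start = Storage.size();
    bool WillGrow = Toks.size() > Storage.capacity() - Storage.size();
    Storage.insert(Storage.end(), Toks.begin(), Toks.end());
    if (WillGrow)
      for (auto &Entry : Active)
        Entry.first->Tokens = Storage.data() + Entry.second;
    Active.push_back({L, Start});
    L->Tokens = Storage.data() + Start;
    L->NumTokens = Toks.size();
    return L->Tokens;
  }

  // Pop the most recently pushed lexer's tokens off the end of the cache.
  void popLast() {
    Storage.resize(Active.back().second);
    Active.pop_back();
  }
};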
/// ComputeDATE_TIME - Compute the current time, enter it into the specified
/// scratch buffer, then return DATELoc/TIMELoc locations with the position of
/// the identifier tokens inserted.
@@ -551,6 +592,11 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
.Case("cxx_exceptions", LangOpts.Exceptions)
.Case("cxx_rtti", LangOpts.RTTI)
.Case("enumerator_attributes", true)
+ // Objective-C features
+ .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("objc_arc_weak", LangOpts.ObjCAutoRefCount &&
+ LangOpts.ObjCRuntimeHasWeak)
.Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
.Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
.Case("ownership_holds", true)
@@ -787,10 +833,10 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Loc = AdvanceToTokenCharacter(Loc, 0);
// One wrinkle here is that GCC expands __LINE__ to location of the *end* of
- // a macro instantiation. This doesn't matter for object-like macros, but
+ // a macro expansion. This doesn't matter for object-like macros, but
// can matter for a function-like macro that expands to contain __LINE__.
- // Skip down through instantiation points until we find a file loc for the
- // end of the instantiation history.
+ // Skip down through expansion points until we find a file loc for the
+ // end of the expansion history.
Loc = SourceMgr.getInstantiationRange(Loc).second;
PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 23855d4a4748..e6b28c13317b 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -326,9 +326,7 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
if (PLoc.isInvalid())
return;
- unsigned FilenameLen = strlen(PLoc.getFilename());
- unsigned FilenameID = SourceMgr.getLineTableFilenameID(PLoc.getFilename(),
- FilenameLen);
+ unsigned FilenameID = SourceMgr.getLineTableFilenameID(PLoc.getFilename());
// Notify the client, if desired, that we are in a new source file.
if (Callbacks)
@@ -370,7 +368,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
const DirectoryLookup *CurDir;
const FileEntry *File = LookupFile(Filename, isAngled, 0, CurDir, NULL, NULL);
if (File == 0) {
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ Diag(FilenameTok, diag::warn_pp_file_not_found) << Filename;
return;
}
@@ -454,8 +452,7 @@ void Preprocessor::HandlePragmaComment(Token &Tok) {
return;
}
- ArgumentString = std::string(Literal.GetString(),
- Literal.GetString()+Literal.GetStringLength());
+ ArgumentString = Literal.GetString();
}
// FIXME: If the kind is "compiler" warn if the string is present (it is
@@ -531,7 +528,7 @@ void Preprocessor::HandlePragmaMessage(Token &Tok) {
return;
}
- llvm::StringRef MessageString(Literal.GetString(), Literal.GetStringLength());
+ llvm::StringRef MessageString(Literal.GetString());
if (ExpectClosingParen) {
if (Tok.isNot(tok::r_paren)) {
@@ -839,8 +836,11 @@ struct PragmaDebugHandler : public PragmaHandler {
/// PragmaDiagnosticHandler - e.g. '#pragma GCC diagnostic ignored "-Wformat"'
struct PragmaDiagnosticHandler : public PragmaHandler {
+private:
+ const char *Namespace;
public:
- explicit PragmaDiagnosticHandler() : PragmaHandler("diagnostic") {}
+ explicit PragmaDiagnosticHandler(const char *NS) :
+ PragmaHandler("diagnostic"), Namespace(NS) {}
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &DiagToken) {
SourceLocation DiagLoc = DiagToken.getLocation();
@@ -851,6 +851,7 @@ public:
return;
}
IdentifierInfo *II = Tok.getIdentifierInfo();
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
diag::Mapping Map;
if (II->isStr("warning"))
@@ -864,10 +865,13 @@ public:
else if (II->isStr("pop")) {
if (!PP.getDiagnostics().popMappings(DiagLoc))
PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
-
+ else if (Callbacks)
+ Callbacks->PragmaDiagnosticPop(DiagLoc, Namespace);
return;
} else if (II->isStr("push")) {
PP.getDiagnostics().pushMappings(DiagLoc);
+ if (Callbacks)
+ Callbacks->PragmaDiagnosticPush(DiagLoc, Namespace);
return;
} else {
PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
@@ -906,7 +910,7 @@ public:
return;
}
- llvm::StringRef WarningName(Literal.GetString(), Literal.GetStringLength());
+ llvm::StringRef WarningName(Literal.GetString());
if (WarningName.size() < 3 || WarningName[0] != '-' ||
WarningName[1] != 'W') {
@@ -919,6 +923,8 @@ public:
Map, DiagLoc))
PP.Diag(StrToks[0].getLocation(),
diag::warn_pragma_diagnostic_unknown_warning) << WarningName;
+ else if (Callbacks)
+ Callbacks->PragmaDiagnostic(DiagLoc, Namespace, Map, WarningName);
}
};
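For reference, the directive sequence this handler parses (and now reports through the new PPCallbacks hooks) has the following shape; the example assumes -Wunused-variable is enabled:

void example() {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
  int temporarily_unused = 0;  // warning suppressed by the pragma above
#pragma clang diagnostic pop
}                              // -Wunused-variable applies again after the pop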
@@ -1013,13 +1019,13 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler("GCC", new PragmaPoisonHandler());
AddPragmaHandler("GCC", new PragmaSystemHeaderHandler());
AddPragmaHandler("GCC", new PragmaDependencyHandler());
- AddPragmaHandler("GCC", new PragmaDiagnosticHandler());
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler("GCC"));
// #pragma clang ...
AddPragmaHandler("clang", new PragmaPoisonHandler());
AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
AddPragmaHandler("clang", new PragmaDebugHandler());
AddPragmaHandler("clang", new PragmaDependencyHandler());
- AddPragmaHandler("clang", new PragmaDiagnosticHandler());
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler("clang"));
AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index 0c8d948ce0a7..9f93ab04502a 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -45,8 +45,8 @@ void PreprocessingRecord::MaybeLoadPreallocatedEntities() const {
ExternalSource->ReadPreprocessedEntities();
}
-PreprocessingRecord::PreprocessingRecord(bool IncludeNestedMacroInstantiations)
- : IncludeNestedMacroInstantiations(IncludeNestedMacroInstantiations),
+PreprocessingRecord::PreprocessingRecord(bool IncludeNestedMacroExpansions)
+ : IncludeNestedMacroExpansions(IncludeNestedMacroExpansions),
ExternalSource(0), NumPreallocatedEntities(0),
LoadedPreallocatedEntities(false)
{
@@ -121,14 +121,13 @@ MacroDefinition *PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
}
void PreprocessingRecord::MacroExpands(const Token &Id, const MacroInfo* MI) {
- if (!IncludeNestedMacroInstantiations && Id.getLocation().isMacroID())
+ if (!IncludeNestedMacroExpansions && Id.getLocation().isMacroID())
return;
if (MacroDefinition *Def = findMacroDefinition(MI))
PreprocessedEntities.push_back(
- new (*this) MacroInstantiation(Id.getIdentifierInfo(),
- Id.getLocation(),
- Def));
+ new (*this) MacroExpansion(Id.getIdentifierInfo(),
+ Id.getLocation(), Def));
}
void PreprocessingRecord::MacroDefined(const Token &Id,
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index fdc18f878180..e7aa286a16bf 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -118,6 +118,8 @@ Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
Preprocessor::~Preprocessor() {
assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
+ assert(MacroExpandingLexersStack.empty() && MacroExpandedTokens.empty() &&
+ "Preprocessor::HandleEndOfTokenLexer should have cleared those");
while (!IncludeMacroStack.empty()) {
delete IncludeMacroStack.back().TheLexer;
@@ -225,6 +227,10 @@ Preprocessor::macro_begin(bool IncludeExternalMacros) const {
return Macros.begin();
}
+size_t Preprocessor::getTotalMemory() const {
+ return BP.getTotalMemory() + MacroExpandedTokens.capacity()*sizeof(Token);
+}
+
Preprocessor::macro_iterator
Preprocessor::macro_end(bool IncludeExternalMacros) const {
if (IncludeExternalMacros && ExternalSource &&
@@ -322,15 +328,15 @@ llvm::StringRef Preprocessor::getSpelling(const Token &Tok,
/// location for it. If specified, the source location provides a source
/// location for the token.
void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
- SourceLocation InstantiationLoc) {
+ SourceLocation ExpansionLoc) {
Tok.setLength(Len);
const char *DestPtr;
SourceLocation Loc = ScratchBuf->getToken(Buf, Len, DestPtr);
- if (InstantiationLoc.isValid())
- Loc = SourceMgr.createInstantiationLoc(Loc, InstantiationLoc,
- InstantiationLoc, Len);
+ if (ExpansionLoc.isValid())
+ Loc = SourceMgr.createInstantiationLoc(Loc, ExpansionLoc,
+ ExpansionLoc, Len);
Tok.setLocation(Loc);
// If this is a raw identifier or a literal token, set the pointer data.
@@ -528,10 +534,10 @@ CommentHandler::~CommentHandler() { }
CodeCompletionHandler::~CodeCompletionHandler() { }
void Preprocessor::createPreprocessingRecord(
- bool IncludeNestedMacroInstantiations) {
+ bool IncludeNestedMacroExpansions) {
if (Record)
return;
- Record = new PreprocessingRecord(IncludeNestedMacroInstantiations);
+ Record = new PreprocessingRecord(IncludeNestedMacroExpansions);
addPPCallbacks(Record);
}
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 65aff0d1d037..8ff82f160033 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -23,7 +23,7 @@ using namespace clang;
/// Create a TokenLexer for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
-void TokenLexer::Init(Token &Tok, SourceLocation ILEnd, MacroArgs *Actuals) {
+void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroArgs *Actuals) {
// If the client is reusing a TokenLexer, make sure to free any memory
// associated with it.
destroy();
@@ -32,14 +32,36 @@ void TokenLexer::Init(Token &Tok, SourceLocation ILEnd, MacroArgs *Actuals) {
ActualArgs = Actuals;
CurToken = 0;
- InstantiateLocStart = Tok.getLocation();
- InstantiateLocEnd = ILEnd;
+ ExpandLocStart = Tok.getLocation();
+ ExpandLocEnd = ELEnd;
AtStartOfLine = Tok.isAtStartOfLine();
HasLeadingSpace = Tok.hasLeadingSpace();
Tokens = &*Macro->tokens_begin();
OwnsTokens = false;
DisableMacroExpansion = false;
NumTokens = Macro->tokens_end()-Macro->tokens_begin();
+ MacroExpansionStart = SourceLocation();
+
+ SourceManager &SM = PP.getSourceManager();
+ MacroStartSLocOffset = SM.getNextOffset();
+
+ if (NumTokens > 0) {
+ assert(Tokens[0].getLocation().isValid());
+ assert((Tokens[0].getLocation().isFileID() || Tokens[0].is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert(ExpandLocStart.isValid());
+
+ // Reserve a source location entry chunk for the length of the macro
+ // definition. Tokens that get lexed directly from the definition will
+ // have their locations pointing inside this chunk. This is to avoid
+ // creating separate source location entries for each token.
+ SourceLocation macroStart = SM.getInstantiationLoc(Tokens[0].getLocation());
+ MacroDefStartInfo = SM.getDecomposedLoc(macroStart);
+ MacroExpansionStart = SM.createInstantiationLoc(macroStart,
+ ExpandLocStart,
+ ExpandLocEnd,
+ Macro->getDefinitionLength(SM));
+ }
// If this is a function-like macro, expand the arguments and change
// Tokens to point to the expanded tokens.
@@ -69,9 +91,10 @@ void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
DisableMacroExpansion = disableMacroExpansion;
NumTokens = NumToks;
CurToken = 0;
- InstantiateLocStart = InstantiateLocEnd = SourceLocation();
+ ExpandLocStart = ExpandLocEnd = SourceLocation();
AtStartOfLine = false;
HasLeadingSpace = false;
+ MacroExpansionStart = SourceLocation();
// Set HasLeadingSpace/AtStartOfLine so that the first token will be
// returned unmodified.
@@ -98,6 +121,8 @@ void TokenLexer::destroy() {
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from Tokens.
void TokenLexer::ExpandFunctionArguments() {
+ SourceManager &SM = PP.getSourceManager();
+
llvm::SmallVector<Token, 128> ResultToks;
// Loop through 'Tokens', expanding them into ResultToks. Keep
@@ -119,13 +144,19 @@ void TokenLexer::ExpandFunctionArguments() {
int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
assert(ArgNo != -1 && "Token following # is not an argument?");
+ SourceLocation hashInstLoc;
+ if (ExpandLocStart.isValid()) {
+ hashInstLoc = getMacroExpansionLocation(CurTok.getLocation());
+ assert(hashInstLoc.isValid() && "Expected '#' to come from definition");
+ }
+
Token Res;
if (CurTok.is(tok::hash)) // Stringify
- Res = ActualArgs->getStringifiedArgument(ArgNo, PP);
+ Res = ActualArgs->getStringifiedArgument(ArgNo, PP, hashInstLoc);
else {
// 'charify': don't bother caching these.
Res = MacroArgs::StringifyArgument(ActualArgs->getUnexpArgument(ArgNo),
- PP, true);
+ PP, true, hashInstLoc);
}
// The stringified/charified string leading space flag gets set to match
@@ -185,6 +216,27 @@ void TokenLexer::ExpandFunctionArguments() {
unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if (ExpandLocStart.isValid()) {
+ SourceLocation curInst =
+ getMacroExpansionLocation(CurTok.getLocation());
+ assert(curInst.isValid() &&
+ "Expected arg identifier to come from definition");
+ for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ Tok.setLocation(SM.createMacroArgInstantiationLoc(Tok.getLocation(),
+ curInst,
+ Tok.getLength()));
+ }
+ }
+
// If any tokens were substituted from the argument, the whitespace
// before the first token should match the whitespace of the arg
// identifier.
@@ -220,6 +272,29 @@ void TokenLexer::ExpandFunctionArguments() {
ResultToks.append(ArgToks, ArgToks+NumToks);
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
+ i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if (ExpandLocStart.isValid()) {
+ SourceLocation curInst =
+ getMacroExpansionLocation(CurTok.getLocation());
+ assert(curInst.isValid() &&
+ "Expected arg identifier to come from definition");
+ for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
+ i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ Tok.setLocation(SM.createMacroArgInstantiationLoc(Tok.getLocation(),
+ curInst,
+ Tok.getLength()));
+ }
+ }
+
// If this token (the macro argument) was supposed to get leading
// whitespace, transfer this information onto the first token of the
// expansion.
@@ -284,15 +359,11 @@ void TokenLexer::ExpandFunctionArguments() {
assert(!OwnsTokens && "This would leak if we already own the token list");
// This is deleted in the dtor.
NumTokens = ResultToks.size();
- llvm::BumpPtrAllocator &Alloc = PP.getPreprocessorAllocator();
- Token *Res =
- static_cast<Token *>(Alloc.Allocate(sizeof(Token)*ResultToks.size(),
- llvm::alignOf<Token>()));
- if (NumTokens)
- memcpy(Res, &ResultToks[0], NumTokens*sizeof(Token));
- Tokens = Res;
-
- // The preprocessor bump pointer owns these tokens, not us.
+ // The tokens will be added to Preprocessor's cache and will be removed
+ // when this TokenLexer finishes lexing them.
+ Tokens = PP.cacheMacroExpandedTokens(this, ResultToks);
+
+ // The preprocessor cache of macro-expanded tokens owns these tokens, not us.
OwnsTokens = false;
}
}
@@ -317,6 +388,8 @@ void TokenLexer::Lex(Token &Tok) {
return PPCache.Lex(Tok);
}
+ SourceManager &SM = PP.getSourceManager();
+
// If this is the first token of the expanded result, we inherit spacing
// properties later.
bool isFirstToken = CurToken == 0;
@@ -327,7 +400,8 @@ void TokenLexer::Lex(Token &Tok) {
bool TokenIsFromPaste = false;
// If this token is followed by a token paste (##) operator, paste the tokens!
- if (!isAtEnd() && Tokens[CurToken].is(tok::hashhash)) {
+ // Note that ## is a normal token when not expanding a macro.
+ if (!isAtEnd() && Tokens[CurToken].is(tok::hashhash) && Macro) {
// When handling the microsoft /##/ extension, the final token is
// returned by PasteTokens, not the pasted token.
if (PasteTokens(Tok))
@@ -339,14 +413,25 @@ void TokenLexer::Lex(Token &Tok) {
 // The token's current location indicates where the token was lexed from. We
// need this information to compute the spelling of the token, but any
// diagnostics for the expanded token should appear as if they came from
- // InstantiationLoc. Pull this information together into a new SourceLocation
+ // ExpansionLoc. Pull this information together into a new SourceLocation
// that captures all of this.
- if (InstantiateLocStart.isValid()) { // Don't do this for token streams.
- SourceManager &SM = PP.getSourceManager();
- Tok.setLocation(SM.createInstantiationLoc(Tok.getLocation(),
- InstantiateLocStart,
- InstantiateLocEnd,
- Tok.getLength()));
+ if (ExpandLocStart.isValid() && // Don't do this for token streams.
+ // Check that the token's location was not already set properly.
+ SM.isBeforeInSourceLocationOffset(Tok.getLocation(),
+ MacroStartSLocOffset)) {
+ SourceLocation instLoc;
+ if (Tok.is(tok::comment)) {
+ instLoc = SM.createInstantiationLoc(Tok.getLocation(),
+ ExpandLocStart,
+ ExpandLocEnd,
+ Tok.getLength());
+ } else {
+ instLoc = getMacroExpansionLocation(Tok.getLocation());
+ assert(instLoc.isValid() &&
+ "Location for token not coming from definition was not set!");
+ }
+
+ Tok.setLocation(instLoc);
}
// If this is the first token, set the lexical properties of the token to
@@ -384,9 +469,10 @@ void TokenLexer::Lex(Token &Tok) {
bool TokenLexer::PasteTokens(Token &Tok) {
llvm::SmallString<128> Buffer;
const char *ResultTokStrPtr = 0;
+ SourceLocation PasteOpLoc;
do {
// Consume the ## operator.
- SourceLocation PasteOpLoc = Tokens[CurToken].getLocation();
+ PasteOpLoc = Tokens[CurToken].getLocation();
++CurToken;
assert(!isAtEnd() && "No token on the RHS of a paste operator!");
@@ -484,12 +570,12 @@ bool TokenLexer::PasteTokens(Token &Tok) {
// Do not emit the error when preprocessing assembler code.
if (!PP.getLangOptions().AsmPreprocessor) {
- // Explicitly convert the token location to have proper instantiation
+ // Explicitly convert the token location to have proper expansion
// information so that the user knows where it came from.
SourceManager &SM = PP.getSourceManager();
SourceLocation Loc =
- SM.createInstantiationLoc(PasteOpLoc, InstantiateLocStart,
- InstantiateLocEnd, 2);
+ SM.createInstantiationLoc(PasteOpLoc, ExpandLocStart,
+ ExpandLocEnd, 2);
// If we're in microsoft extensions mode, downgrade this from a hard
// error to a warning that defaults to an error. This allows
// disabling it.
@@ -512,12 +598,30 @@ bool TokenLexer::PasteTokens(Token &Tok) {
 // Transfer properties of the LHS over to the Result.
Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
-
+
// Finally, replace LHS with the result, consume the RHS, and iterate.
++CurToken;
Tok = Result;
} while (!isAtEnd() && Tokens[CurToken].is(tok::hashhash));
+ // The token's current location indicates where the token was lexed from. We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if the token was
+ // expanded from the (##) operator. Pull this information together into
+ // a new SourceLocation that captures all of this.
+ if (ExpandLocStart.isValid()) {
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation pasteLocInst =
+ getMacroExpansionLocation(PasteOpLoc);
+ assert(pasteLocInst.isValid() &&
+ "Expected '##' to come from definition");
+
+ Tok.setLocation(SM.createInstantiationLoc(Tok.getLocation(),
+ pasteLocInst,
+ pasteLocInst,
+ Tok.getLength()));
+ }
+
// Now that we got the result token, it will be subject to expansion. Since
// token pasting re-lexes the result token in raw mode, identifier information
// isn't looked up. As such, if the result is an identifier, look up id info.
@@ -548,7 +652,7 @@ bool TokenLexer::isParsingPreprocessorDirective() const {
/// HandleMicrosoftCommentPaste - In microsoft compatibility mode, /##/ pastes
/// together to form a comment that comments out everything in the current
/// macro, other active macros, and anything left on the current physical
-/// source line of the instantiated buffer. Handle this by returning the
+/// source line of the expanded buffer. Handle this by returning the
/// first token on the next line.
void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok) {
// We 'comment out' the rest of this macro by just ignoring the rest of the
@@ -561,3 +665,23 @@ void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok) {
PP.HandleMicrosoftCommentPaste(Tok);
}
+
+/// \brief If \arg loc is a FileID and points inside the current macro
+/// definition, returns the appropriate source location pointing at the
+/// macro expansion source location entry.
+SourceLocation TokenLexer::getMacroExpansionLocation(SourceLocation loc) const {
+ assert(ExpandLocStart.isValid() && MacroExpansionStart.isValid() &&
+ "Not appropriate for token streams");
+ assert(loc.isValid());
+
+ SourceManager &SM = PP.getSourceManager();
+ unsigned relativeOffset;
+ if (loc.isFileID() &&
+ SM.isInFileID(loc,
+ MacroDefStartInfo.first, MacroDefStartInfo.second,
+ Macro->getDefinitionLength(SM), &relativeOffset)) {
+ return MacroExpansionStart.getFileLocWithOffset(relativeOffset);
+ }
+
+ return SourceLocation();
+}
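getMacroExpansionLocation reduces to offset translation: a location that falls inside the macro definition's file range is mapped to the same relative offset inside the reserved expansion chunk. A hedged sketch of that mapping with plain integer offsets (ExpansionMapping and mapToExpansion are illustrative names, not clang's SourceManager API):

#include <optional>

// Offsets of the macro definition in its file and of the reserved expansion
// chunk in the source-location address space (illustrative values only).
struct ExpansionMapping {
  unsigned DefStart;        // where the definition's first token begins
  unsigned DefLength;       // total length of the definition
  unsigned ExpansionStart;  // start of the reserved expansion chunk
};

// If 'loc' falls inside the definition, return the corresponding location
// inside the expansion chunk; otherwise signal "not from the definition".
std::optional<unsigned> mapToExpansion(const ExpansionMapping &M, unsigned loc) {
  if (loc >= M.DefStart && loc < M.DefStart + M.DefLength)
    return M.ExpansionStart + (loc - M.DefStart);
  return std::nullopt;
}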
diff --git a/lib/Makefile b/lib/Makefile
index 924819c81807..74df7abcef24 100755
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -9,8 +9,8 @@
CLANG_LEVEL := ..
PARALLEL_DIRS = Headers Basic Lex Parse AST Sema CodeGen Analysis \
- StaticAnalyzer Rewrite Serialization Frontend FrontendTool \
- Index Driver
+ StaticAnalyzer Rewrite ARCMigrate Serialization Frontend \
+ FrontendTool Index Driver
include $(CLANG_LEVEL)/Makefile
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index 21917b23ffb6..56584c96180b 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -57,6 +57,10 @@ void clang::ParseAST(Sema &S, bool PrintStats) {
Stmt::CollectingStats(true);
}
+ // Also turn on collection of stats inside the Sema object.
+ bool OldCollectStats = PrintStats;
+ std::swap(OldCollectStats, S.CollectStats);
+
ASTConsumer *Consumer = &S.getASTConsumer();
llvm::OwningPtr<Parser> ParseOP(new Parser(S.getPreprocessor(), S));
@@ -95,9 +99,10 @@ void clang::ParseAST(Sema &S, bool PrintStats) {
Consumer->HandleTopLevelDecl(DeclGroupRef(*I));
Consumer->HandleTranslationUnit(S.getASTContext());
-
+
+ std::swap(OldCollectStats, S.CollectStats);
if (PrintStats) {
- fprintf(stderr, "\nSTATISTICS:\n");
+ llvm::errs() << "\nSTATISTICS:\n";
P.getActions().PrintStats();
S.getASTContext().PrintStats();
Decl::PrintStats();
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index ad3fcfe0d369..0e17295858b9 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -31,10 +31,16 @@ using namespace clang;
///
/// Called type-id in C++.
TypeResult Parser::ParseTypeName(SourceRange *Range,
- Declarator::TheContext Context) {
+ Declarator::TheContext Context,
+ ObjCDeclSpec *objcQuals,
+ AccessSpecifier AS,
+ Decl **OwnedType) {
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
- ParseSpecifierQualifierList(DS);
+ DS.setObjCQualifiers(objcQuals);
+ ParseSpecifierQualifierList(DS, AS);
+ if (OwnedType)
+ *OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : 0;
// Parse the abstract-declarator, if present.
Declarator DeclaratorInfo(DS, Context);
@@ -672,6 +678,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
ParenBraceBracketBalancer BalancerRAIIObj(*this);
Decl *SingleDecl = 0;
+ Decl *OwnedType = 0;
switch (Tok.getKind()) {
case tok::kw_template:
case tok::kw_export:
@@ -694,7 +701,7 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
break;
case tok::kw_using:
SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
- DeclEnd, attrs);
+ DeclEnd, attrs, &OwnedType);
break;
case tok::kw_static_assert:
case tok::kw__Static_assert:
@@ -706,8 +713,9 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
}
 // This routine returns a DeclGroup; if the thing we parsed only contains a
- // single decl, convert it now.
- return Actions.ConvertDeclToDeclGroup(SingleDecl);
+ // single decl, convert it now. Alias declarations can also declare a type;
+ // include that too if it is present.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl, OwnedType);
}
/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
@@ -1079,10 +1087,10 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
/// type-qualifier specifier-qualifier-list[opt]
/// [GNU] attributes specifier-qualifier-list[opt]
///
-void Parser::ParseSpecifierQualifierList(DeclSpec &DS) {
+void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS) {
/// specifier-qualifier-list is a subset of declaration-specifiers. Just
/// parse declaration-specifiers and complain about extra stuff.
- ParseDeclarationSpecifiers(DS);
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS);
// Validate declspec for type-name.
unsigned Specs = DS.getParsedSpecifiers();
@@ -1394,8 +1402,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Thus, if the template-name is actually the constructor
// name, then the code is ill-formed; this interpretation is
// reinforced by the NAD status of core issue 635.
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
if ((DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified())) &&
TemplateId->Name &&
@@ -1597,8 +1604,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// type-name
case tok::annot_template_id: {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind != TNK_Type_template) {
// This template-id does not refer to a type name, so we're
// done with the type-specifiers.
@@ -2444,13 +2450,29 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
ConsumeCodeCompletionToken();
}
+
+ bool IsScopedEnum = false;
+ bool IsScopedUsingClassTag = false;
+
+ if (getLang().CPlusPlus0x &&
+ (Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
+ IsScopedEnum = true;
+ IsScopedUsingClassTag = Tok.is(tok::kw_class);
+ ConsumeToken();
+ }
// If attributes exist after tag, parse them.
ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
+ bool AllowFixedUnderlyingType = getLang().CPlusPlus0x || getLang().Microsoft;
+
CXXScopeSpec &SS = DS.getTypeSpecScope();
if (getLang().CPlusPlus) {
+ // "enum foo : bar;" is not a potential typo for "enum foo::bar;"
+ // if a fixed underlying type is allowed.
+ ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
+
if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false))
return;
@@ -2465,17 +2487,6 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
}
}
- bool AllowFixedUnderlyingType = getLang().CPlusPlus0x || getLang().Microsoft;
- bool IsScopedEnum = false;
- bool IsScopedUsingClassTag = false;
-
- if (getLang().CPlusPlus0x &&
- (Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
- IsScopedEnum = true;
- IsScopedUsingClassTag = Tok.is(tok::kw_class);
- ConsumeToken();
- }
-
// Must have either 'enum name' or 'enum {...}'.
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
(AllowFixedUnderlyingType && Tok.isNot(tok::colon))) {
@@ -3016,6 +3027,10 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw___attribute:
return true;
+ // C++0x decltype.
+ case tok::kw_decltype:
+ return true;
+
// GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
case tok::less:
return getLang().ObjC1;
@@ -3639,33 +3654,15 @@ void Parser::ParseParenDeclarator(Declarator &D) {
/// declarator D up to a paren, which indicates that we are parsing function
/// arguments.
///
-/// If AttrList is non-null, then the caller parsed those arguments immediately
+/// If attrs is non-null, then the caller parsed those attributes immediately
/// after the open paren - they should be considered to be part of the first
/// parameter. If RequiresArg is true, then the first argument of the
/// function is required to be present and required to not be an identifier
/// list.
///
-/// This method also handles this portion of the grammar:
-/// parameter-type-list: [C99 6.7.5]
-/// parameter-list
-/// parameter-list ',' '...'
-/// [C++] parameter-list '...'
-///
-/// parameter-list: [C99 6.7.5]
-/// parameter-declaration
-/// parameter-list ',' parameter-declaration
-///
-/// parameter-declaration: [C99 6.7.5]
-/// declaration-specifiers declarator
-/// [C++] declaration-specifiers declarator '=' assignment-expression
-/// [GNU] declaration-specifiers declarator attributes
-/// declaration-specifiers abstract-declarator[opt]
-/// [C++] declaration-specifiers abstract-declarator[opt]
-/// '=' assignment-expression
-/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
-///
-/// For C++, after the parameter-list, it also parses "cv-qualifier-seq[opt]",
-/// C++0x "ref-qualifier[opt]" and "exception-specification[opt]".
+/// For C++, after the parameter-list, it also parses cv-qualifier-seq[opt],
+/// (C++0x) ref-qualifier[opt], exception-specification[opt], and
+/// (C++0x) trailing-return-type[opt].
///
/// [C++0x] exception-specification:
/// dynamic-exception-specification
@@ -3677,36 +3674,62 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// lparen is already consumed!
assert(D.isPastIdentifier() && "Should not call before identifier!");
+ // This should be true when the function has typed arguments.
+ // Otherwise, it is treated as a K&R-style function.
+ bool HasProto = false;
+ // Build up an array of information about the parsed arguments.
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ // Remember where we see an ellipsis, if any.
+ SourceLocation EllipsisLoc;
+
+ DeclSpec DS(AttrFactory);
+ bool RefQualifierIsLValueRef = true;
+ SourceLocation RefQualifierLoc;
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ llvm::SmallVector<ParsedType, 2> DynamicExceptions;
+ llvm::SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
ParsedType TrailingReturnType;
+
+ SourceLocation EndLoc;
- // This parameter list may be empty.
- if (Tok.is(tok::r_paren)) {
+ if (isFunctionDeclaratorIdentifierList()) {
if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- SourceLocation EndLoc = ConsumeParen(); // Eat the closing ')'.
+ ParseFunctionDeclaratorIdentifierList(D, ParamInfo);
+
+ EndLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ } else {
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+
+ if (Tok.isNot(tok::r_paren))
+ ParseParameterDeclarationClause(D, attrs, ParamInfo, EllipsisLoc);
+ else if (RequiresArg)
+ Diag(Tok, diag::err_argument_required_after_attribute);
+
+ HasProto = ParamInfo.size() || getLang().CPlusPlus;
+
+ // If we have the closing ')', eat it.
+ EndLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
- // cv-qualifier-seq[opt].
- DeclSpec DS(AttrFactory);
- SourceLocation RefQualifierLoc;
- bool RefQualifierIsLValueRef = true;
- ExceptionSpecificationType ESpecType = EST_None;
- SourceRange ESpecRange;
- llvm::SmallVector<ParsedType, 2> DynamicExceptions;
- llvm::SmallVector<SourceRange, 2> DynamicExceptionRanges;
- ExprResult NoexceptExpr;
if (getLang().CPlusPlus) {
MaybeParseCXX0XAttributes(attrs);
+ // Parse cv-qualifier-seq[opt].
ParseTypeQualifierListOpt(DS, false /*no attributes*/);
- if (!DS.getSourceRange().getEnd().isInvalid())
- EndLoc = DS.getSourceRange().getEnd();
+ if (!DS.getSourceRange().getEnd().isInvalid())
+ EndLoc = DS.getSourceRange().getEnd();
- // Parse ref-qualifier[opt]
+ // Parse ref-qualifier[opt].
if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
if (!getLang().CPlusPlus0x)
Diag(Tok, diag::ext_ref_qualifier);
-
+
RefQualifierIsLValueRef = Tok.is(tok::amp);
RefQualifierLoc = ConsumeToken();
EndLoc = RefQualifierLoc;
@@ -3720,87 +3743,158 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
if (ESpecType != EST_None)
EndLoc = ESpecRange.getEnd();
- // Parse trailing-return-type.
+ // Parse trailing-return-type[opt].
if (getLang().CPlusPlus0x && Tok.is(tok::arrow)) {
TrailingReturnType = ParseTrailingReturnType().get();
}
}
- // Remember that we parsed a function type, and remember the attributes.
- // int() -> no prototype, no '...'.
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*prototype*/getLang().CPlusPlus,
- /*variadic*/ false,
- SourceLocation(),
- /*arglist*/ 0, 0,
- DS.getTypeQualifiers(),
- RefQualifierIsLValueRef,
- RefQualifierLoc,
- ESpecType, ESpecRange.getBegin(),
- DynamicExceptions.data(),
- DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
- NoexceptExpr.isUsable() ?
- NoexceptExpr.get() : 0,
- LParenLoc, EndLoc, D,
- TrailingReturnType),
- attrs, EndLoc);
- return;
+ // Leave prototype scope.
+ PrototypeScope.Exit();
}
- // Alternatively, this parameter list may be an identifier list form for a
- // K&R-style function: void foo(a,b,c)
- if (!getLang().CPlusPlus && Tok.is(tok::identifier)
- && !TryAltiVecVectorToken()) {
- if (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename)) {
- // K&R identifier lists can't have typedefs as identifiers, per
- // C99 6.7.5.3p11.
- if (RequiresArg)
- Diag(Tok, diag::err_argument_required_after_attribute);
-
- // Identifier list. Note that '(' identifier-list ')' is only allowed for
- // normal declarators, not for abstract-declarators. Get the first
- // identifier.
- Token FirstTok = Tok;
- ConsumeToken(); // eat the first identifier.
-
- // Identifier lists follow a really simple grammar: the identifiers can
- // be followed *only* by a ", moreidentifiers" or ")". However, K&R
- // identifier lists are really rare in the brave new modern world, and it
- // is very common for someone to typo a type in a non-k&r style list. If
- // we are presented with something like: "void foo(intptr x, float y)",
- // we don't want to start parsing the function declarator as though it is
- // a K&R style declarator just because intptr is an invalid type.
- //
- // To handle this, we check to see if the token after the first identifier
- // is a "," or ")". Only if so, do we parse it as an identifier list.
- if (Tok.is(tok::comma) || Tok.is(tok::r_paren))
- return ParseFunctionDeclaratorIdentifierList(LParenLoc,
- FirstTok.getIdentifierInfo(),
- FirstTok.getLocation(), D);
-
- // If we get here, the code is invalid. Push the first identifier back
- // into the token stream and parse the first argument as an (invalid)
- // normal argument declarator.
- PP.EnterToken(Tok);
- Tok = FirstTok;
+ // Remember that we parsed a function type, and remember the attributes.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
+ /*isVariadic=*/EllipsisLoc.isValid(),
+ EllipsisLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ DS.getTypeQualifiers(),
+ RefQualifierIsLValueRef,
+ RefQualifierLoc,
+ /*MutableLoc=*/SourceLocation(),
+ ESpecType, ESpecRange.getBegin(),
+ DynamicExceptions.data(),
+ DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ?
+ NoexceptExpr.get() : 0,
+ LParenLoc, EndLoc, D,
+ TrailingReturnType),
+ attrs, EndLoc);
+}
+
+/// isFunctionDeclaratorIdentifierList - This parameter list may have an
+/// identifier list form for a K&R-style function: void foo(a,b,c)
+///
+/// Note that identifier-lists are only allowed for normal declarators, not for
+/// abstract-declarators.
+bool Parser::isFunctionDeclaratorIdentifierList() {
+ return !getLang().CPlusPlus
+ && Tok.is(tok::identifier)
+ && !TryAltiVecVectorToken()
+ // K&R identifier lists can't have typedefs as identifiers, per C99
+ // 6.7.5.3p11.
+ && (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename))
+ // Identifier lists follow a really simple grammar: the identifiers can
+ // be followed *only* by a ", identifier" or ")". However, K&R
+ // identifier lists are really rare in the brave new modern world, and
+ // it is very common for someone to typo a type in a non-K&R style
+ // list. If we are presented with something like: "void foo(intptr x,
+ // float y)", we don't want to start parsing the function declarator as
+ // though it is a K&R style declarator just because intptr is an
+ // invalid type.
+ //
+ // To handle this, we check to see if the token after the first
+ // identifier is a "," or ")". Only then do we parse it as an
+ // identifier list.
+ && (NextToken().is(tok::comma) || NextToken().is(tok::r_paren));
+}
+
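isFunctionDeclaratorIdentifierList implements the disambiguation described in its comment: in C, a leading identifier that is not a type name and is followed by ',' or ')' starts a K&R identifier list, so "void foo(a, b, c)" qualifies while the typo'd "void foo(intptr x, float y)" does not. A simplified standalone sketch of that lookahead over a toy token stream (looksLikeKnRIdentifierList is an illustrative helper, not the parser's interface):

#include <string>
#include <unordered_set>
#include <vector>

// Toy token stream: the tokens after the declarator's '(' as plain strings.
// A real parser works on annotated tokens; this only mirrors the lookahead.
bool looksLikeKnRIdentifierList(const std::vector<std::string> &Toks,
                                const std::unordered_set<std::string> &TypeNames,
                                bool IsCPlusPlus) {
  if (IsCPlusPlus || Toks.empty())
    return false;                 // C++ has no K&R-style declarators
  if (TypeNames.count(Toks[0]))
    return false;                 // typedef names cannot start the list
  if (Toks.size() < 2)
    return false;
  // Only ", more-identifiers" or ")" may follow the first identifier.
  return Toks[1] == "," || Toks[1] == ")";
}

// looksLikeKnRIdentifierList({"a", ",", "b", ",", "c", ")"}, {}, false)
//   -> true  (treated as a K&R identifier list)
// looksLikeKnRIdentifierList({"intptr", "x", ",", "float", "y", ")"}, {}, false)
//   -> false ("intptr" is a typo'd type: the next token is neither ',' nor ')')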
+/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
+/// we found a K&R-style identifier list instead of a typed parameter list.
+///
+/// After returning, ParamInfo will hold the parsed parameters.
+///
+/// identifier-list: [C99 6.7.5]
+/// identifier
+/// identifier-list ',' identifier
+///
+void Parser::ParseFunctionDeclaratorIdentifierList(
+ Declarator &D,
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo) {
+ // If there was no identifier specified for the declarator, either we are in
+ // an abstract-declarator, or we are in a parameter declarator which was found
+ // to be abstract. In abstract-declarators, identifier lists are not valid:
+ // diagnose this.
+ if (!D.getIdentifier())
+ Diag(Tok, diag::ext_ident_list_in_param);
+
+ // Maintain an efficient lookup of params we have seen so far.
+ llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
+
+ while (1) {
+ // If this isn't an identifier, report the error and skip until ')'.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ // Forget we parsed anything.
+ ParamInfo.clear();
+ return;
}
- }
- // Finally, a normal, non-empty parameter type list.
+ IdentifierInfo *ParmII = Tok.getIdentifierInfo();
- // Build up an array of information about the parsed arguments.
- llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ // Reject 'typedef int y; int test(x, y)', but continue parsing.
+ if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope()))
+ Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
+
+ // Verify that the argument identifier has not already been mentioned.
+ if (!ParamsSoFar.insert(ParmII)) {
+ Diag(Tok, diag::err_param_redefinition) << ParmII;
+ } else {
+ // Remember this identifier in ParamInfo.
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ Tok.getLocation(),
+ 0));
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
- // Enter function-declaration scope, limiting any declarators to the
- // function prototype scope, including parameter declarators.
- ParseScope PrototypeScope(this,
- Scope::FunctionPrototypeScope|Scope::DeclScope);
+ // The list continues if we see a comma.
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken();
+ }
+}
+
+/// ParseParameterDeclarationClause - Parse a (possibly empty) parameter-list
+/// after the opening parenthesis. This function will not parse a K&R-style
+/// identifier list.
+///
+/// D is the declarator being parsed. If attrs is non-null, then the caller
+/// parsed those attributes immediately after the open paren - they should be
+/// considered to be part of the first parameter.
+///
+/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
+/// be the location of the ellipsis, if any was parsed.
+///
+/// parameter-type-list: [C99 6.7.5]
+/// parameter-list
+/// parameter-list ',' '...'
+/// [C++] parameter-list '...'
+///
+/// parameter-list: [C99 6.7.5]
+/// parameter-declaration
+/// parameter-list ',' parameter-declaration
+///
+/// parameter-declaration: [C99 6.7.5]
+/// declaration-specifiers declarator
+/// [C++] declaration-specifiers declarator '=' assignment-expression
+/// [GNU] declaration-specifiers declarator attributes
+/// declaration-specifiers abstract-declarator[opt]
+/// [C++] declaration-specifiers abstract-declarator[opt]
+/// '=' assignment-expression
+/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
+///
+void Parser::ParseParameterDeclarationClause(
+ Declarator &D,
+ ParsedAttributes &attrs,
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo,
+ SourceLocation &EllipsisLoc) {
- bool IsVariadic = false;
- SourceLocation EllipsisLoc;
while (1) {
if (Tok.is(tok::ellipsis)) {
- IsVariadic = true;
EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
break;
}
@@ -3817,6 +3911,11 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// If the caller parsed attributes for the first argument, add them now.
// Take them so that we only apply the attributes to the first parameter.
+ // FIXME: If we saw an ellipsis first, this code is not reached. Are the
+ // attributes lost? Should they even be allowed?
+ // FIXME: If we can leave the attributes in the token stream somehow, we can
+ // get rid of a parameter (attrs) and this statement. It might be too much
+ // hassle.
DS.takeAttributesFrom(attrs);
ParseDeclarationSpecifiers(DS);
@@ -3912,7 +4011,6 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// If the next token is a comma, consume it and keep reading arguments.
if (Tok.isNot(tok::comma)) {
if (Tok.is(tok::ellipsis)) {
- IsVariadic = true;
EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
if (!getLang().CPlusPlus) {
@@ -3930,150 +4028,6 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
ConsumeToken();
}
- // If we have the closing ')', eat it.
- SourceLocation EndLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
-
- DeclSpec DS(AttrFactory);
- SourceLocation RefQualifierLoc;
- bool RefQualifierIsLValueRef = true;
- ExceptionSpecificationType ESpecType = EST_None;
- SourceRange ESpecRange;
- llvm::SmallVector<ParsedType, 2> DynamicExceptions;
- llvm::SmallVector<SourceRange, 2> DynamicExceptionRanges;
- ExprResult NoexceptExpr;
-
- if (getLang().CPlusPlus) {
- MaybeParseCXX0XAttributes(attrs);
-
- // Parse cv-qualifier-seq[opt].
- ParseTypeQualifierListOpt(DS, false /*no attributes*/);
- if (!DS.getSourceRange().getEnd().isInvalid())
- EndLoc = DS.getSourceRange().getEnd();
-
- // Parse ref-qualifier[opt]
- if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
- if (!getLang().CPlusPlus0x)
- Diag(Tok, diag::ext_ref_qualifier);
-
- RefQualifierIsLValueRef = Tok.is(tok::amp);
- RefQualifierLoc = ConsumeToken();
- EndLoc = RefQualifierLoc;
- }
-
- // FIXME: We should leave the prototype scope before parsing the exception
- // specification, and then reenter it when parsing the trailing return type.
- // FIXMEFIXME: Why? That wouldn't be right for the noexcept clause.
-
- // Parse exception-specification[opt].
- ESpecType = MaybeParseExceptionSpecification(ESpecRange,
- DynamicExceptions,
- DynamicExceptionRanges,
- NoexceptExpr);
- if (ESpecType != EST_None)
- EndLoc = ESpecRange.getEnd();
-
- // Parse trailing-return-type.
- if (getLang().CPlusPlus0x && Tok.is(tok::arrow)) {
- TrailingReturnType = ParseTrailingReturnType().get();
- }
- }
-
- // Leave prototype scope.
- PrototypeScope.Exit();
-
- // Remember that we parsed a function type, and remember the attributes.
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/true, IsVariadic,
- EllipsisLoc,
- ParamInfo.data(), ParamInfo.size(),
- DS.getTypeQualifiers(),
- RefQualifierIsLValueRef,
- RefQualifierLoc,
- ESpecType, ESpecRange.getBegin(),
- DynamicExceptions.data(),
- DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
- NoexceptExpr.isUsable() ?
- NoexceptExpr.get() : 0,
- LParenLoc, EndLoc, D,
- TrailingReturnType),
- attrs, EndLoc);
-}
-
-/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
-/// we found a K&R-style identifier list instead of a type argument list. The
-/// first identifier has already been consumed, and the current token is the
-/// token right after it.
-///
-/// identifier-list: [C99 6.7.5]
-/// identifier
-/// identifier-list ',' identifier
-///
-void Parser::ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc,
- IdentifierInfo *FirstIdent,
- SourceLocation FirstIdentLoc,
- Declarator &D) {
- // Build up an array of information about the parsed arguments.
- llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
- llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
-
- // If there was no identifier specified for the declarator, either we are in
- // an abstract-declarator, or we are in a parameter declarator which was found
- // to be abstract. In abstract-declarators, identifier lists are not valid:
- // diagnose this.
- if (!D.getIdentifier())
- Diag(FirstIdentLoc, diag::ext_ident_list_in_param);
-
- // The first identifier was already read, and is known to be the first
- // identifier in the list. Remember this identifier in ParamInfo.
- ParamsSoFar.insert(FirstIdent);
- ParamInfo.push_back(DeclaratorChunk::ParamInfo(FirstIdent, FirstIdentLoc, 0));
-
- while (Tok.is(tok::comma)) {
- // Eat the comma.
- ConsumeToken();
-
- // If this isn't an identifier, report the error and skip until ')'.
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_expected_ident);
- SkipUntil(tok::r_paren);
- return;
- }
-
- IdentifierInfo *ParmII = Tok.getIdentifierInfo();
-
- // Reject 'typedef int y; int test(x, y)', but continue parsing.
- if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope()))
- Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
-
- // Verify that the argument identifier has not already been mentioned.
- if (!ParamsSoFar.insert(ParmII)) {
- Diag(Tok, diag::err_param_redefinition) << ParmII;
- } else {
- // Remember this identifier in ParamInfo.
- ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
- Tok.getLocation(),
- 0));
- }
-
- // Eat the identifier.
- ConsumeToken();
- }
-
- // If we have the closing ')', eat it and we're done.
- SourceLocation RLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
-
- // Remember that we parsed a function type, and remember the attributes. This
- // function type is always a K&R style function type, which is not varargs and
- // has no prototype.
- ParsedAttributes attrs(AttrFactory);
- D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/false, /*varargs*/false,
- SourceLocation(),
- &ParamInfo[0], ParamInfo.size(),
- /*TypeQuals*/0,
- true, SourceLocation(),
- EST_None, SourceLocation(), 0, 0,
- 0, 0, LParenLoc, RLoc, D),
- attrs, RLoc);
}
/// [C90] direct-declarator '[' constant-expression[opt] ']'
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 51aa01091e73..172049c34053 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -314,7 +314,8 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs) {
+ ParsedAttributesWithRange &attrs,
+ Decl **OwnedType) {
assert(Tok.is(tok::kw_using) && "Not using token");
// Eat 'using'.
@@ -342,7 +343,8 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
// Using declarations can't have attributes.
ProhibitAttributes(attrs);
- return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd);
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd,
+ AS_none, OwnedType);
}
/// ParseUsingDirective - Parse C++ using-directive, assumes
@@ -422,7 +424,8 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
- AccessSpecifier AS) {
+ AccessSpecifier AS,
+ Decl **OwnedType) {
CXXScopeSpec SS;
SourceLocation TypenameLoc;
bool IsTypeName;
@@ -511,7 +514,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
TypeAlias = ParseTypeName(0, TemplateInfo.Kind ?
Declarator::AliasTemplateContext :
- Declarator::AliasDeclContext);
+ Declarator::AliasDeclContext, 0, AS, OwnedType);
} else
// Parse (optional) attributes (most likely GNU strong-using extension).
MaybeParseGNUAttributes(attrs);
@@ -701,8 +704,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
CXXScopeSpec &SS) {
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Type_template ||
TemplateId->Kind == TNK_Dependent_template_name) {
AnnotateTemplateIdTokenAsType();
@@ -976,7 +978,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
} else if (Tok.is(tok::annot_template_id)) {
- TemplateId = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateId = takeTemplateIdAnnotation(Tok);
NameLoc = ConsumeToken();
if (TemplateId->Kind != TNK_Type_template &&
@@ -993,7 +995,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DS.SetTypeSpecError();
SkipUntil(tok::semi, false, true);
- TemplateId->Destroy();
if (SuppressingAccessChecks)
Actions.ActOnStopSuppressingAccessChecks();
@@ -1051,9 +1052,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
SkipUntil(tok::comma, true);
-
- if (TemplateId)
- TemplateId->Destroy();
return;
}
@@ -1149,7 +1147,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateParams? &(*TemplateParams)[0] : 0,
TemplateParams? TemplateParams->size() : 0));
}
- TemplateId->Destroy();
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
// Explicit instantiation of a member of a class template
@@ -1294,6 +1291,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
break;
}
+ // C++ [temp]p3 In a template-declaration which defines a class, no
+ // declarator is permitted.
+ if (TemplateInfo.Kind)
+ ExpectedSemi = true;
+
if (ExpectedSemi) {
ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
TagType == DeclSpec::TST_class ? "class"
@@ -1828,14 +1830,16 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
Diag(Tok, diag::err_bitfield_member_init);
SkipUntil(tok::comma, true, true);
} else {
- HasDeferredInitializer = !DeclaratorInfo.isFunctionDeclarator() &&
+ HasDeferredInitializer = !DeclaratorInfo.isDeclarationOfFunction() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_static &&
DeclaratorInfo.getDeclSpec().getStorageClassSpec()
- != DeclSpec::SCS_static;
+ != DeclSpec::SCS_typedef;
if (!HasDeferredInitializer) {
SourceLocation EqualLoc;
Init = ParseCXXMemberInitializer(
- DeclaratorInfo.isFunctionDeclarator(), EqualLoc);
+ DeclaratorInfo.isDeclarationOfFunction(), EqualLoc);
if (Init.isInvalid())
SkipUntil(tok::comma, true, true);
}
@@ -2246,8 +2250,7 @@ Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
ParsedType TemplateTypeTy;
if (Tok.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Type_template ||
TemplateId->Kind == TNK_Dependent_template_name) {
AnnotateTemplateIdTokenAsType();
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 4e94ed9323b8..fc64ae022654 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -220,7 +220,7 @@ ExprResult Parser::ParseAssignmentExpression() {
if (Tok.is(tok::kw_throw))
return ParseThrowExpression();
- ExprResult LHS = ParseCastExpression(false, false, ParsedType());
+ ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false);
return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
}
@@ -304,13 +304,14 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
ColonLoc = ConsumeToken();
} else {
// Otherwise, we're missing a ':'. Assume that this was a typo that the
- // user forgot. If we're not in a macro instantion, we can suggest a
+ // user forgot. If we're not in a macro instantiation, we can suggest a
// fixit hint. If there were two spaces before the current token,
// suggest inserting the colon in between them, otherwise insert ": ".
SourceLocation FILoc = Tok.getLocation();
const char *FIText = ": ";
- if (FILoc.isFileID()) {
- const SourceManager &SM = PP.getSourceManager();
+ const SourceManager &SM = PP.getSourceManager();
+ if (FILoc.isFileID() || PP.isAtStartOfMacroExpansion(FILoc)) {
+ FILoc = SM.getInstantiationLoc(FILoc);
bool IsInvalid = false;
const char *SourcePtr =
SM.getCharacterData(FILoc.getFileLocWithOffset(-1), &IsInvalid);
@@ -414,12 +415,12 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
///
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
- ParsedType TypeOfCast) {
+ bool isTypeCast) {
bool NotCastExpr;
ExprResult Res = ParseCastExpression(isUnaryExpression,
isAddressOfOperand,
NotCastExpr,
- TypeOfCast);
+ isTypeCast);
if (NotCastExpr)
Diag(Tok, diag::err_expected_expression);
return move(Res);
@@ -588,7 +589,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
- ParsedType TypeOfCast) {
+ bool isTypeCast) {
ExprResult Res;
tok::TokenKind SavedKind = Tok.getKind();
NotCastExpr = false;
@@ -619,7 +620,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ColonProtectionRAIIObject X(*this, false);
Res = ParseParenExpression(ParenExprType, false/*stopIfCastExr*/,
- TypeOfCast, CastTy, RParenLoc);
+ isTypeCast, CastTy, RParenLoc);
}
switch (ParenExprType) {
@@ -791,7 +792,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ParseBuiltinPrimaryExpression();
case tok::kw___null:
return Actions.ActOnGNUNullExpr(ConsumeToken());
- break;
+
case tok::plusplus: // unary-expression: '++' unary-expression [C99]
case tok::minusminus: { // unary-expression: '--' unary-expression [C99]
// C++ [expr.unary] has:
@@ -951,12 +952,11 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ExprError();
if (!Tok.is(tok::annot_cxxscope))
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
- NotCastExpr, TypeOfCast);
+ NotCastExpr, isTypeCast);
Token Next = NextToken();
if (Next.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
if (TemplateId->Kind == TNK_Type_template) {
// We have a qualified template-id that we know refers to a
// type, translate it into a type and continue parsing as a
@@ -965,7 +965,7 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
ParseOptionalCXXScopeSpecifier(SS, ParsedType(), false);
AnnotateTemplateIdTokenAsType();
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
- NotCastExpr, TypeOfCast);
+ NotCastExpr, isTypeCast);
}
}
@@ -975,15 +975,14 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
}
case tok::annot_template_id: { // [C++] template-id
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Type_template) {
// We have a template-id that we know refers to a type,
// translate it into a type and continue parsing as a cast
// expression.
AnnotateTemplateIdTokenAsType();
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
- NotCastExpr, TypeOfCast);
+ NotCastExpr, isTypeCast);
}
// Fall through to treat the template-id as an id-expression.
@@ -1101,17 +1100,20 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
return ParseObjCAtExpression(AtLoc);
}
case tok::caret:
- return ParsePostfixExpressionSuffix(ParseBlockLiteralExpression());
- case tok::code_completion:
+ Res = ParseBlockLiteralExpression();
+ break;
+ case tok::code_completion: {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
ConsumeCodeCompletionToken();
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
- NotCastExpr, TypeOfCast);
+ NotCastExpr, isTypeCast);
+ }
case tok::l_square:
- // These can be followed by postfix-expr pieces.
- if (getLang().ObjC1)
- return ParsePostfixExpressionSuffix(ParseObjCMessageExpression());
- // FALL THROUGH.
+ if (getLang().ObjC1) {
+ Res = ParseObjCMessageExpression();
+ break;
+ }
+ // FALL THROUGH.
default:
NotCastExpr = true;
return ExprError();
@@ -1261,7 +1263,6 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (Tok.isNot(tok::r_paren)) {
if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall,
LHS.get())) {
- SkipUntil(tok::r_paren);
LHS = ExprError();
}
}
@@ -1424,7 +1425,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
EnterExpressionEvaluationContext Unevaluated(Actions,
Sema::Unevaluated);
Operand = ParseParenExpression(ExprType, true/*stopIfCastExpr*/,
- ParsedType(), CastTy, RParenLoc);
+ false, CastTy, RParenLoc);
CastRange = SourceRange(LParenLoc, RParenLoc);
// If ParseParenExpression parsed a '(typename)' sequence only, then this is
@@ -1717,7 +1718,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
ConsumeParen());
break;
}
-}
+ }
if (Res.isInvalid())
return ExprError();
@@ -1740,10 +1741,15 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
/// '(' type-name ')' '{' initializer-list ',' '}'
/// cast-expression: [C99 6.5.4]
/// '(' type-name ')' cast-expression
-///
+/// [ARC] bridged-cast-expression
+///
+/// [ARC] bridged-cast-expression:
+/// (__bridge type-name) cast-expression
+/// (__bridge_transfer type-name) cast-expression
+/// (__bridge_retained type-name) cast-expression
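For illustration, a minimal Objective-C ARC sketch of the three bridge spellings above; everything except the bridge keywords and the Foundation/CoreFoundation calls is hypothetical:

#import <Foundation/Foundation.h>

void bridgeSketch(void) {
  CFStringRef cf = CFStringCreateWithCString(NULL, "hi", kCFStringEncodingUTF8);
  NSString *s = (__bridge_transfer NSString *)cf;     // ARC takes over the +1 CF reference
  CFStringRef c2 = (__bridge_retained CFStringRef)s;  // hand a +1 reference back to CF
  NSString *view = (__bridge NSString *)c2;           // no ownership transfer either way
  (void)view;
  CFRelease(c2);                                      // balance the __bridge_retained
}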
ExprResult
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
- ParsedType TypeOfCast, ParsedType &CastTy,
+ bool isTypeCast, ParsedType &CastTy,
SourceLocation &RParenLoc) {
assert(Tok.is(tok::l_paren) && "Not a paren expr!");
GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
@@ -1772,7 +1778,43 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// If the substmt parsed correctly, build the AST node.
if (!Stmt.isInvalid())
Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.take(), Tok.getLocation());
-
+ } else if (ExprType >= CompoundLiteral &&
+ (Tok.is(tok::kw___bridge) ||
+ Tok.is(tok::kw___bridge_transfer) ||
+ Tok.is(tok::kw___bridge_retained) ||
+ Tok.is(tok::kw___bridge_retain))) {
+ tok::TokenKind tokenKind = Tok.getKind();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+
+ // Parse an Objective-C ARC ownership cast expression.
+ ObjCBridgeCastKind Kind;
+ if (tokenKind == tok::kw___bridge)
+ Kind = OBC_Bridge;
+ else if (tokenKind == tok::kw___bridge_transfer)
+ Kind = OBC_BridgeTransfer;
+ else if (tokenKind == tok::kw___bridge_retained)
+ Kind = OBC_BridgeRetained;
+ else {
+ // As a hopefully temporary workaround, allow __bridge_retain as
+ // a synonym for __bridge_retained, but only in system headers.
+ assert(tokenKind == tok::kw___bridge_retain);
+ Kind = OBC_BridgeRetained;
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_retain)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ "__bridge_retained");
+ }
+
+ TypeResult Ty = ParseTypeName();
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, OpenLoc);
+ ExprResult SubExpr = ParseCastExpression(/*isUnaryExpression=*/false);
+
+ if (Ty.isInvalid() || SubExpr.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnObjCBridgedCast(getCurScope(), OpenLoc, Kind,
+ BridgeKeywordLoc, Ty.get(),
+ RParenLoc, SubExpr.get());
} else if (ExprType >= CompoundLiteral &&
isTypeIdInParens(isAmbiguousTypeId)) {
@@ -1787,20 +1829,23 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
return ParseCXXAmbiguousParenExpression(ExprType, CastTy,
OpenLoc, RParenLoc);
- TypeResult Ty;
-
- {
- InMessageExpressionRAIIObject InMessage(*this, false);
- Ty = ParseTypeName();
- }
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
// If our type is followed by an identifier and either ':' or ']', then
// this is probably an Objective-C message send where the leading '[' is
// missing. Recover as if that were the case.
- if (!Ty.isInvalid() && Tok.is(tok::identifier) && !InMessageExpression &&
- getLang().ObjC1 && !Ty.get().get().isNull() &&
- (NextToken().is(tok::colon) || NextToken().is(tok::r_square)) &&
- Ty.get().get()->isObjCObjectOrInterfaceType()) {
+ if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
+ !InMessageExpression && getLang().ObjC1 &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
Result = ParseObjCMessageExpressionBody(SourceLocation(),
SourceLocation(),
Ty.get(), 0);
@@ -1813,21 +1858,31 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (Tok.is(tok::l_brace)) {
ExprType = CompoundLiteral;
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
}
if (ExprType == CastExpr) {
// We parsed '(' type-name ')' and the thing after it wasn't a '{'.
- if (Ty.isInvalid())
+ if (DeclaratorInfo.isInvalidType())
return ExprError();
- CastTy = Ty.get();
-
// Note that this doesn't parse the subsequent cast-expression, it just
// returns the parsed type to the callee.
- if (stopIfCastExpr)
+ if (stopIfCastExpr) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ CastTy = Ty.get();
return ExprResult();
+ }
// Reject the cast of super idiom in ObjC.
if (Tok.is(tok::identifier) && getLang().ObjC1 &&
@@ -1841,17 +1896,21 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the cast-expression that follows it next.
// TODO: For cast expression with CastTy.
- Result = ParseCastExpression(false, false, CastTy);
- if (!Result.isInvalid())
- Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc, CastTy,
+ Result = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ /*isTypeCast=*/true);
+ if (!Result.isInvalid()) {
+ Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc,
+ DeclaratorInfo, CastTy,
RParenLoc, Result.take());
+ }
return move(Result);
}
Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
return ExprError();
}
- } else if (TypeOfCast) {
+ } else if (isTypeCast) {
// Parse the expression-list.
InMessageExpressionRAIIObject InMessage(*this, false);
@@ -1861,7 +1920,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
if (!ParseExpressionList(ArgExprs, CommaLocs)) {
ExprType = SimpleExpr;
Result = Actions.ActOnParenOrParenListExpr(OpenLoc, Tok.getLocation(),
- move_arg(ArgExprs), TypeOfCast);
+ move_arg(ArgExprs));
}
} else {
InMessageExpressionRAIIObject InMessage(*this, false);
@@ -2174,6 +2233,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
SourceLocation(),
0, 0, 0,
true, SourceLocation(),
+ SourceLocation(),
EST_None,
SourceLocation(),
0, 0, 0, 0,
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index eab7114284de..b32eeda24279 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -245,8 +245,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// So we need to check whether the simple-template-id is of the
// right kind (it should name a type or be dependent), and then
// convert it into a type within the nested-name-specifier.
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
*MayBePseudoDestructor = true;
return false;
@@ -281,10 +280,6 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
SS.SetInvalid(SourceRange(StartLoc, CCLoc));
}
- // If we are caching tokens we will process the TemplateId again,
- // otherwise destroy it.
- if (!PP.isBacktrackEnabled())
- TemplateId->Destroy();
continue;
}
@@ -543,7 +538,14 @@ ExprResult Parser::ParseCXXCasts() {
if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
return ExprError();
- TypeResult CastTy = ParseTypeName();
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
SourceLocation RAngleBracketLoc = Tok.getLocation();
if (ExpectAndConsume(tok::greater, diag::err_expected_greater))
@@ -559,9 +561,9 @@ ExprResult Parser::ParseCXXCasts() {
// Match the ')'.
RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
- if (!Result.isInvalid() && !CastTy.isInvalid())
+ if (!Result.isInvalid() && !DeclaratorInfo.isInvalidType())
Result = Actions.ActOnCXXNamedCast(OpLoc, Kind,
- LAngleBracketLoc, CastTy.get(),
+ LAngleBracketLoc, DeclaratorInfo,
RAngleBracketLoc,
LParenLoc, Result.take(), RParenLoc);
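For illustration, the named-cast forms that flow through this path (a hedged sketch; all names are hypothetical):

struct Base { virtual ~Base() {} };
struct Derived : Base {};

void castSketch(Base *b, void (*g)()) {
  int n = static_cast<int>(3.5);                          // simple type-id
  void (*fp)(int) = reinterpret_cast<void (*)(int)>(g);   // pointer-to-function abstract declarator
  const Derived *d = dynamic_cast<const Derived *>(b);    // cv-qualified pointer type-id
  (void)n; (void)fp; (void)d;
}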
@@ -785,12 +787,12 @@ ExprResult Parser::ParseThrowExpression() {
case tok::r_brace:
case tok::colon:
case tok::comma:
- return Actions.ActOnCXXThrow(ThrowLoc, 0);
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, 0);
default:
ExprResult Expr(ParseAssignmentExpression());
if (Expr.isInvalid()) return move(Expr);
- return Actions.ActOnCXXThrow(ThrowLoc, Expr.take());
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, Expr.take());
}
}
@@ -1606,8 +1608,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// unqualified-id:
// template-id (already parsed and annotated)
if (Tok.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation*>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
// If the template-name names the current class, then this is a constructor
if (AllowConstructorName && TemplateId->Name &&
@@ -1630,7 +1631,6 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
/*NontrivialTypeSourceInfo=*/true),
TemplateId->TemplateNameLoc,
TemplateId->RAngleLoc);
- TemplateId->Destroy();
ConsumeToken();
return false;
}
@@ -1755,7 +1755,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SourceRange TypeIdParens;
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ Declarator DeclaratorInfo(DS, Declarator::CXXNewContext);
if (Tok.is(tok::l_paren)) {
// If it turns out to be a placement, we change the type location.
PlacementLParen = ConsumeParen();
@@ -2200,7 +2200,8 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
Result = ParseCastExpression(false/*isUnaryExpression*/,
false/*isAddressofOperand*/,
NotCastExpr,
- ParsedType()/*TypeOfCast*/);
+ // type-id has priority.
+ true/*isTypeCast*/);
}
// If we parsed a cast-expression, it's really a type-id, otherwise it's
@@ -2219,7 +2220,11 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
ConsumeAnyToken();
if (ParseAs >= CompoundLiteral) {
- TypeResult Ty = ParseTypeName();
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
// Match the ')'.
if (Tok.is(tok::r_paren))
@@ -2229,21 +2234,21 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (ParseAs == CompoundLiteral) {
ExprType = CompoundLiteral;
+ TypeResult Ty = ParseTypeName();
return ParseCompoundLiteralExpression(Ty.get(), LParenLoc, RParenLoc);
}
// We parsed '(' type-id ')' and the thing after it wasn't a '{'.
assert(ParseAs == CastExpr);
- if (Ty.isInvalid())
+ if (DeclaratorInfo.isInvalidType())
return ExprError();
- CastTy = Ty.get();
-
// Result is what ParseCastExpression returned earlier.
if (!Result.isInvalid())
- Result = Actions.ActOnCastExpr(getCurScope(), LParenLoc, CastTy, RParenLoc,
- Result.take());
+ Result = Actions.ActOnCastExpr(getCurScope(), LParenLoc,
+ DeclaratorInfo, CastTy,
+ RParenLoc, Result.take());
return move(Result);
}
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index a8c18c01a291..7641565d1cc3 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -480,6 +480,10 @@ void Parser::ParseObjCInterfaceDeclList(Decl *interfaceDecl,
/// retain
/// copy
/// nonatomic
+/// atomic
+/// strong
+/// weak
+/// unsafe_unretained
///
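For illustration, a property declaration using the ARC spellings listed above (a hedged sketch; the class, protocol, and property names are hypothetical):

#import <Foundation/Foundation.h>

@protocol WidgetDelegate <NSObject>
@end

@interface Widget : NSObject
@property (nonatomic, strong) NSString *name;             // ARC owning reference
@property (nonatomic, weak) id<WidgetDelegate> delegate;  // zeroing weak reference
@property (atomic, unsafe_unretained) id legacyContext;   // unretained, non-zeroing
@end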
void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl) {
assert(Tok.getKind() == tok::l_paren);
@@ -504,16 +508,22 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, Decl *ClassDecl) {
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
else if (II->isStr("assign"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ else if (II->isStr("unsafe_unretained"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_unsafe_unretained);
else if (II->isStr("readwrite"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
else if (II->isStr("retain"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ else if (II->isStr("strong"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_strong);
else if (II->isStr("copy"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
else if (II->isStr("nonatomic"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
else if (II->isStr("atomic"))
DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
+ else if (II->isStr("weak"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_weak);
else if (II->isStr("getter") || II->isStr("setter")) {
bool IsSetter = II->getNameStart()[0] == 's';
@@ -775,11 +785,12 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
ParsedType Ty;
if (isTypeSpecifierQualifier()) {
- TypeResult TypeSpec = ParseTypeName(0, Declarator::ObjCPrototypeContext);
+ TypeResult TypeSpec =
+ ParseTypeName(0, Declarator::ObjCPrototypeContext, &DS);
if (!TypeSpec.isInvalid())
Ty = TypeSpec.get();
}
-
+
if (Tok.is(tok::r_paren))
ConsumeParen();
else if (Tok.getLocation() == TypeStartLoc) {
@@ -1622,10 +1633,7 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
- // For some odd reason, the name of the exception variable is
- // optional. As a result, we need to use "PrototypeContext", because
- // we must accept either 'declarator' or 'abstract-declarator' here.
- Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ Declarator ParmDecl(DS, Declarator::ObjCCatchContext);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
@@ -1690,6 +1698,29 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
FinallyStmt.take());
}
+/// objc-autoreleasepool-statement:
+/// @autoreleasepool compound-statement
+///
+StmtResult
+Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume autoreleasepool
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope BodyScope(this, Scope::DeclScope);
+
+ StmtResult AutoreleasePoolBody(ParseCompoundStatementBody());
+
+ BodyScope.Exit();
+ if (AutoreleasePoolBody.isInvalid())
+ AutoreleasePoolBody = Actions.ActOnNullStmt(Tok.getLocation());
+ return Actions.ActOnObjCAutoreleasePoolStmt(atLoc,
+ AutoreleasePoolBody.take());
+}
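For illustration, the statement form this new parse path accepts (a minimal sketch; the function and variable names are hypothetical):

#import <Foundation/Foundation.h>

void logUppercased(NSArray *items) {
  for (NSString *item in items) {
    @autoreleasepool {                       // '@autoreleasepool' compound-statement
      NSLog(@"%@", [item uppercaseString]);
    }                                        // autoreleased temporaries drain here
  }
}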
+
/// objc-method-def: objc-method-proto ';'[opt] '{' body '}'
///
Decl *Parser::ParseObjCMethodDefinition() {
@@ -1765,6 +1796,9 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
if (Tok.isObjCAtKeyword(tok::objc_synchronized))
return ParseObjCSynchronizedStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_autoreleasepool))
+ return ParseObjCAutoreleasePoolStmt(AtLoc);
ExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
if (Res.isInvalid()) {
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index 6cc8b57b61fe..b91bca55a9dc 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -226,7 +226,7 @@ Retry:
case tok::semi: { // C99 6.8.3p3: expression[opt] ';'
SourceLocation LeadingEmptyMacroLoc;
if (Tok.hasLeadingEmptyMacro())
- LeadingEmptyMacroLoc = PP.getLastEmptyMacroInstantiationLoc();
+ LeadingEmptyMacroLoc = PP.getLastEmptyMacroExpansionLoc();
return Actions.ActOnNullStmt(ConsumeToken(), LeadingEmptyMacroLoc);
}
@@ -502,6 +502,7 @@ StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs, bool MissingCase,
StmtTy *DeepestParsedCaseStmt = 0;
// While we have case statements, eat and stack them.
+ SourceLocation ColonLoc;
do {
SourceLocation CaseLoc = MissingCase ? Expr.get()->getExprLoc() :
ConsumeToken(); // eat the 'case'.
@@ -539,7 +540,6 @@ StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs, bool MissingCase,
ColonProtection.restore();
- SourceLocation ColonLoc;
if (Tok.is(tok::colon)) {
ColonLoc = ConsumeToken();
@@ -589,8 +589,8 @@ StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs, bool MissingCase,
} else {
// Nicely diagnose the common error "switch (X) { case 4: }", which is
// not valid.
- // FIXME: add insertion hint.
- Diag(Tok, diag::err_label_end_of_compound_statement);
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement);
SubStmt = true;
}
@@ -634,7 +634,8 @@ StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
// Diagnose the common error "switch (X) {... default: }", which is not valid.
if (Tok.is(tok::r_brace)) {
- Diag(Tok, diag::err_label_end_of_compound_statement);
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement);
return StmtError();
}
@@ -646,6 +647,10 @@ StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
SubStmt.get(), getCurScope());
}
+StmtResult Parser::ParseCompoundStatement(ParsedAttributes &Attr,
+ bool isStmtExpr) {
+ return ParseCompoundStatement(Attr, isStmtExpr, Scope::DeclScope);
+}
/// ParseCompoundStatement - Parse a "{}" block.
///
@@ -675,14 +680,15 @@ StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
/// [OMP] flush-directive
///
StmtResult Parser::ParseCompoundStatement(ParsedAttributes &attrs,
- bool isStmtExpr) {
+ bool isStmtExpr,
+ unsigned ScopeFlags) {
//FIXME: Use attributes?
assert(Tok.is(tok::l_brace) && "Not a compount stmt!");
// Enter a scope to hold everything within the compound stmt. Compound
// statements can always hold declarations.
- ParseScope CompoundScope(this, Scope::DeclScope);
+ ParseScope CompoundScope(this, ScopeFlags);
// Parse the statements in the body.
return ParseCompoundStatementBody(isStmtExpr);
@@ -1909,7 +1915,8 @@ StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
return StmtError(Diag(Tok, diag::err_expected_lbrace));
// FIXME: Possible draft standard bug: attribute-specifier should be allowed?
ParsedAttributesWithRange attrs(AttrFactory);
- StmtResult TryBlock(ParseCompoundStatement(attrs));
+ StmtResult TryBlock(ParseCompoundStatement(attrs, /*isStmtExpr=*/false,
+ Scope::DeclScope|Scope::TryScope));
if (TryBlock.isInvalid())
return move(TryBlock);
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index aa89d75c6d1b..9eab40a3ecdf 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -861,8 +861,7 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
void Parser::AnnotateTemplateIdTokenAsType() {
assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
assert((TemplateId->Kind == TNK_Type_template ||
TemplateId->Kind == TNK_Dependent_template_name) &&
"Only works for type and dependent templates");
@@ -888,7 +887,6 @@ void Parser::AnnotateTemplateIdTokenAsType() {
// Replace the template-id annotation token, and possible the scope-specifier
// that precedes it, with the typename annotation token.
PP.AnnotateCachedTokens(Tok);
- TemplateId->Destroy();
}
/// \brief Determine whether the given token can end a template argument.
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 78d2c9091b1b..2ba0fc673f67 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -908,8 +908,7 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
return TPResult::True();
case tok::annot_template_id: {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind != TNK_Type_template)
return TPResult::False();
CXXScopeSpec SS;
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index f19472ccdc04..5c502907bc15 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -124,9 +124,8 @@ void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
/// this helper function matches and consumes the specified RHS token if
-/// present. If not present, it emits the specified diagnostic indicating
-/// that the parser failed to match the RHS of the token at LHSLoc. LHSName
-/// should be the name of the unmatched LHS token.
+/// present. If not present, it emits a corresponding diagnostic indicating
+/// that the parser failed to match the RHS of the token at LHSLoc.
SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
SourceLocation LHSLoc) {
@@ -486,6 +485,7 @@ void Parser::Initialize() {
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to. This returns true if the EOF was encountered.
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+ DelayedCleanupPoint CleanupRAII(TopLevelDeclCleanupPool);
while (Tok.is(tok::annot_pragma_unused))
HandlePragmaUnused();
@@ -548,6 +548,7 @@ void Parser::ParseTranslationUnit() {
Parser::DeclGroupPtrTy
Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS) {
+ DelayedCleanupPoint CleanupRAII(TopLevelDeclCleanupPool);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
Decl *SingleDecl = 0;
@@ -1155,6 +1156,18 @@ Parser::ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
return move(Result);
}
+/// \brief Get the TemplateIdAnnotation from the token and put it in the
+/// cleanup pool so that it gets destroyed when parsing the current top level
+/// declaration is finished.
+TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
+ assert(tok.is(tok::annot_template_id) && "Expected template-id token");
+ TemplateIdAnnotation *
+ Id = static_cast<TemplateIdAnnotation *>(tok.getAnnotationValue());
+ TopLevelDeclCleanupPool.delayMemberFunc< TemplateIdAnnotation,
+ &TemplateIdAnnotation::Destroy>(Id);
+ return Id;
+}
+
/// TryAnnotateTypeOrScopeToken - If the current token position is on a
/// typename (possibly qualified in C++) or a C++ scope specifier not followed
/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
@@ -1209,8 +1222,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
*Tok.getIdentifierInfo(),
Tok.getLocation());
} else if (Tok.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Function_template) {
Diag(Tok, diag::err_typename_refers_to_non_type_template)
<< Tok.getAnnotationRange();
@@ -1228,7 +1240,6 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
TemplateId->LAngleLoc,
TemplateArgsPtr,
TemplateId->RAngleLoc);
- TemplateId->Destroy();
} else {
Diag(Tok, diag::err_expected_type_name_after_typename)
<< SS.getRange();
@@ -1311,8 +1322,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
}
if (Tok.is(tok::annot_template_id)) {
- TemplateIdAnnotation *TemplateId
- = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->Kind == TNK_Type_template) {
// A template-id that refers to a type was parsed into a
// template-id annotation in a context where we weren't allowed
diff --git a/lib/Rewrite/FixItRewriter.cpp b/lib/Rewrite/FixItRewriter.cpp
index 8dcc5dcba789..e50793e2775f 100644
--- a/lib/Rewrite/FixItRewriter.cpp
+++ b/lib/Rewrite/FixItRewriter.cpp
@@ -121,8 +121,7 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
if (Hint.CodeToInsert.empty()) {
// We're removing code.
- if (Rewrite.RemoveText(Hint.RemoveRange.getBegin(),
- Rewrite.getRangeSize(Hint.RemoveRange)))
+ if (Rewrite.RemoveText(Hint.RemoveRange))
Failed = true;
continue;
}
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index df08cd7cbf3d..0b54755dc58c 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -516,7 +516,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
}
// Okay, we have the first token of a macro expansion: highlight the
- // instantiation by inserting a start tag before the macro instantiation and
+ // expansion by inserting a start tag before the macro expansion and
// end tag after it.
std::pair<SourceLocation, SourceLocation> LLoc =
SM.getInstantiationRange(Tok.getLocation());
diff --git a/lib/Rewrite/RewriteObjC.cpp b/lib/Rewrite/RewriteObjC.cpp
index 8cdb55a0c1c6..8202164ebce5 100644
--- a/lib/Rewrite/RewriteObjC.cpp
+++ b/lib/Rewrite/RewriteObjC.cpp
@@ -2110,8 +2110,7 @@ Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
QualType StrType = Context->getPointerType(Context->CharTy);
std::string StrEncoding;
Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
- Expr *Replacement = StringLiteral::Create(*Context,StrEncoding.c_str(),
- StrEncoding.length(),
+ Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
false, false, StrType,
SourceLocation());
ReplaceStmt(Exp, Replacement);
@@ -2129,9 +2128,8 @@ Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
llvm::SmallVector<Expr*, 8> SelExprs;
QualType argType = Context->getPointerType(Context->CharTy);
SelExprs.push_back(StringLiteral::Create(*Context,
- Exp->getSelector().getAsString().c_str(),
- Exp->getSelector().getAsString().size(),
- false, false, argType,
+ Exp->getSelector().getAsString(),
+ false, false, argType,
SourceLocation()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size());
@@ -2798,8 +2796,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::SmallVector<Expr*, 8> ClsExprs;
QualType argType = Context->getPointerType(Context->CharTy);
ClsExprs.push_back(StringLiteral::Create(*Context,
- ClassDecl->getIdentifier()->getNameStart(),
- ClassDecl->getIdentifier()->getLength(),
+ ClassDecl->getIdentifier()->getName(),
false, false, argType, SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
&ClsExprs[0],
@@ -2877,8 +2874,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
= Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
IdentifierInfo *clsName = Class->getIdentifier();
ClsExprs.push_back(StringLiteral::Create(*Context,
- clsName->getNameStart(),
- clsName->getLength(),
+ clsName->getName(),
false, false,
argType, SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
@@ -2909,8 +2905,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::SmallVector<Expr*, 8> ClsExprs;
QualType argType = Context->getPointerType(Context->CharTy);
ClsExprs.push_back(StringLiteral::Create(*Context,
- ClassDecl->getIdentifier()->getNameStart(),
- ClassDecl->getIdentifier()->getLength(),
+ ClassDecl->getIdentifier()->getName(),
false, false, argType, SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
@@ -2991,8 +2986,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
llvm::SmallVector<Expr*, 8> SelExprs;
QualType argType = Context->getPointerType(Context->CharTy);
SelExprs.push_back(StringLiteral::Create(*Context,
- Exp->getSelector().getAsString().c_str(),
- Exp->getSelector().getAsString().size(),
+ Exp->getSelector().getAsString(),
false, false, argType, SourceLocation()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
&SelExprs[0], SelExprs.size(),
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
index 51fe379fee0d..92f516074ec9 100644
--- a/lib/Rewrite/Rewriter.cpp
+++ b/lib/Rewrite/Rewriter.cpp
@@ -231,10 +231,44 @@ RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
/// InsertText - Insert the specified string at the specified location in the
/// original buffer.
bool Rewriter::InsertText(SourceLocation Loc, llvm::StringRef Str,
- bool InsertAfter) {
+ bool InsertAfter, bool indentNewLines) {
+ using llvm::StringRef;
+
if (!isRewritable(Loc)) return true;
FileID FID;
unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+
+ llvm::SmallString<128> indentedStr;
+ if (indentNewLines && Str.find('\n') != StringRef::npos) {
+ StringRef MB = SourceMgr->getBufferData(FID);
+
+ unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
+ const SrcMgr::ContentCache *
+ Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+ unsigned lineOffs = Content->SourceLineCache[lineNo];
+
+ // Find the whitespace at the start of the line.
+ StringRef indentSpace;
+ {
+ unsigned i = lineOffs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ indentSpace = MB.substr(lineOffs, i-lineOffs);
+ }
+
+ llvm::SmallVector<StringRef, 4> lines;
+ Str.split(lines, "\n");
+
+ for (unsigned i = 0, e = lines.size(); i != e; ++i) {
+ indentedStr += lines[i];
+ if (i < e-1) {
+ indentedStr += '\n';
+ indentedStr += indentSpace;
+ }
+ }
+ Str = indentedStr.str();
+ }
+
getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter);
return false;
}
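For illustration, a hedged usage sketch of the new indentNewLines flag; R and Loc are assumed to be an already-configured clang::Rewriter and a rewritable location, and the include path reflects this tree:

#include "clang/Rewrite/Rewriter.h"

void insertGuard(clang::Rewriter &R, clang::SourceLocation Loc) {
  R.InsertText(Loc, "if (!ptr)\n  return;\n",
               /*InsertAfter=*/true,
               /*indentNewLines=*/true);  // lines after the first pick up the
                                          // indentation of the line containing Loc
}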
@@ -317,6 +351,7 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
SourceLocation parentIndent) {
using llvm::StringRef;
+ if (range.isInvalid()) return true;
if (!isRewritable(range.getBegin())) return true;
if (!isRewritable(range.getEnd())) return true;
if (!isRewritable(parentIndent)) return true;
@@ -330,7 +365,7 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
if (StartFileID != EndFileID || StartFileID != parentFileID)
return true;
- if (StartOff >= EndOff || parentOff >= StartOff)
+ if (StartOff > EndOff)
return true;
FileID FID = StartFileID;
@@ -343,16 +378,12 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
const SrcMgr::ContentCache *
Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
- // Find where the line starts for the three offsets.
+ // Find where the lines start.
unsigned parentLineOffs = Content->SourceLineCache[parentLineNo];
unsigned startLineOffs = Content->SourceLineCache[startLineNo];
- unsigned endLineOffs = Content->SourceLineCache[endLineNo];
-
- if (startLineOffs == endLineOffs || startLineOffs == parentLineOffs)
- return true;
// Find the whitespace at the start of each line.
- StringRef parentSpace, startSpace, endSpace;
+ StringRef parentSpace, startSpace;
{
unsigned i = parentLineOffs;
while (isWhitespace(MB[i]))
@@ -363,11 +394,6 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
while (isWhitespace(MB[i]))
++i;
startSpace = MB.substr(startLineOffs, i-startLineOffs);
-
- i = endLineOffs;
- while (isWhitespace(MB[i]))
- ++i;
- endSpace = MB.substr(endLineOffs, i-endLineOffs);
}
if (parentSpace.size() >= startSpace.size())
return true;
@@ -378,19 +404,14 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
// Indent the lines between start/end offsets.
RewriteBuffer &RB = getEditBuffer(FID);
- for (unsigned i = startLineOffs; i != endLineOffs; ++i) {
- if (MB[i] == '\n') {
- unsigned startOfLine = i+1;
- if (startOfLine == endLineOffs)
- break;
- StringRef origIndent;
- unsigned ws = startOfLine;
- while (isWhitespace(MB[ws]))
- ++ws;
- origIndent = MB.substr(startOfLine, ws-startOfLine);
- if (origIndent.startswith(startSpace))
- RB.InsertText(startOfLine, indent, /*InsertAfter=*/false);
- }
+ for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) {
+ unsigned offs = Content->SourceLineCache[lineNo];
+ unsigned i = offs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ StringRef origIndent = MB.substr(offs, i-offs);
+ if (origIndent.startswith(startSpace))
+ RB.InsertText(offs, indent, /*InsertAfter=*/false);
}
return false;
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index 9efae6103a4a..7a14855e69cd 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -486,7 +486,8 @@ static void SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
const char *initialization = 0;
QualType VariableTy = VD->getType().getCanonicalType();
- if (VariableTy->getAs<ObjCObjectPointerType>()) {
+ if (VariableTy->isObjCObjectPointerType() ||
+ VariableTy->isBlockPointerType()) {
// Check if 'nil' is defined.
if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("nil")))
initialization = " = nil";
@@ -499,6 +500,13 @@ static void SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
initialization = " = false";
else if (VariableTy->isEnumeralType())
return;
+ else if (VariableTy->isPointerType() || VariableTy->isMemberPointerType()) {
+ // Check if 'NULL' is defined.
+ if (S.PP.getMacroInfo(&S.getASTContext().Idents.get("NULL")))
+ initialization = " = NULL";
+ else
+ initialization = " = 0";
+ }
else if (VariableTy->isScalarType())
initialization = " = 0";
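For illustration, the initialization fix-its these branches would now suggest for uninitialized locals (a sketch; the struct and variable names are hypothetical):

#include <cstddef>   // defines the NULL macro the code above checks for
struct S { int m; };
void sketchUninit() {
  int *p;      // suggested fix-it: " = NULL" (falls back to " = 0" without NULL)
  int S::*mp;  // suggested fix-it: " = NULL" (member pointers are newly covered)
  bool b;      // suggested fix-it: " = false"
  int n;       // suggested fix-it: " = 0"
  (void)p; (void)mp; (void)b; (void)n;
}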
@@ -589,7 +597,17 @@ clang::sema::AnalysisBasedWarnings::Policy::Policy() {
enableCheckUnreachable = 0;
}
-clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) : S(s) {
+clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
+ : S(s),
+ NumFunctionsAnalyzed(0),
+ NumFunctionsWithBadCFGs(0),
+ NumCFGBlocks(0),
+ MaxCFGBlocksPerFunction(0),
+ NumUninitAnalysisFunctions(0),
+ NumUninitAnalysisVariables(0),
+ MaxUninitAnalysisVariablesPerFunction(0),
+ NumUninitAnalysisBlockVisits(0),
+ MaxUninitAnalysisBlockVisitsPerFunction(0) {
Diagnostic &D = S.getDiagnostics();
DefaultPolicy.enableCheckUnreachable = (unsigned)
(D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
@@ -705,8 +723,68 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
!= Diagnostic::Ignored) {
if (CFG *cfg = AC.getCFG()) {
UninitValsDiagReporter reporter(S);
+ UninitVariablesAnalysisStats stats;
+ std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
- reporter);
+ reporter, stats);
+
+ if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
+ ++NumUninitAnalysisFunctions;
+ NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
+ NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
+ MaxUninitAnalysisVariablesPerFunction =
+ std::max(MaxUninitAnalysisVariablesPerFunction,
+ stats.NumVariablesAnalyzed);
+ MaxUninitAnalysisBlockVisitsPerFunction =
+ std::max(MaxUninitAnalysisBlockVisitsPerFunction,
+ stats.NumBlockVisits);
+ }
}
}
+
+ // Collect statistics about the CFG if it was built.
+ if (S.CollectStats && AC.isCFGBuilt()) {
+ ++NumFunctionsAnalyzed;
+ if (CFG *cfg = AC.getCFG()) {
+ // If we successfully built a CFG for this context, record some more
+ // detailed information about it.
+ NumCFGBlocks += cfg->getNumBlockIDs();
+ MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
+ cfg->getNumBlockIDs());
+ } else {
+ ++NumFunctionsWithBadCFGs;
+ }
+ }
+}
+
+void clang::sema::AnalysisBasedWarnings::PrintStats() const {
+ llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
+
+ unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
+ unsigned AvgCFGBlocksPerFunction =
+ !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
+ llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
+ << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
+ << " " << NumCFGBlocks << " CFG blocks built.\n"
+ << " " << AvgCFGBlocksPerFunction
+ << " average CFG blocks per function.\n"
+ << " " << MaxCFGBlocksPerFunction
+ << " max CFG blocks per function.\n";
+
+ unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
+ unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
+ llvm::errs() << NumUninitAnalysisFunctions
+ << " functions analyzed for uninitialized variables\n"
+ << " " << NumUninitAnalysisVariables << " variables analyzed.\n"
+ << " " << AvgUninitVariablesPerFunction
+ << " average variables per function.\n"
+ << " " << MaxUninitAnalysisVariablesPerFunction
+ << " max variables per function.\n"
+ << " " << NumUninitAnalysisBlockVisits << " block visits.\n"
+ << " " << AvgUninitBlockVisitsPerFunction
+ << " average block visits per function.\n"
+ << " " << MaxUninitAnalysisBlockVisitsPerFunction
+ << " max block visits per function.\n";
}
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp
index 619a5b961bfe..5a8330bbfd77 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/AttributeList.cpp
@@ -107,6 +107,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
return llvm::StringSwitch<AttributeList::Kind>(AttrName)
.Case("weak", AT_weak)
.Case("weakref", AT_weakref)
+ .Case("objc_arc_weak_reference_unavailable", AT_arc_weakref_unavailable)
.Case("pure", AT_pure)
.Case("mode", AT_mode)
.Case("used", AT_used)
@@ -177,6 +178,11 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("cf_consumed", AT_cf_consumed)
.Case("cf_returns_not_retained", AT_cf_returns_not_retained)
.Case("cf_returns_retained", AT_cf_returns_retained)
+ .Case("cf_returns_autoreleased", AT_cf_returns_autoreleased)
+ .Case("ns_consumes_self", AT_ns_consumes_self)
+ .Case("ns_consumed", AT_ns_consumed)
+ .Case("objc_ownership", AT_objc_ownership)
+ .Case("objc_precise_lifetime", AT_objc_precise_lifetime)
.Case("ownership_returns", AT_ownership_returns)
.Case("ownership_holds", AT_ownership_holds)
.Case("ownership_takes", AT_ownership_takes)
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
index 0a670197d7ef..652198189a4c 100644
--- a/lib/Sema/CMakeLists.txt
+++ b/lib/Sema/CMakeLists.txt
@@ -23,6 +23,7 @@ add_clang_library(clangSema
SemaExceptionSpec.cpp
SemaExpr.cpp
SemaExprCXX.cpp
+ SemaExprMember.cpp
SemaExprObjC.cpp
SemaInit.cpp
SemaLookup.cpp
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 2334ab5128a7..d7dc5b2538c6 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -46,7 +46,9 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCImplementation:
case CCC_ObjCIvarList:
case CCC_ClassStructUnion:
- case CCC_MemberAccess:
+ case CCC_DotMemberAccess:
+ case CCC_ArrowMemberAccess:
+ case CCC_ObjCPropertyAccess:
case CCC_EnumTag:
case CCC_UnionTag:
case CCC_ClassOrStructTag:
@@ -64,6 +66,10 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_TypeQualifiers:
case CCC_Other:
case CCC_OtherWithMacros:
+ case CCC_ObjCInstanceMessage:
+ case CCC_ObjCClassMessage:
+ case CCC_ObjCSuperclass:
+ case CCC_ObjCCategoryName:
return false;
}
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index 5be16e7431c1..c87f2cff5489 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -13,8 +13,10 @@
#include "clang/Parse/ParseDiagnostic.h" // FIXME: remove this back-dependency!
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Lex/Preprocessor.h"
@@ -124,6 +126,12 @@ void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) {
Builder.Adopt(Other);
}
+SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const {
+ if (!Builder.getRepresentation())
+ return SourceLocation();
+ return Builder.getTemporary().getLocalBeginLoc();
+}
+
NestedNameSpecifierLoc
CXXScopeSpec::getWithLocInContext(ASTContext &Context) const {
if (!Builder.getRepresentation())
@@ -141,6 +149,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
unsigned TypeQuals,
bool RefQualifierIsLvalueRef,
SourceLocation RefQualifierLoc,
+ SourceLocation MutableLoc,
ExceptionSpecificationType
ESpecType,
SourceLocation ESpecLoc,
@@ -166,6 +175,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
I.Fun.ArgInfo = 0;
I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+ I.Fun.MutableLoc = MutableLoc.getRawEncoding();
I.Fun.ExceptionSpecType = ESpecType;
I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding();
I.Fun.NumExceptions = 0;
@@ -213,6 +223,73 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
return I;
}
+bool Declarator::isDeclarationOfFunction() const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Function:
+ return true;
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ }
+ llvm_unreachable("Invalid type chunk");
+ return false;
+ }
+
+ switch (DS.getTypeSpecType()) {
+ case TST_auto:
+ case TST_bool:
+ case TST_char:
+ case TST_char16:
+ case TST_char32:
+ case TST_class:
+ case TST_decimal128:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_double:
+ case TST_enum:
+ case TST_error:
+ case TST_float:
+ case TST_int:
+ case TST_struct:
+ case TST_union:
+ case TST_unknown_anytype:
+ case TST_unspecified:
+ case TST_void:
+ case TST_wchar:
+ return false;
+
+ case TST_decltype:
+ case TST_typeofExpr:
+ if (Expr *E = DS.getRepAsExpr())
+ return E->getType()->isFunctionType();
+ return false;
+
+ case TST_underlyingType:
+ case TST_typename:
+ case TST_typeofType: {
+ QualType QT = DS.getRepAsType().get();
+ if (QT.isNull())
+ return false;
+
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT))
+ QT = LIT->getType();
+
+ if (QT.isNull())
+ return false;
+
+ return QT->isFunctionType();
+ }
+ }
+
+ return false;
+}
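For illustration, how this predicate classifies a few declarations (all names hypothetical):

int f();        // declares a function                        -> true
int *g();       // declares a function returning int*         -> true
int (*pf)();    // declares a pointer-to-function variable    -> false
int x;          // declares an int variable                   -> false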
+
/// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
/// declaration specifier includes.
///
@@ -792,9 +869,6 @@ bool DeclSpec::isMissingDeclaratorOk() {
}
void UnqualifiedId::clear() {
- if (Kind == IK_TemplateId)
- TemplateId->Destroy();
-
Kind = IK_Identifier;
Identifier = 0;
StartLocation = SourceLocation();
diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp
index af548fe13460..c6744ed80d4b 100644
--- a/lib/Sema/DelayedDiagnostic.cpp
+++ b/lib/Sema/DelayedDiagnostic.cpp
@@ -47,5 +47,8 @@ void DelayedDiagnostic::Destroy() {
case Deprecation:
delete [] DeprecationData.Message;
break;
+
+ case ForbiddenType:
+ break;
}
}
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index ae154aae2062..59bc2637c8f3 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -68,7 +68,10 @@ public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
void BuildScopeInformation(Decl *D, unsigned &ParentScope);
- void BuildScopeInformation(Stmt *S, unsigned ParentScope);
+ void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
+ unsigned &ParentScope);
+ void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
+
void VerifyJumps();
void VerifyIndirectJumps();
void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
@@ -87,7 +90,8 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s) : S(s) {
// Build information for the top level compound statement, so that we have a
// defined scope record for every "goto" and label.
- BuildScopeInformation(Body, 0);
+ unsigned BodyParentScope = 0;
+ BuildScopeInformation(Body, BodyParentScope);
// Check that all jumps we saw are kosher.
VerifyJumps();
@@ -111,87 +115,110 @@ unsigned JumpScopeChecker::GetDeepestCommonScope(unsigned A, unsigned B) {
return A;
}
+typedef std::pair<unsigned,unsigned> ScopePair;
+
/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
/// diagnostic that should be emitted if control goes over it. If not, return 0.
-static std::pair<unsigned,unsigned>
- GetDiagForGotoScopeDecl(const Decl *D, bool isCPlusPlus) {
+static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
unsigned InDiag = 0, OutDiag = 0;
if (VD->getType()->isVariablyModifiedType())
InDiag = diag::note_protected_by_vla;
- if (VD->hasAttr<BlocksAttr>()) {
- InDiag = diag::note_protected_by___block;
- OutDiag = diag::note_exits___block;
- } else if (VD->hasAttr<CleanupAttr>()) {
- InDiag = diag::note_protected_by_cleanup;
- OutDiag = diag::note_exits_cleanup;
- } else if (isCPlusPlus) {
- if (!VD->hasLocalStorage())
- return std::make_pair(InDiag, OutDiag);
-
- ASTContext &Context = D->getASTContext();
- QualType T = Context.getBaseElementType(VD->getType());
- if (!T->isDependentType()) {
- // C++0x [stmt.dcl]p3:
- // A program that jumps from a point where a variable with automatic
- // storage duration is not in scope to a point where it is in scope
- // is ill-formed unless the variable has scalar type, class type with
- // a trivial default constructor and a trivial destructor, a
- // cv-qualified version of one of these types, or an array of one of
- // the preceding types and is declared without an initializer (8.5).
- // Check whether this is a C++ class.
- CXXRecordDecl *Record = T->getAsCXXRecordDecl();
-
- if (const Expr *Init = VD->getInit()) {
- bool CallsTrivialConstructor = false;
- if (Record) {
- // FIXME: With generalized initializer lists, this may
- // classify "X x{};" as having no initializer.
- if (const CXXConstructExpr *Construct
- = dyn_cast<CXXConstructExpr>(Init))
- if (const CXXConstructorDecl *Constructor
- = Construct->getConstructor())
- if ((Context.getLangOptions().CPlusPlus0x
- ? Record->hasTrivialDefaultConstructor()
- : Record->isPOD()) &&
- Constructor->isDefaultConstructor())
- CallsTrivialConstructor = true;
+ if (VD->hasAttr<BlocksAttr>())
+ return ScopePair(diag::note_protected_by___block,
+ diag::note_exits___block);
+
+ if (VD->hasAttr<CleanupAttr>())
+ return ScopePair(diag::note_protected_by_cleanup,
+ diag::note_exits_cleanup);
+
+ if (Context.getLangOptions().ObjCAutoRefCount && VD->hasLocalStorage()) {
+ switch (VD->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return ScopePair(diag::note_protected_by_objc_ownership,
+ diag::note_exits_objc_ownership);
+ }
+ }
+
+ if (Context.getLangOptions().CPlusPlus && VD->hasLocalStorage()) {
+ // C++0x [stmt.dcl]p3:
+ // A program that jumps from a point where a variable with automatic
+ // storage duration is not in scope to a point where it is in scope
+ // is ill-formed unless the variable has scalar type, class type with
+ // a trivial default constructor and a trivial destructor, a
+ // cv-qualified version of one of these types, or an array of one of
+ // the preceding types and is declared without an initializer.
+
+      // C++03 [stmt.dcl]p3:
+ // A program that jumps from a point where a local variable
+ // with automatic storage duration is not in scope to a point
+ // where it is in scope is ill-formed unless the variable has
+ // POD type and is declared without an initializer.
+
+ if (const Expr *init = VD->getInit()) {
+ // We actually give variables of record type (or array thereof)
+ // an initializer even if that initializer only calls a trivial
+ // ctor. Detect that case.
+ // FIXME: With generalized initializer lists, this may
+ // classify "X x{};" as having no initializer.
+ unsigned inDiagToUse = diag::note_protected_by_variable_init;
+
+ const CXXRecordDecl *record = 0;
+
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
+ const CXXConstructorDecl *ctor = cce->getConstructor();
+ record = ctor->getParent();
+
+ if (ctor->isTrivial() && ctor->isDefaultConstructor()) {
+ if (Context.getLangOptions().CPlusPlus0x) {
+ inDiagToUse = (record->hasTrivialDestructor() ? 0 :
+ diag::note_protected_by_variable_nontriv_destructor);
+ } else {
+ if (record->isPOD())
+ inDiagToUse = 0;
+ }
}
-
- if (!CallsTrivialConstructor)
- InDiag = diag::note_protected_by_variable_init;
+ } else if (VD->getType()->isArrayType()) {
+ record = VD->getType()->getBaseElementTypeUnsafe()
+ ->getAsCXXRecordDecl();
}
-
- // Note whether we have a class with a non-trivial destructor.
- if (Record && !Record->hasTrivialDestructor())
+
+ if (inDiagToUse)
+ InDiag = inDiagToUse;
+
+ // Also object to indirect jumps which leave scopes with dtors.
+ if (record && !record->hasTrivialDestructor())
OutDiag = diag::note_exits_dtor;
}
}
- return std::make_pair(InDiag, OutDiag);
+ return ScopePair(InDiag, OutDiag);
}
if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
if (TD->getUnderlyingType()->isVariablyModifiedType())
- return std::make_pair((unsigned) diag::note_protected_by_vla_typedef, 0);
+ return ScopePair(diag::note_protected_by_vla_typedef, 0);
}
if (const TypeAliasDecl *TD = dyn_cast<TypeAliasDecl>(D)) {
if (TD->getUnderlyingType()->isVariablyModifiedType())
- return std::make_pair((unsigned) diag::note_protected_by_vla_type_alias, 0);
+ return ScopePair(diag::note_protected_by_vla_type_alias, 0);
}
- return std::make_pair(0U, 0U);
+ return ScopePair(0U, 0U);
}
/// \brief Build scope information for a declaration that is part of a DeclStmt.
void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
- bool isCPlusPlus = this->S.getLangOptions().CPlusPlus;
-
// If this decl causes a new scope, push and switch to it.
- std::pair<unsigned,unsigned> Diags
- = GetDiagForGotoScopeDecl(D, isCPlusPlus);
+ std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S.Context, D);
if (Diags.first || Diags.second) {
Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
D->getLocation()));
@@ -205,11 +232,55 @@ void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
BuildScopeInformation(Init, ParentScope);
}
+/// \brief Build scope information for captured block literal variables.
+void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
+ const BlockDecl *BDecl,
+ unsigned &ParentScope) {
+ // exclude captured __block variables; there's no destructor
+ // associated with the block literal for them.
+ if (D->hasAttr<BlocksAttr>())
+ return;
+ QualType T = D->getType();
+ QualType::DestructionKind destructKind = T.isDestructedType();
+ if (destructKind != QualType::DK_none) {
+ std::pair<unsigned,unsigned> Diags;
+ switch (destructKind) {
+ case QualType::DK_cxx_destructor:
+ Diags = ScopePair(diag::note_enters_block_captures_cxx_obj,
+ diag::note_exits_block_captures_cxx_obj);
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_strong,
+ diag::note_exits_block_captures_strong);
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_weak,
+ diag::note_exits_block_captures_weak);
+ break;
+ case QualType::DK_none:
+      llvm_unreachable("no-lifetime captured variable");
+ }
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ Loc = BDecl->getLocation();
+ Scopes.push_back(GotoScope(ParentScope,
+ Diags.first, Diags.second, Loc));
+ ParentScope = Scopes.size()-1;
+ }
+}
+
/// BuildScopeInformation - The statements from CI to CE are known to form a
/// coherent VLA scope with a specified parent node. Walk through the
/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
/// walking the AST as needed.
-void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
+void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) {
+ // If this is a statement, rather than an expression, scopes within it don't
+ // propagate out into the enclosing scope. Otherwise we have to worry
+ // about block literals, which have the lifetime of their enclosing statement.
+ unsigned independentParentScope = origParentScope;
+ unsigned &ParentScope = ((isa<Expr>(S) && !isa<StmtExpr>(S))
+ ? origParentScope : independentParentScope);
+
bool SkipFirstSubStmt = false;
// If we found a label, remember that it is in ParentScope scope.
@@ -291,17 +362,17 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
BuildScopeInformation(*I, ParentScope);
continue;
}
-
// Disallow jumps into any part of an @try statement by pushing a scope and
// walking all sub-stmts in that scope.
if (ObjCAtTryStmt *AT = dyn_cast<ObjCAtTryStmt>(SubStmt)) {
+ unsigned newParentScope;
// Recursively walk the AST for the @try part.
Scopes.push_back(GotoScope(ParentScope,
diag::note_protected_by_objc_try,
diag::note_exits_objc_try,
AT->getAtTryLoc()));
if (Stmt *TryPart = AT->getTryBody())
- BuildScopeInformation(TryPart, Scopes.size()-1);
+ BuildScopeInformation(TryPart, (newParentScope = Scopes.size()-1));
// Jump from the catch to the finally or try is not valid.
for (unsigned I = 0, N = AT->getNumCatchStmts(); I != N; ++I) {
@@ -311,7 +382,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
diag::note_exits_objc_catch,
AC->getAtCatchLoc()));
      // @catches are nested scopes, and jumping into one isn't valid either.
- BuildScopeInformation(AC->getCatchBody(), Scopes.size()-1);
+ BuildScopeInformation(AC->getCatchBody(),
+ (newParentScope = Scopes.size()-1));
}
// Jump from the finally to the try or catch is not valid.
@@ -320,12 +392,13 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
diag::note_protected_by_objc_finally,
diag::note_exits_objc_finally,
AF->getAtFinallyLoc()));
- BuildScopeInformation(AF, Scopes.size()-1);
+ BuildScopeInformation(AF, (newParentScope = Scopes.size()-1));
}
continue;
}
-
+
+ unsigned newParentScope;
// Disallow jumps into the protected statement of an @synchronized, but
// allow jumps into the object expression it protects.
if (ObjCAtSynchronizedStmt *AS = dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)){
@@ -339,7 +412,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
diag::note_protected_by_objc_synchronized,
diag::note_exits_objc_synchronized,
AS->getAtSynchronizedLoc()));
- BuildScopeInformation(AS->getSynchBody(), Scopes.size()-1);
+ BuildScopeInformation(AS->getSynchBody(),
+ (newParentScope = Scopes.size()-1));
continue;
}
@@ -351,7 +425,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
diag::note_exits_cxx_try,
TS->getSourceRange().getBegin()));
if (Stmt *TryBlock = TS->getTryBlock())
- BuildScopeInformation(TryBlock, Scopes.size()-1);
+ BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
// Jump from the catch into the try is not allowed either.
for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
@@ -360,12 +434,34 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
diag::note_protected_by_cxx_catch,
diag::note_exits_cxx_catch,
CS->getSourceRange().getBegin()));
- BuildScopeInformation(CS->getHandlerBlock(), Scopes.size()-1);
+ BuildScopeInformation(CS->getHandlerBlock(),
+ (newParentScope = Scopes.size()-1));
}
continue;
}
+ // Disallow jumps into the protected statement of an @autoreleasepool.
+ if (ObjCAutoreleasePoolStmt *AS = dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)){
+ // Recursively walk the AST for the @autoreleasepool part, protected by a new
+ // scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_autoreleasepool,
+ diag::note_exits_objc_autoreleasepool,
+ AS->getAtLoc()));
+ BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1));
+ continue;
+ }
+
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(SubStmt)) {
+ const BlockDecl *BDecl = BE->getBlockDecl();
+ for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(),
+ ce = BDecl->capture_end(); ci != ce; ++ci) {
+ VarDecl *variable = ci->getVariable();
+ BuildScopeInformation(variable, BDecl, ParentScope);
+ }
+ }
+
// Recursively walk the AST.
BuildScopeInformation(SubStmt, ParentScope);
}
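
To ground the standard wording quoted in this hunk (illustrative only, not part of the diff): JumpScopeChecker rejects exactly the kind of jump below, which bypasses the initialization of a variable with a non-trivial constructor/destructor. The snippet is deliberately ill-formed:

    #include <string>

    void f(bool b) {
      if (b) goto later;    // error: the jump bypasses the initialization of 's'
                            // (with the note_protected_by_variable_init note)
      std::string s("hi");  // class type, non-trivial ctor/dtor
    later:
      (void)s;
    }
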
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 8297b3155a8c..fdf3bb3cb012 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -141,9 +141,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
: TheTargetAttributesSema(0), FPFeatures(pp.getLangOptions()),
LangOpts(pp.getLangOptions()), PP(pp), Context(ctxt), Consumer(consumer),
Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
- ExternalSource(0), CodeCompleter(CodeCompleter), CurContext(0),
- PackContext(0), MSStructPragmaOn(false), VisContext(0),
- LateTemplateParser(0), OpaqueParser(0),
+ CollectStats(false), ExternalSource(0), CodeCompleter(CodeCompleter),
+ CurContext(0), PackContext(0), MSStructPragmaOn(false), VisContext(0),
+ ExprNeedsCleanups(0), LateTemplateParser(0), OpaqueParser(0),
IdResolver(pp.getLangOptions()), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
GlobalNewDeleteDeclared(false),
CompleteTranslationUnit(CompleteTranslationUnit),
@@ -154,6 +154,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
AnalysisWarnings(*this)
{
TUScope = 0;
+ LoadedExternalKnownNamespaces = false;
+
if (getLangOptions().CPlusPlus)
FieldCollector.reset(new CXXFieldCollector());
@@ -162,7 +164,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
&Context);
ExprEvalContexts.push_back(
- ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0));
+ ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0, false));
FunctionScopes.push_back(new FunctionScopeInfo(Diags));
}
@@ -202,22 +204,61 @@ Sema::~Sema() {
ExternalSema->ForgetSema();
}
+
+/// makeUnavailableInSystemHeader - There is an error in the current
+/// context. If we're still in a system header, and we can plausibly
+/// make the relevant declaration unavailable instead of erroring, do
+/// so and return true.
+bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
+ llvm::StringRef msg) {
+ // If we're not in a function, it's an error.
+ FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
+ if (!fn) return false;
+
+ // If we're in template instantiation, it's an error.
+ if (!ActiveTemplateInstantiations.empty())
+ return false;
+
+ // If that function's not in a system header, it's an error.
+ if (!Context.getSourceManager().isInSystemHeader(loc))
+ return false;
+
+ // If the function is already unavailable, it's not an error.
+ if (fn->hasAttr<UnavailableAttr>()) return true;
+
+ fn->addAttr(new (Context) UnavailableAttr(loc, Context, msg));
+ return true;
+}
+
ASTMutationListener *Sema::getASTMutationListener() const {
return getASTConsumer().GetASTMutationListener();
}
+/// \brief Print out statistics about the semantic analysis.
+void Sema::PrintStats() const {
+ llvm::errs() << "\n*** Semantic Analysis Stats:\n";
+ llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
+
+ BumpAlloc.PrintStats();
+ AnalysisWarnings.PrintStats();
+}
+
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
CastKind Kind, ExprValueKind VK,
- const CXXCastPath *BasePath) {
+ const CXXCastPath *BasePath,
+ CheckedConversionKind CCK) {
QualType ExprTy = Context.getCanonicalType(E->getType());
QualType TypeTy = Context.getCanonicalType(Ty);
if (ExprTy == TypeTy)
return Owned(E);
+ if (getLangOptions().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), Ty, E, CCK);
+
// If this is a derived-to-base cast to a through a virtual base, we
// need a vtable.
if (Kind == CK_DerivedToBase &&
@@ -729,8 +770,8 @@ void Sema::PopFunctionOrBlockScope(const AnalysisBasedWarnings::Policy *WP,
/// \brief Determine whether any errors occurred within this function/method/
/// block.
-bool Sema::hasAnyErrorsInThisFunction() const {
- return getCurFunction()->ErrorTrap.hasErrorOccurred();
+bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
+ return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
}
BlockScopeInfo *Sema::getCurBlock() {
@@ -748,6 +789,10 @@ ExternalSemaSource::ReadMethodPool(Selector Sel) {
return std::pair<ObjCMethodList, ObjCMethodList>();
}
+void ExternalSemaSource::ReadKnownNamespaces(
+ llvm::SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+}
+
void PrettyDeclStackTraceEntry::print(llvm::raw_ostream &OS) const {
SourceLocation Loc = this->Loc;
if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
diff --git a/lib/Sema/SemaCXXCast.cpp b/lib/Sema/SemaCXXCast.cpp
index e46ad5bcfeb5..d053d5a9e9fe 100644
--- a/lib/Sema/SemaCXXCast.cpp
+++ b/lib/Sema/SemaCXXCast.cpp
@@ -63,7 +63,8 @@ static void CheckDynamicCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
CastKind &Kind,
CXXCastPath &BasePath);
-static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType);
+static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
+ bool CheckCVR, bool CheckObjCLifetime);
// The Try functions attempt a specific way of casting. If they succeed, they
// return TC_Success. If their way of casting is not appropriate for the given
@@ -109,12 +110,14 @@ static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExp
CXXCastPath &BasePath);
static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType, bool CStyle,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
const SourceRange &OpRange,
unsigned &msg,
CastKind &Kind);
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType, bool CStyle,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
const SourceRange &OpRange,
unsigned &msg,
CastKind &Kind,
@@ -131,17 +134,23 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
- SourceLocation LAngleBracketLoc, ParsedType Ty,
+ SourceLocation LAngleBracketLoc, Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, Expr *E,
SourceLocation RParenLoc) {
-
- TypeSourceInfo *DestTInfo;
- QualType DestType = GetTypeFromParser(Ty, &DestTInfo);
- if (!DestTInfo)
- DestTInfo = Context.getTrivialTypeSourceInfo(DestType, SourceLocation());
- return BuildCXXNamedCast(OpLoc, Kind, DestTInfo, move(E),
+ assert(!D.isInvalidType());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, E->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOptions().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return BuildCXXNamedCast(OpLoc, Kind, TInfo, move(E),
SourceRange(LAngleBracketLoc, RAngleBracketLoc),
SourceRange(LParenLoc, RParenLoc));
}
@@ -248,8 +257,10 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
InitializationKind initKind
- = InitializationKind::CreateCast(/*type range?*/ range,
- (CT == CT_CStyle || CT == CT_Functional));
+ = (CT == CT_CStyle)? InitializationKind::CreateCStyleCast(range.getBegin(),
+ range)
+ : (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range)
+ : InitializationKind::CreateCast(/*type range?*/ range);
InitializationSequence sequence(S, entity, initKind, &src, 1);
assert(sequence.Failed() && "initialization succeeded on second try?");
@@ -373,8 +384,19 @@ static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
/// the cast checkers. Both arguments must denote pointer (possibly to member)
/// types.
+///
+/// \param CheckCVR Whether to check for const/volatile/restrict qualifiers.
+///
+/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
static bool
-CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
+CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
+ bool CheckCVR, bool CheckObjCLifetime) {
+ // If the only checking we care about is for Objective-C lifetime qualifiers,
+ // and we're not in ARC mode, there's nothing to check.
+ if (!CheckCVR && CheckObjCLifetime &&
+ !Self.Context.getLangOptions().ObjCAutoRefCount)
+ return false;
+
// Casting away constness is defined in C++ 5.2.11p8 with reference to
// C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
// the rules are non-trivial. So first we construct Tcv *...cv* as described
@@ -394,13 +416,23 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
// purpose of this check, because other qualifiers (address spaces,
// Objective-C GC, etc.) are part of the type's identity.
while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
- Qualifiers SrcQuals;
+ // Determine the relevant qualifiers at this level.
+ Qualifiers SrcQuals, DestQuals;
Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
- cv1.push_back(Qualifiers::fromCVRMask(SrcQuals.getCVRQualifiers()));
-
- Qualifiers DestQuals;
Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals);
- cv2.push_back(Qualifiers::fromCVRMask(DestQuals.getCVRQualifiers()));
+
+ Qualifiers RetainedSrcQuals, RetainedDestQuals;
+ if (CheckCVR) {
+ RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
+ RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());
+ }
+
+ if (CheckObjCLifetime &&
+ !DestQuals.compatiblyIncludesObjCLifetime(SrcQuals))
+ return true;
+
+ cv1.push_back(RetainedSrcQuals);
+ cv2.push_back(RetainedDestQuals);
}
if (cv1.empty())
return false;
@@ -420,8 +452,10 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
}
// Test if they're compatible.
+ bool ObjCLifetimeConversion;
return SrcConstruct != DestConstruct &&
- !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false);
+ !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false,
+ ObjCLifetimeConversion);
}
/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
@@ -595,9 +629,10 @@ CheckReinterpretCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
}
unsigned msg = diag::err_bad_cxx_cast_generic;
- if (TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/false, OpRange,
- msg, Kind)
- != TC_Success && msg != 0)
+ TryCastResult tcr =
+ TryReinterpretCast(Self, SrcExpr, DestType,
+ /*CStyle*/false, OpRange, msg, Kind);
+ if (tcr != TC_Success && msg != 0)
{
if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
return;
@@ -611,7 +646,12 @@ CheckReinterpretCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
} else {
diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(), DestType);
}
- }
+ } else if (tcr == TC_Success && Self.getLangOptions().ObjCAutoRefCount) {
+ Expr *Exp = SrcExpr.get();
+ // Note that Exp does not change with CCK_OtherCast cast type
+ Self.CheckObjCARCConversion(OpRange, DestType,
+ Exp, Sema::CCK_OtherCast);
+ }
}
@@ -654,8 +694,10 @@ CheckStaticCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
}
unsigned msg = diag::err_bad_cxx_cast_generic;
- if (TryStaticCast(Self, SrcExpr, DestType, /*CStyle*/false, OpRange, msg,
- Kind, BasePath) != TC_Success && msg != 0) {
+ TryCastResult tcr
+ = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
+ Kind, BasePath);
+ if (tcr != TC_Success && msg != 0) {
if (SrcExpr.isInvalid())
return;
if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
@@ -667,6 +709,15 @@ CheckStaticCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
} else {
diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType);
}
+ } else if (tcr == TC_Success) {
+ if (Kind == CK_BitCast)
+ Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
+ if (Self.getLangOptions().ObjCAutoRefCount) {
+ Expr *Exp = SrcExpr.get();
+ // Note that Exp does not change with CCK_OtherCast cast type
+ Self.CheckObjCARCConversion(OpRange, DestType,
+ Exp, Sema::CCK_OtherCast);
+ }
}
else if (Kind == CK_BitCast)
Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
@@ -676,10 +727,15 @@ CheckStaticCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
/// and casting away constness.
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
- QualType DestType, bool CStyle,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
const SourceRange &OpRange, unsigned &msg,
CastKind &Kind,
CXXCastPath &BasePath) {
+ // Determine whether we have the semantics of a C-style cast.
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+
// The order the tests is not entirely arbitrary. There is one conversion
// that can be handled in two different ways. Given:
// struct A {};
@@ -715,7 +771,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// C++ 5.2.9p2: An expression e can be explicitly converted to a type T
// [...] if the declaration "T t(e);" is well-formed, [...].
- tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CStyle, OpRange, msg,
+ tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CCK, OpRange, msg,
Kind);
if (SrcExpr.isInvalid())
return TC_Failed;
@@ -792,10 +848,20 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
QualType DestPointee = DestPointer->getPointeeType();
if (DestPointee->isIncompleteOrObjectType()) {
// This is definitely the intended conversion, but it might fail due
- // to a const violation.
- if (!CStyle && !DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
- msg = diag::err_bad_cxx_cast_qualifiers_away;
- return TC_Failed;
+ // to a qualifier violation. Note that we permit Objective-C lifetime
+ // and GC qualifier mismatches here.
+ if (!CStyle) {
+ Qualifiers DestPointeeQuals = DestPointee.getQualifiers();
+ Qualifiers SrcPointeeQuals = SrcPointee.getQualifiers();
+ DestPointeeQuals.removeObjCGCAttr();
+ DestPointeeQuals.removeObjCLifetime();
+ SrcPointeeQuals.removeObjCGCAttr();
+ SrcPointeeQuals.removeObjCLifetime();
+ if (DestPointeeQuals != SrcPointeeQuals &&
+ !DestPointeeQuals.compatiblyIncludes(SrcPointeeQuals)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
}
Kind = CK_BitCast;
return TC_Success;
@@ -845,6 +911,7 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
// FIXME: Should allow casting away constness if CStyle.
bool DerivedToBase;
bool ObjCConversion;
+ bool ObjCLifetimeConversion;
QualType FromType = SrcExpr->getType();
QualType ToType = R->getPointeeType();
if (CStyle) {
@@ -854,8 +921,9 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
ToType, FromType,
- DerivedToBase, ObjCConversion) <
- Sema::Ref_Compatible_With_Added_Qualification) {
+ DerivedToBase, ObjCConversion,
+ ObjCLifetimeConversion)
+ < Sema::Ref_Compatible_With_Added_Qualification) {
msg = diag::err_bad_lvalue_to_rvalue_cast;
return TC_Failed;
}
@@ -1172,7 +1240,8 @@ TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
/// @c static_cast if the declaration "T t(e);" is well-formed [...].
TryCastResult
TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
- bool CStyle, const SourceRange &OpRange, unsigned &msg,
+ Sema::CheckedConversionKind CCK,
+ const SourceRange &OpRange, unsigned &msg,
CastKind &Kind) {
if (DestType->isRecordType()) {
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
@@ -1184,7 +1253,11 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
InitializationKind InitKind
- = InitializationKind::CreateCast(/*FIXME:*/OpRange, CStyle);
+ = (CCK == Sema::CCK_CStyleCast)
+ ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange)
+ : (CCK == Sema::CCK_FunctionalCast)
+ ? InitializationKind::CreateFunctionalCast(OpRange)
+ : InitializationKind::CreateCast(OpRange);
Expr *SrcExprRaw = SrcExpr.get();
InitializationSequence InitSeq(Self, Entity, InitKind, &SrcExprRaw, 1);
@@ -1193,7 +1266,8 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
// There is no other way that works.
// On the other hand, if we're checking a C-style cast, we've still got
// the reinterpret_cast way.
-
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
return TC_NotApplicable;
@@ -1428,7 +1502,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// constness.
// A reinterpret_cast followed by a const_cast can, though, so in C-style,
// we accept it.
- if (!CStyle && CastsAwayConstness(Self, SrcType, DestType)) {
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
msg = diag::err_bad_cxx_cast_qualifiers_away;
return TC_Failed;
}
@@ -1543,7 +1618,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
// C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
// The C-style cast operator can.
- if (!CStyle && CastsAwayConstness(Self, SrcType, DestType)) {
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
msg = diag::err_bad_cxx_cast_qualifiers_away;
return TC_Failed;
}
@@ -1675,11 +1751,14 @@ Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, ExprValueKind &VK,
if (tcr == TC_Success)
Kind = CK_NoOp;
+ Sema::CheckedConversionKind CCK
+ = FunctionalStyle? Sema::CCK_FunctionalCast
+ : Sema::CCK_CStyleCast;
if (tcr == TC_NotApplicable) {
// ... or if that is not possible, a static_cast, ignoring const, ...
ExprResult CastExprRes = Owned(CastExpr);
- tcr = TryStaticCast(*this, CastExprRes, CastTy, /*CStyle*/true, R, msg,
- Kind, BasePath);
+ tcr = TryStaticCast(*this, CastExprRes, CastTy, CCK, R, msg, Kind,
+ BasePath);
if (CastExprRes.isInvalid())
return ExprError();
CastExpr = CastExprRes.take();
@@ -1694,6 +1773,9 @@ Sema::CXXCheckCStyleCast(SourceRange R, QualType CastTy, ExprValueKind &VK,
}
}
+ if (getLangOptions().ObjCAutoRefCount && tcr == TC_Success)
+ CheckObjCARCConversion(R, CastTy, CastExpr, CCK);
+
if (tcr != TC_Success && msg != 0) {
if (CastExpr->getType() == Context.OverloadTy) {
DeclAccessPair Found;
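
An illustrative aside (not part of the diff): CastsAwayConstness now takes two flags, so reinterpret_cast keeps rejecting casts that drop CVR qualifiers while C-style casts are only checked for Objective-C lifetime qualifiers under ARC. A small sketch of the CVR half in plain C++ (the ARC half needs Objective-C and is omitted; names are made up):

    void g(const int *p) {
      // int *bad = reinterpret_cast<int *>(p);  // rejected: err_bad_cxx_cast_qualifiers_away
      int *ok1 = const_cast<int *>(p);           // const_cast may remove const
      int *ok2 = (int *)p;                       // C-style cast: reinterpret + const_cast
      (void)ok1; (void)ok2;
    }
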
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 61d9e93f2ff5..5f8c9c62a409 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -211,25 +211,40 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
DeclContext *DC) {
assert(DC != 0 && "given null context");
- if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ if (TagDecl *tag = dyn_cast<TagDecl>(DC)) {
// If this is a dependent type, then we consider it complete.
- if (Tag->isDependentContext())
+ if (tag->isDependentContext())
return false;
// If we're currently defining this type, then lookup into the
// type is okay: don't complain that it isn't complete yet.
- const TagType *TagT = Context.getTypeDeclType(Tag)->getAs<TagType>();
- if (TagT && TagT->isBeingDefined())
+ QualType type = Context.getTypeDeclType(tag);
+ const TagType *tagType = type->getAs<TagType>();
+ if (tagType && tagType->isBeingDefined())
return false;
+ SourceLocation loc = SS.getLastQualifierNameLoc();
+ if (loc.isInvalid()) loc = SS.getRange().getBegin();
+
// The type must be complete.
- if (RequireCompleteType(SS.getRange().getBegin(),
- Context.getTypeDeclType(Tag),
+ if (RequireCompleteType(loc, type,
PDiag(diag::err_incomplete_nested_name_spec)
<< SS.getRange())) {
SS.SetInvalid(SS.getRange());
return true;
}
+
+ // Fixed enum types are complete, but they aren't valid as scopes
+ // until we see a definition, so awkwardly pull out this special
+ // case.
+ if (const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType)) {
+ if (!enumType->getDecl()->isDefinition()) {
+ Diag(loc, diag::err_incomplete_nested_name_spec)
+ << type << SS.getRange();
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
+ }
}
return false;
@@ -464,26 +479,29 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// We haven't found anything, and we're not recovering from a
// different kind of error, so look for typos.
DeclarationName Name = Found.getLookupName();
- if (CorrectTypo(Found, S, &SS, LookupCtx, EnteringContext,
- CTC_NoKeywords) &&
- Found.isSingleResult() &&
- isAcceptableNestedNameSpecifier(Found.getAsSingle<NamedDecl>())) {
+ TypoCorrection Corrected;
+ Found.clear();
+ if ((Corrected = CorrectTypo(Found.getLookupNameInfo(),
+ Found.getLookupKind(), S, &SS, LookupCtx,
+ EnteringContext, CTC_NoKeywords)) &&
+ isAcceptableNestedNameSpecifier(Corrected.getCorrectionDecl())) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_suggest)
- << Name << LookupCtx << Found.getLookupName() << SS.getRange()
- << FixItHint::CreateReplacement(Found.getNameLoc(),
- Found.getLookupName().getAsString());
+ << Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
else
Diag(Found.getNameLoc(), diag::err_undeclared_var_use_suggest)
- << Name << Found.getLookupName()
- << FixItHint::CreateReplacement(Found.getNameLoc(),
- Found.getLookupName().getAsString());
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
- if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
- Diag(ND->getLocation(), diag::note_previous_decl)
- << ND->getDeclName();
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ Diag(ND->getLocation(), diag::note_previous_decl) << CorrectedQuotedStr;
+ Found.addDecl(ND);
+ }
+ Found.setLookupName(Corrected.getCorrection());
} else {
- Found.clear();
Found.setLookupName(&Identifier);
}
}
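
A small illustration of the fixed-enum special case added above (not part of the diff; requires -std=c++0x): an enum with a fixed underlying type is a complete type once declared, but it cannot be used as a nested-name-specifier until its definition has been seen.

    enum class Color : int;                    // complete (fixed underlying type), but only declared
    // int c = static_cast<int>(Color::Red);   // rejected here: 'Color' is not yet defined, so it is
    //                                         // not usable as a scope (err_incomplete_nested_name_spec)
    enum class Color : int { Red };            // definition
    int c = static_cast<int>(Color::Red);      // fine from this point on
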
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 05c257ab8332..690a29d281d2 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -319,12 +320,41 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
}
// Memset/memcpy/memmove handling
- if (FDecl->getLinkage() == ExternalLinkage &&
- (!getLangOptions().CPlusPlus || FDecl->isExternC())) {
- if (FnInfo->isStr("memset") || FnInfo->isStr("memcpy") ||
- FnInfo->isStr("memmove"))
- CheckMemsetcpymoveArguments(TheCall, FnInfo);
+ int CMF = -1;
+ switch (FDecl->getBuiltinID()) {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BImemset:
+ CMF = CMF_Memset;
+ break;
+
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BImemcpy:
+ CMF = CMF_Memcpy;
+ break;
+
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BImemmove:
+ CMF = CMF_Memmove;
+ break;
+
+ default:
+ if (FDecl->getLinkage() == ExternalLinkage &&
+ (!getLangOptions().CPlusPlus || FDecl->isExternC())) {
+ if (FnInfo->isStr("memset"))
+ CMF = CMF_Memset;
+ else if (FnInfo->isStr("memcpy"))
+ CMF = CMF_Memcpy;
+ else if (FnInfo->isStr("memmove"))
+ CMF = CMF_Memmove;
+ }
+ break;
}
+
+ if (CMF != -1)
+ CheckMemsetcpymoveArguments(TheCall, CheckedMemoryFunction(CMF), FnInfo);
return false;
}
@@ -382,14 +412,14 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// casts here.
// FIXME: We don't allow floating point scalars as input.
Expr *FirstArg = TheCall->getArg(0);
- if (!FirstArg->getType()->isPointerType()) {
+ const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
<< FirstArg->getType() << FirstArg->getSourceRange();
return ExprError();
}
- QualType ValType =
- FirstArg->getType()->getAs<PointerType>()->getPointeeType();
+ QualType ValType = pointerType->getPointeeType();
if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
!ValType->isBlockPointerType()) {
Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
@@ -397,6 +427,20 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
return ExprError();
}
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
// The majority of builtins return a value, but a few have special return
// types, so allow them to override appropriately below.
QualType ResultType = ValType;
@@ -518,7 +562,8 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
CastKind Kind = CK_Invalid;
ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
- Arg = CheckCastTypes(Arg.get()->getSourceRange(), ValType, Arg.take(), Kind, VK, BasePath);
+ Arg = CheckCastTypes(Arg.get()->getLocStart(), Arg.get()->getSourceRange(),
+ ValType, Arg.take(), Kind, VK, BasePath);
if (Arg.isInvalid())
return ExprError();
@@ -1812,6 +1857,27 @@ static bool isDynamicClassType(QualType T) {
return false;
}
+/// \brief If E is a sizeof expression, returns its argument expression,
+/// otherwise returns NULL.
+static const Expr *getSizeOfExprArg(const Expr* E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf && !SizeOf->isArgumentType())
+ return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
+
+ return 0;
+}
+
+/// \brief If E is a sizeof expression, returns its argument type.
+static QualType getSizeOfArgType(const Expr* E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf)
+ return SizeOf->getTypeOfArgument();
+
+ return QualType();
+}
+
/// \brief Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
@@ -1819,35 +1885,95 @@ static bool isDynamicClassType(QualType T) {
///
/// \param Call The call expression to diagnose.
void Sema::CheckMemsetcpymoveArguments(const CallExpr *Call,
- const IdentifierInfo *FnName) {
+ CheckedMemoryFunction CMF,
+ IdentifierInfo *FnName) {
// It is possible to have a non-standard definition of memset. Validate
- // we have the proper number of arguments, and if not, abort further
- // checking.
- if (Call->getNumArgs() != 3)
+ // we have enough arguments, and if not, abort further checking.
+ if (Call->getNumArgs() < 3)
return;
- unsigned LastArg = FnName->isStr("memset")? 1 : 2;
+ unsigned LastArg = (CMF == CMF_Memset? 1 : 2);
+ const Expr *LenExpr = Call->getArg(2)->IgnoreParenImpCasts();
+
+ // We have special checking when the length is a sizeof expression.
+ QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
+ const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
+ llvm::FoldingSetNodeID SizeOfArgID;
+
for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
+ SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
QualType DestTy = Dest->getType();
if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
QualType PointeeTy = DestPtrTy->getPointeeType();
+
+ // Never warn about void type pointers. This can be used to suppress
+ // false positives.
if (PointeeTy->isVoidType())
continue;
+ // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
+ // actually comparing the expressions for equality. Because computing the
+ // expression IDs can be expensive, we only do this if the diagnostic is
+ // enabled.
+ if (SizeOfArg &&
+ Diags.getDiagnosticLevel(diag::warn_sizeof_pointer_expr_memaccess,
+ SizeOfArg->getExprLoc())) {
+ // We only compute IDs for expressions if the warning is enabled, and
+ // cache the sizeof arg's ID.
+ if (SizeOfArgID == llvm::FoldingSetNodeID())
+ SizeOfArg->Profile(SizeOfArgID, Context, true);
+ llvm::FoldingSetNodeID DestID;
+ Dest->Profile(DestID, Context, true);
+ if (DestID == SizeOfArgID) {
+ unsigned ActionIdx = 0; // Default is to suggest dereferencing.
+ if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
+ if (UnaryOp->getOpcode() == UO_AddrOf)
+              ActionIdx = 1; // If it's an address-of operator, just remove it.
+ if (Context.getTypeSize(PointeeTy) == Context.getCharWidth())
+ ActionIdx = 2; // If the pointee's size is sizeof(char),
+ // suggest an explicit length.
+ DiagRuntimeBehavior(SizeOfArg->getExprLoc(), Dest,
+ PDiag(diag::warn_sizeof_pointer_expr_memaccess)
+ << FnName << ArgIdx << ActionIdx
+ << Dest->getSourceRange()
+ << SizeOfArg->getSourceRange());
+ break;
+ }
+ }
+
+ // Also check for cases where the sizeof argument is the exact same
+ // type as the memory argument, and where it points to a user-defined
+ // record type.
+ if (SizeOfArgTy != QualType()) {
+ if (PointeeTy->isRecordType() &&
+ Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
+ DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
+ PDiag(diag::warn_sizeof_pointer_type_memaccess)
+ << FnName << SizeOfArgTy << ArgIdx
+ << PointeeTy << Dest->getSourceRange()
+ << LenExpr->getSourceRange());
+ break;
+ }
+ }
+
+ unsigned DiagID;
+
// Always complain about dynamic classes.
- if (isDynamicClassType(PointeeTy)) {
- DiagRuntimeBehavior(
- Dest->getExprLoc(), Dest,
- PDiag(diag::warn_dyn_class_memaccess)
- << ArgIdx << FnName << PointeeTy
- << Call->getCallee()->getSourceRange());
- } else {
+ if (isDynamicClassType(PointeeTy))
+ DiagID = diag::warn_dyn_class_memaccess;
+ else if (PointeeTy.hasNonTrivialObjCLifetime() && CMF != CMF_Memset)
+ DiagID = diag::warn_arc_object_memaccess;
+ else
continue;
- }
- SourceRange ArgRange = Call->getArg(0)->getSourceRange();
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(DiagID)
+ << ArgIdx << FnName << PointeeTy
+ << Call->getCallee()->getSourceRange());
+
DiagRuntimeBehavior(
Dest->getExprLoc(), Dest,
PDiag(diag::note_bad_memaccess_silence)
@@ -1873,7 +1999,8 @@ Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
// Perform checking for returned stack addresses, local blocks,
// label addresses or references to temporaries.
- if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
+ if (lhsType->isPointerType() ||
+ (!getLangOptions().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
stackE = EvalAddr(RetValExp, refVars);
} else if (lhsType->isReferenceType()) {
stackE = EvalVal(RetValExp, refVars);
@@ -2044,7 +2171,8 @@ static Expr *EvalAddr(Expr *E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars) {
// pointer values, and pointer-to-pointer conversions.
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass:
- case Stmt::CXXFunctionalCastExprClass: {
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass: {
Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
QualType T = SubExpr->getType();
@@ -2077,6 +2205,14 @@ static Expr *EvalAddr(Expr *E, llvm::SmallVectorImpl<DeclRefExpr *> &refVars) {
return NULL;
}
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalAddr(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars))
+ return Result;
+
+ return E;
+
// Everything else: we simply don't reason about them.
default:
return NULL;
@@ -2178,6 +2314,14 @@ do {
return EvalVal(M->getBase(), refVars);
}
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalVal(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars))
+ return Result;
+
+ return E;
+
default:
// Check that we don't return or take the address of a reference to a
// temporary. This is only useful in C++.
@@ -2442,15 +2586,24 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
case BO_NE:
return IntRange::forBoolType();
- // The type of these compound assignments is the type of the LHS,
- // so the RHS is not necessarily an integer.
+ // The type of the assignments is the type of the LHS, so the RHS
+ // is not necessarily the same type.
case BO_MulAssign:
case BO_DivAssign:
case BO_RemAssign:
case BO_AddAssign:
case BO_SubAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // TODO: bitfields?
return IntRange::forValueOfType(C, E->getType());
+ // Simple assignments just pass through the RHS, which will have
+ // been coerced to the LHS type.
+ case BO_Assign:
+ // TODO: bitfields?
+ return GetExprRange(C, BO->getRHS(), MaxWidth);
+
// Operations with opaque sources are black-listed.
case BO_PtrMemD:
case BO_PtrMemI:
@@ -2506,14 +2659,54 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
case BO_Sub:
if (BO->getLHS()->getType()->isPointerType())
return IntRange::forValueOfType(C, E->getType());
- // fallthrough
+ break;
- default:
+ // The width of a division result is mostly determined by the size
+ // of the LHS.
+ case BO_Div: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(E->getType());
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+
+ // If the divisor is constant, use that.
+ llvm::APSInt divisor;
+ if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
+ unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
+ if (log2 >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width = std::min(L.Width - log2, MaxWidth);
+ return L;
+ }
+
+ // Otherwise, just use the LHS's width.
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+ return IntRange(L.Width, L.NonNegative && R.NonNegative);
+ }
+
+ // The result of a remainder can't be larger than the result of
+ // either side.
+ case BO_Rem: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(E->getType());
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+
+ IntRange meet = IntRange::meet(L, R);
+ meet.Width = std::min(meet.Width, MaxWidth);
+ return meet;
+ }
+
+ // The default behavior is okay for these.
+ case BO_Mul:
+ case BO_Add:
+ case BO_Xor:
+ case BO_Or:
break;
}
- // Treat every other operator as if it were closed on the
- // narrowest type that encompasses both operands.
+ // The default case is to treat the operation as if it were closed
+ // on the narrowest type that encompasses both operands.
IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
return IntRange::join(L, R);
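
A worked example of the new BO_Div handling (illustrative; the function name is made up): when the divisor is an integer constant, the quotient needs roughly floor(log2(divisor)) fewer bits than the dividend, which should let -Wconversion accept narrowings it previously treated as full-width.

    #include <cstdint>

    int16_t scale(int32_t x) {
      // floor(log2(65536)) = 16, so x / 65536 needs at most 32 - 16 = 16 bits;
      // the result range now provably fits in int16_t, so no precision warning.
      return x / 65536;
    }
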
@@ -2837,18 +3030,16 @@ void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,
if (&Value.getSemantics() == &llvm::APFloat::PPCDoubleDouble)
return;
- // Try to convert this exactly to an 64-bit integer. FIXME: It would be
- // nice to support arbitrarily large integers here.
+ // Try to convert this exactly to an integer.
bool isExact = false;
- uint64_t IntegerPart;
- if (Value.convertToInteger(&IntegerPart, 64, /*isSigned=*/true,
+ llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
+ T->hasUnsignedIntegerRepresentation());
+ if (Value.convertToInteger(IntegerValue,
llvm::APFloat::rmTowardZero, &isExact)
!= llvm::APFloat::opOK || !isExact)
return;
- llvm::APInt IntegerValue(64, IntegerPart, /*isSigned=*/true);
-
- std::string LiteralValue = IntegerValue.toString(10, /*isSigned=*/true);
+ std::string LiteralValue = IntegerValue.toString(10);
S.Diag(FL->getExprLoc(), diag::note_fix_integral_float_as_integer)
<< FixItHint::CreateReplacement(FL->getSourceRange(), LiteralValue);
}
@@ -2895,6 +3086,11 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
}
+
+ // If the vector cast is cast between two vectors of the same size, it is
+ // a bitcast, not a conversion.
+ if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ return;
Source = cast<VectorType>(Source)->getElementType().getTypePtr();
Target = cast<VectorType>(Target)->getElementType().getTypePtr();
@@ -2989,9 +3185,7 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
}
- // People want to build with -Wshorten-64-to-32 and not -Wconversion
- // and by god we'll let them.
-
+ // People want to build with -Wshorten-64-to-32 and not -Wconversion.
if (isFromSystemMacro(S, CC))
return;
@@ -3356,3 +3550,268 @@ void Sema::CheckArrayAccess(const Expr *expr) {
}
}
}
+
+//===--- CHECK: Objective-C retain cycles ----------------------------------//
+
+namespace {
+ struct RetainCycleOwner {
+ RetainCycleOwner() : Variable(0), Indirect(false) {}
+ VarDecl *Variable;
+ SourceRange Range;
+ SourceLocation Loc;
+ bool Indirect;
+
+ void setLocsFrom(Expr *e) {
+ Loc = e->getExprLoc();
+ Range = e->getSourceRange();
+ }
+ };
+}
+
+/// Consider whether capturing the given variable can possibly lead to
+/// a retain cycle.
+static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
+ // In ARC, it's captured strongly iff the variable has __strong
+ // lifetime. In MRR, it's captured strongly if the variable is
+ // __block and has an appropriate type.
+ if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ owner.Variable = var;
+ owner.setLocsFrom(ref);
+ return true;
+}
+
+static bool findRetainCycleOwner(Expr *e, RetainCycleOwner &owner) {
+ while (true) {
+ e = e->IgnoreParens();
+ if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_LValueToRValue:
+ case CK_ObjCReclaimReturnedObject:
+ e = cast->getSubExpr();
+ continue;
+
+ case CK_GetObjCProperty: {
+ // Bail out if this isn't a strong explicit property.
+ const ObjCPropertyRefExpr *pre = cast->getSubExpr()->getObjCProperty();
+ if (pre->isImplicitProperty()) return false;
+ ObjCPropertyDecl *property = pre->getExplicitProperty();
+ if (!(property->getPropertyAttributes() &
+ (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_strong)) &&
+ !(property->getPropertyIvarDecl() &&
+ property->getPropertyIvarDecl()->getType()
+ .getObjCLifetime() == Qualifiers::OCL_Strong))
+ return false;
+
+ owner.Indirect = true;
+ e = const_cast<Expr*>(pre->getBase());
+ continue;
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
+ ObjCIvarDecl *ivar = ref->getDecl();
+ if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ // Try to find a retain cycle in the base.
+ if (!findRetainCycleOwner(ref->getBase(), owner))
+ return false;
+
+ if (ref->isFreeIvar()) owner.setLocsFrom(ref);
+ owner.Indirect = true;
+ return true;
+ }
+
+ if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
+ VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
+ if (!var) return false;
+ return considerVariable(var, ref, owner);
+ }
+
+ if (BlockDeclRefExpr *ref = dyn_cast<BlockDeclRefExpr>(e)) {
+ owner.Variable = ref->getDecl();
+ owner.setLocsFrom(ref);
+ return true;
+ }
+
+ if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
+ if (member->isArrow()) return false;
+
+ // Don't count this as an indirect ownership.
+ e = member->getBase();
+ continue;
+ }
+
+ // Array ivars?
+
+ return false;
+ }
+}
+
+namespace {
+ struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
+ FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
+ : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
+ Variable(variable), Capturer(0) {}
+
+ VarDecl *Variable;
+ Expr *Capturer;
+
+ void VisitDeclRefExpr(DeclRefExpr *ref) {
+ if (ref->getDecl() == Variable && !Capturer)
+ Capturer = ref;
+ }
+
+ void VisitBlockDeclRefExpr(BlockDeclRefExpr *ref) {
+ if (ref->getDecl() == Variable && !Capturer)
+ Capturer = ref;
+ }
+
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
+ if (Capturer) return;
+ Visit(ref->getBase());
+ if (Capturer && ref->isFreeIvar())
+ Capturer = ref;
+ }
+
+ void VisitBlockExpr(BlockExpr *block) {
+ // Look inside nested blocks
+ if (block->getBlockDecl()->capturesVariable(Variable))
+ Visit(block->getBlockDecl()->getBody());
+ }
+ };
+}
+
+/// Check whether the given argument is a block which captures a
+/// variable.
+static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
+ assert(owner.Variable && owner.Loc.isValid());
+
+ e = e->IgnoreParenCasts();
+ BlockExpr *block = dyn_cast<BlockExpr>(e);
+ if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
+ return 0;
+
+ FindCaptureVisitor visitor(S.Context, owner.Variable);
+ visitor.Visit(block->getBlockDecl()->getBody());
+ return visitor.Capturer;
+}
+
+static void diagnoseRetainCycle(Sema &S, Expr *capturer,
+ RetainCycleOwner &owner) {
+ assert(capturer);
+ assert(owner.Variable && owner.Loc.isValid());
+
+ S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
+ << owner.Variable << capturer->getSourceRange();
+ S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
+ << owner.Indirect << owner.Range;
+}
+
+/// Check for a keyword selector that starts with the word 'add' or
+/// 'set'.
+static bool isSetterLikeSelector(Selector sel) {
+ if (sel.isUnarySelector()) return false;
+
+ llvm::StringRef str = sel.getNameForSlot(0);
+ while (!str.empty() && str.front() == '_') str = str.substr(1);
+ if (str.startswith("set") || str.startswith("add"))
+ str = str.substr(3);
+ else
+ return false;
+
+ if (str.empty()) return true;
+ return !islower(str.front());
+}
+
+/// Check a message send to see if it's likely to cause a retain cycle.
+void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
+ // Only check instance methods whose selector looks like a setter.
+ if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
+ return;
+
+ // Try to find a variable that the receiver is strongly owned by.
+ RetainCycleOwner owner;
+ if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ if (!findRetainCycleOwner(msg->getInstanceReceiver(), owner))
+ return;
+ } else {
+ assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ owner.Variable = getCurMethodDecl()->getSelfDecl();
+ owner.Loc = msg->getSuperLoc();
+ owner.Range = msg->getSuperLoc();
+ }
+
+ // Check whether the receiver is captured by any of the arguments.
+ for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i)
+ if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner))
+ return diagnoseRetainCycle(*this, capturer, owner);
+}
+
+/// Check a property assign to see if it's likely to cause a retain cycle.
+void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
+ RetainCycleOwner owner;
+ if (!findRetainCycleOwner(receiver, owner))
+ return;
+
+ if (Expr *capturer = findCapturingExpr(*this, argument, owner))
+ diagnoseRetainCycle(*this, capturer, owner);
+}
+
+bool Sema::checkUnsafeAssigns(SourceLocation Loc,
+ QualType LHS, Expr *RHS) {
+ Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
+ if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
+ return false;
+  // Strip off any implicit casts that were added to get to the ARC-specific cast.
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ObjCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_assign)
+ << (LT == Qualifiers::OCL_ExplicitNone)
+ << RHS->getSourceRange();
+ return true;
+ }
+ RHS = cast->getSubExpr();
+ }
+ return false;
+}
+
+void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ QualType LHSType = LHS->getType();
+ if (checkUnsafeAssigns(Loc, LHSType, RHS))
+ return;
+ Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
+ // FIXME. Check for other life times.
+ if (LT != Qualifiers::OCL_None)
+ return;
+
+ if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(LHS)) {
+ if (PRE->isImplicitProperty())
+ return;
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (!PD)
+ return;
+
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_assign)
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ObjCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_property_assign)
+ << RHS->getSourceRange();
+ return;
+ }
+ RHS = cast->getSubExpr();
+ }
+ }
+}
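Both helpers look through implicit casts for CK_ObjCConsumeObject, i.e. a +1 (retained) value being handed to a destination that will not keep it alive. A hedged sketch of the situations covered, as comments only (the class, method and property names are invented):

    // checkUnsafeAssigns: a retained object assigned to a weak or unsafe lvalue.
    //   __weak id w = [[NSObject alloc] init];       // may be deallocated immediately
    //   __unsafe_unretained id u = [Foo newWidget];  // left dangling once released
    // checkUnsafeExprAssigns: the same pattern through an explicit 'assign' property.
    //   self.delegate = [[Helper alloc] init];       // with @property (assign) id delegate;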
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index e328eeb0aa2b..b555c8a9aa1f 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -1676,6 +1676,34 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
// Fall through: conditions and statements can have expressions.
case Sema::PCC_ParenthesizedExpression:
+ if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ CCC == Sema::PCC_ParenthesizedExpression) {
+ // (__bridge <type>)<expression>
+ Builder.AddTypedTextChunk("__bridge");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_transfer <Objective-C type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_transfer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("Objective-C type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_retained <CF type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_retained");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("CF type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ // Fall through
+
case Sema::PCC_Expression: {
if (SemaRef.getLangOptions().CPlusPlus) {
// 'this', if we're in a non-static member function.
@@ -1828,7 +1856,8 @@ static const char *GetCompletionTypeString(QualType T,
CodeCompletionAllocator &Allocator) {
PrintingPolicy Policy(Context.PrintingPolicy);
Policy.AnonymousTagLocations = false;
-
+ Policy.SuppressStrongLifetime = true;
+
if (!T.getLocalQualifiers()) {
// Built-in type names are constant strings.
if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
@@ -1878,9 +1907,9 @@ static void AddResultTypeChunk(ASTContext &Context,
T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
else if (isa<UnresolvedUsingValueDecl>(ND)) {
/* Do nothing: ignore unresolved using declarations*/
- } else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ } else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND)) {
T = Value->getType();
- else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ } else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
T = Property->getType();
if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
@@ -1907,6 +1936,10 @@ static void MaybeAddSentinel(ASTContext &Context, NamedDecl *FunctionOrMethod,
static std::string FormatFunctionParameter(ASTContext &Context,
ParmVarDecl *Param,
bool SuppressName = false) {
+ PrintingPolicy Policy(Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -1917,8 +1950,7 @@ static std::string FormatFunctionParameter(ASTContext &Context,
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
Result = Param->getIdentifier()->getName();
- Param->getType().getAsStringInternal(Result,
- Context.PrintingPolicy);
+ Param->getType().getAsStringInternal(Result, Policy);
if (ObjCMethodParam) {
Result = "(" + Result;
@@ -1968,8 +2000,7 @@ static std::string FormatFunctionParameter(ASTContext &Context,
// We were unable to find a FunctionProtoTypeLoc with parameter names
// for the block; just use the parameter type as a placeholder.
std::string Result;
- Param->getType().getUnqualifiedType().
- getAsStringInternal(Result, Context.PrintingPolicy);
+ Param->getType().getUnqualifiedType().getAsStringInternal(Result, Policy);
if (ObjCMethodParam) {
Result = "(" + Result;
@@ -1986,7 +2017,7 @@ static std::string FormatFunctionParameter(ASTContext &Context,
std::string Result;
QualType ResultType = Block->getTypePtr()->getResultType();
if (!ResultType->isVoidType())
- ResultType.getAsStringInternal(Result, Context.PrintingPolicy);
+ ResultType.getAsStringInternal(Result, Policy);
Result = '^' + Result;
if (!BlockProto || Block->getNumArgs() == 0) {
@@ -2071,6 +2102,9 @@ static void AddTemplateParameterChunks(ASTContext &Context,
unsigned MaxParameters = 0,
unsigned Start = 0,
bool InDefaultArg = false) {
+ PrintingPolicy Policy(Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+
typedef CodeCompletionString::Chunk Chunk;
bool FirstParameter = true;
@@ -2098,8 +2132,7 @@ static void AddTemplateParameterChunks(ASTContext &Context,
= dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
- NTTP->getType().getAsStringInternal(PlaceholderStr,
- Context.PrintingPolicy);
+ NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
assert(isa<TemplateTemplateParmDecl>(*P));
@@ -2286,6 +2319,10 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
typedef CodeCompletionString::Chunk Chunk;
CodeCompletionBuilder Result(Allocator, Priority, Availability);
+ PrintingPolicy Policy(S.Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+
if (Kind == RK_Pattern) {
Pattern->Priority = Priority;
Pattern->Availability = Availability;
@@ -2470,7 +2507,7 @@ CodeCompletionResult::CreateCodeCompletionString(Sema &S,
if ((*P)->getType()->isBlockPointerType() && !DeclaringEntity)
Arg = FormatFunctionParameter(S.Context, *P, true);
else {
- (*P)->getType().getAsStringInternal(Arg, S.Context.PrintingPolicy);
+ (*P)->getType().getAsStringInternal(Arg, Policy);
Arg = "(" + Arg + ")";
if (IdentifierInfo *II = (*P)->getIdentifier())
if (DeclaringEntity || AllParametersAreInformative)
@@ -2519,7 +2556,10 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
Sema &S,
CodeCompletionAllocator &Allocator) const {
typedef CodeCompletionString::Chunk Chunk;
-
+ PrintingPolicy Policy(S.Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+
// FIXME: Set priority, availability appropriately.
CodeCompletionBuilder Result(Allocator, 1, CXAvailability_Available);
FunctionDecl *FDecl = getFunction();
@@ -2545,7 +2585,7 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
else
Result.AddTextChunk(
Result.getAllocator().CopyString(
- Proto->getResultType().getAsString(S.Context.PrintingPolicy)));
+ Proto->getResultType().getAsString(Policy)));
Result.AddChunk(Chunk(CodeCompletionString::CK_LeftParen));
unsigned NumParams = FDecl? FDecl->getNumParams() : Proto->getNumArgs();
@@ -2563,7 +2603,7 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
ArgType = Proto->getArgType(I);
}
- ArgType.getAsStringInternal(ArgString, S.Context.PrintingPolicy);
+ ArgType.getAsStringInternal(ArgString, Policy);
if (I == CurrentArg)
Result.AddChunk(Chunk(CodeCompletionString::CK_CurrentParameter,
@@ -3204,8 +3244,23 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *BaseE,
return;
}
+ enum CodeCompletionContext::Kind contextKind;
+
+ if (IsArrow) {
+ contextKind = CodeCompletionContext::CCC_ArrowMemberAccess;
+ }
+ else {
+ if (BaseType->isObjCObjectPointerType() ||
+ BaseType->isObjCObjectOrInterfaceType()) {
+ contextKind = CodeCompletionContext::CCC_ObjCPropertyAccess;
+ }
+ else {
+ contextKind = CodeCompletionContext::CCC_DotMemberAccess;
+ }
+ }
+
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext(CodeCompletionContext::CCC_MemberAccess,
+ CodeCompletionContext(contextKind,
BaseType),
&ResultBuilder::IsMember);
Results.EnterNewScope();
@@ -3431,10 +3486,17 @@ void Sema::CodeCompleteCase(Scope *S) {
}
Results.ExitScope();
- if (CodeCompleter->includeMacros())
+ // We need to make sure we're setting the right context, so only say we
+ // include macros if the code completer says we do.
+ enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
+ if (CodeCompleter->includeMacros()) {
AddMacroResults(PP, Results);
+ kind = CodeCompletionContext::CCC_OtherWithMacros;
+ }
+
+
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_OtherWithMacros,
+ kind,
Results.data(),Results.size());
}
@@ -3786,6 +3848,10 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
CXXCtorInitializer** Initializers,
unsigned NumInitializers) {
+ PrintingPolicy Policy(Context.PrintingPolicy);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+
CXXConstructorDecl *Constructor
= static_cast<CXXConstructorDecl *>(ConstructorD);
if (!Constructor)
@@ -3825,7 +3891,7 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
Builder.AddTypedTextChunk(
Results.getAllocator().CopyString(
- Base->getType().getAsString(Context.PrintingPolicy)));
+ Base->getType().getAsString(Policy)));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -3850,7 +3916,7 @@ void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
Builder.AddTypedTextChunk(
Builder.getAllocator().CopyString(
- Base->getType().getAsString(Context.PrintingPolicy)));
+ Base->getType().getAsString(Policy)));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
@@ -4126,18 +4192,24 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
(Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain)))
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong)))
return true;
- // Check for more than one of { assign, copy, retain }.
+ // Check for more than one of { assign, copy, retain, strong }.
unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain);
+ ObjCDeclSpec::DQ_PR_retain|
+ ObjCDeclSpec::DQ_PR_strong);
if (AssignCopyRetMask &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
- AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain)
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong)
return true;
return false;
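This helper only filters code-completion suggestions: once one attribute of a group has been typed, the rest of that group is no longer offered, and the additions above fold 'strong' and 'unsafe_unretained' into those groups. For example (comments only):

    // After "@property (readonly, " the completer no longer offers readwrite,
    // assign, unsafe_unretained, copy, retain or strong.
    // After "@property (retain, " it no longer offers assign, unsafe_unretained,
    // copy or strong -- at most one attribute of that group is suggested.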
@@ -4157,10 +4229,15 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
Results.AddResult(CodeCompletionResult("readonly"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
Results.AddResult(CodeCompletionResult("assign"));
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCDeclSpec::DQ_PR_unsafe_unretained))
+ Results.AddResult(CodeCompletionResult("unsafe_unretained"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
Results.AddResult(CodeCompletionResult("readwrite"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
Results.AddResult(CodeCompletionResult("retain"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
+ Results.AddResult(CodeCompletionResult("strong"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
Results.AddResult(CodeCompletionResult("copy"));
if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
@@ -4522,6 +4599,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
if (Method->isInstanceMethod())
return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
.Case("retain", IFace)
+ .Case("strong", IFace)
.Case("autorelease", IFace)
.Case("copy", IFace)
.Case("copyWithZone", IFace)
@@ -4878,7 +4956,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
bool AtArgumentExpression,
bool IsSuper) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCClassMessage);
AddClassMessageCompletions(*this, S, Receiver, SelIdents, NumSelIdents,
AtArgumentExpression, IsSuper, Results);
@@ -4898,7 +4976,7 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
}
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_ObjCClassMessage,
Results.data(), Results.size());
}
@@ -4941,7 +5019,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
// Build the set of methods we can see.
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCInstanceMessage);
Results.EnterNewScope();
// If this is a send-to-super, try to add the special "super" send
@@ -5054,7 +5132,7 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
}
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_ObjCInstanceMessage,
Results.data(),Results.size());
}
@@ -5245,8 +5323,7 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
false, Results);
Results.ExitScope();
- // FIXME: Add a special context for this, use cached global completion
- // results.
+ // FIXME: Use cached global completion results.
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
@@ -5255,7 +5332,7 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCSuperclass);
Results.EnterNewScope();
// Make sure that we ignore the class we're currently defining.
@@ -5269,10 +5346,9 @@ void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
false, Results);
Results.ExitScope();
- // FIXME: Add a special context for this, use cached global completion
- // results.
+ // FIXME: Use cached global completion results.
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_ObjCSuperclass,
Results.data(),Results.size());
}
@@ -5286,8 +5362,7 @@ void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
true, Results);
Results.ExitScope();
- // FIXME: Add a special context for this, use cached global completion
- // results.
+ // FIXME: Use cached global completion results.
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_Other,
Results.data(),Results.size());
@@ -5299,7 +5374,7 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
typedef CodeCompletionResult Result;
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCCategoryName);
// Ignore any categories we find that have already been implemented by this
// interface.
@@ -5323,7 +5398,7 @@ void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_ObjCCategoryName,
Results.data(),Results.size());
}
@@ -5342,7 +5417,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
- CodeCompletionContext::CCC_Other);
+ CodeCompletionContext::CCC_ObjCCategoryName);
// Add all of the categories that have corresponding interface
// declarations in this class and any of its superclasses, except for
@@ -5363,7 +5438,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
- CodeCompletionContext::CCC_Other,
+ CodeCompletionContext::CCC_ObjCCategoryName,
Results.data(),Results.size());
}
@@ -6273,6 +6348,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
Results.EnterNewScope();
PrintingPolicy Policy(Context.PrintingPolicy);
Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
for (KnownMethodsMap::iterator M = KnownMethods.begin(),
MEnd = KnownMethods.end();
M != MEnd; ++M) {
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 9446c0e8c03f..9d91a48bdcd7 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -45,7 +45,12 @@
using namespace clang;
using namespace sema;
-Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr) {
+Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
+ if (OwnedType) {
+ Decl *Group[2] = { OwnedType, Ptr };
+ return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2));
+ }
+
return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
}
@@ -287,41 +292,42 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
- LookupResult Lookup(*this, &II, IILoc, LookupOrdinaryName,
- NotForRedeclaration);
+ if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(&II, IILoc),
+ LookupOrdinaryName, S, SS, NULL,
+ false, CTC_Type)) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
- if (DeclarationName Corrected = CorrectTypo(Lookup, S, SS, 0, 0, CTC_Type)) {
- if (NamedDecl *Result = Lookup.getAsSingle<NamedDecl>()) {
+ if (Corrected.isKeyword()) {
+ // We corrected to a keyword.
+ // FIXME: Actually recover with the keyword we suggest, and emit a fix-it.
+ Diag(IILoc, diag::err_unknown_typename_suggest)
+ << &II << CorrectedQuotedStr;
+ return true;
+ } else {
+ NamedDecl *Result = Corrected.getCorrectionDecl();
if ((isa<TypeDecl>(Result) || isa<ObjCInterfaceDecl>(Result)) &&
!Result->isInvalidDecl()) {
// We found a similarly-named type or interface; suggest that.
if (!SS || !SS->isSet())
Diag(IILoc, diag::err_unknown_typename_suggest)
- << &II << Lookup.getLookupName()
- << FixItHint::CreateReplacement(SourceRange(IILoc),
- Result->getNameAsString());
+ << &II << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, diag::err_unknown_nested_typename_suggest)
- << &II << DC << Lookup.getLookupName() << SS->getRange()
- << FixItHint::CreateReplacement(SourceRange(IILoc),
- Result->getNameAsString());
+ << &II << DC << CorrectedQuotedStr << SS->getRange()
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
else
llvm_unreachable("could not have corrected a typo here");
Diag(Result->getLocation(), diag::note_previous_decl)
- << Result->getDeclName();
+ << CorrectedQuotedStr;
SuggestedType = getTypeName(*Result->getIdentifier(), IILoc, S, SS,
false, false, ParsedType(),
/*NonTrivialTypeSourceInfo=*/true);
return true;
}
- } else if (Lookup.empty()) {
- // We corrected to a keyword.
- // FIXME: Actually recover with the keyword we suggest, and emit a fix-it.
- Diag(IILoc, diag::err_unknown_typename_suggest)
- << &II << Corrected;
- return true;
}
}
@@ -509,11 +515,15 @@ Corrected:
// Perform typo correction to determine if there is another name that is
// close to this name.
if (!SecondTry) {
- if (DeclarationName Corrected = CorrectTypo(Result, S, &SS)) {
+ SecondTry = true;
+ if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
+ Result.getLookupKind(), S, &SS)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
+ std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
- NamedDecl *FirstDecl = Result.empty()? 0 : *Result.begin();
+ NamedDecl *FirstDecl = Corrected.getCorrectionDecl();
NamedDecl *UnderlyingFirstDecl
= FirstDecl? FirstDecl->getUnderlyingDecl() : 0;
if (getLangOptions().CPlusPlus && NextToken.is(tok::less) &&
@@ -527,26 +537,33 @@ Corrected:
UnqualifiedDiag = diag::err_unknown_typename_suggest;
QualifiedDiag = diag::err_unknown_nested_typename_suggest;
}
-
+
if (SS.isEmpty())
Diag(NameLoc, UnqualifiedDiag)
- << Name << Corrected
- << FixItHint::CreateReplacement(NameLoc, Corrected.getAsString());
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(NameLoc, CorrectedStr);
else
Diag(NameLoc, QualifiedDiag)
- << Name << computeDeclContext(SS, false) << Corrected
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange()
- << FixItHint::CreateReplacement(NameLoc, Corrected.getAsString());
+ << FixItHint::CreateReplacement(NameLoc, CorrectedStr);
// Update the name, so that the caller has the new name.
- Name = Corrected.getAsIdentifierInfo();
+ Name = Corrected.getCorrectionAsIdentifierInfo();
+ // Also update the LookupResult...
+ // FIXME: This should probably go away at some point
+ Result.clear();
+ Result.setLookupName(Corrected.getCorrection());
+ if (FirstDecl) Result.addDecl(FirstDecl);
+
// Typo correction corrected to a keyword.
- if (Result.empty())
- return Corrected.getAsIdentifierInfo();
+ if (Corrected.isKeyword())
+ return Corrected.getCorrectionAsIdentifierInfo();
- Diag(FirstDecl->getLocation(), diag::note_previous_decl)
- << FirstDecl->getDeclName();
+ if (FirstDecl)
+ Diag(FirstDecl->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
// If we found an Objective-C instance variable, let
// LookupInObjCMethod build the appropriate expression to
@@ -1137,17 +1154,18 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
/// class could not be found.
ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
- bool TypoCorrection) {
+ bool DoTypoCorrection) {
// The third "scope" argument is 0 since we aren't enabling lazy built-in
// creation from this context.
NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
- if (!IDecl && TypoCorrection) {
+ if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
- LookupResult R(*this, Id, IdLoc, LookupOrdinaryName);
- if (CorrectTypo(R, TUScope, 0, 0, false, CTC_NoKeywords) &&
- (IDecl = R.getAsSingle<ObjCInterfaceDecl>())) {
+ TypoCorrection C;
+ if ((C = CorrectTypo(DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName,
+ TUScope, NULL, NULL, false, CTC_NoKeywords)) &&
+ (IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
Diag(IdLoc, diag::err_undef_interface_suggest)
<< Id << IDecl->getDeclName()
<< FixItHint::CreateReplacement(IdLoc, IDecl->getNameAsString());
@@ -1359,7 +1377,7 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
// The types match. Link up the redeclaration chain if the old
// declaration was a typedef.
- // FIXME: this is a potential source of wierdness if the type
+ // FIXME: this is a potential source of weirdness if the type
// spellings don't match exactly.
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old))
New->setPreviousDeclaration(Typedef);
@@ -1527,7 +1545,8 @@ Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
/// GNU89 mode.
static bool canRedefineFunction(const FunctionDecl *FD,
const LangOptions& LangOpts) {
- return (LangOpts.GNUInline && !LangOpts.CPlusPlus &&
+ return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) &&
+ !LangOpts.CPlusPlus &&
FD->isInlineSpecified() &&
FD->getStorageClass() == SC_Extern);
}
@@ -1927,6 +1946,7 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
return false;
}
+
void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
const ObjCMethodDecl *oldMethod) {
// Merge the attributes.
@@ -2037,6 +2057,15 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
mergeDeclAttributes(New, Old, Context);
+ // Warn if an already-declared variable is made a weak_import in a subsequent declaration
+ if (New->getAttr<WeakImportAttr>() &&
+ Old->getStorageClass() == SC_None &&
+ !Old->getAttr<WeakImportAttr>()) {
+ Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ // Remove weak_import attribute on new declaration.
+ New->dropAttr<WeakImportAttr>();
+ }
// Merge the types.
MergeVarDeclTypes(New, Old);
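The new diagnostic above fires when weak_import appears only on a later declaration of a variable that was first declared without it; the attribute is then dropped from the new declaration. A hypothetical pair of declarations that would trigger it:

    // int gSharedCounter;                                      // first declaration, no storage class
    // extern int gSharedCounter __attribute__((weak_import));  // warns; weak_import is removed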
@@ -2597,7 +2626,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
}
// Mock up a declarator.
- Declarator Dc(DS, Declarator::TypeNameContext);
+ Declarator Dc(DS, Declarator::MemberContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
@@ -2736,6 +2765,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
switch (Name.getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
case UnqualifiedId::IK_Identifier:
NameInfo.setName(Name.Identifier);
NameInfo.setLoc(Name.StartLocation);
@@ -2924,10 +2954,9 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
return false;
}
-Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D,
- bool IsFunctionDefinition) {
+Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
return HandleDeclarator(S, D, MultiTemplateParamsArg(*this),
- IsFunctionDefinition);
+ /*IsFunctionDefinition=*/false);
}
/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
@@ -3274,8 +3303,18 @@ Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND,
if (S && IdResolver.ReplaceDecl(PrevDecl, ND)) {
// The previous declaration was found on the identifier resolver
// chain, so remove it from its scope.
- while (S && !S->isDeclScope(PrevDecl))
- S = S->getParent();
+
+ if (S->isDeclScope(PrevDecl)) {
+ // Special case for redeclarations in the SAME scope.
+ // Because this declaration is going to be added to the identifier chain
+ // later, we should temporarily take it OFF the chain.
+ IdResolver.RemoveDecl(ND);
+
+ } else {
+ // Find the scope for the original declaration.
+ while (S && !S->isDeclScope(PrevDecl))
+ S = S->getParent();
+ }
if (S)
S->RemoveDecl(PrevDecl);
@@ -3472,6 +3511,50 @@ static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
}
+bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
+ QualType type = decl->getType();
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+ if (lifetime == Qualifiers::OCL_Autoreleasing) {
+ // Various kinds of declaration aren't allowed to be __autoreleasing.
+ unsigned kind = -1U;
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ if (var->hasAttr<BlocksAttr>())
+ kind = 0; // __block
+ else if (!var->hasLocalStorage())
+ kind = 1; // global
+ } else if (isa<ObjCIvarDecl>(decl)) {
+ kind = 3; // ivar
+ } else if (isa<FieldDecl>(decl)) {
+ kind = 2; // field
+ }
+
+ if (kind != -1U) {
+ Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
+ << kind;
+ }
+ } else if (lifetime == Qualifiers::OCL_None) {
+ // Try to infer lifetime.
+ if (!type->isObjCLifetimeType())
+ return false;
+
+ lifetime = type->getObjCARCImplicitLifetime();
+ type = Context.getLifetimeQualifiedType(type, lifetime);
+ decl->setType(type);
+ }
+
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ // Thread-local variables cannot have lifetime.
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
+ var->isThreadSpecified()) {
+ Diag(var->getLocation(), diag::err_arc_thread_ownership)
+ << var->getType();
+ return true;
+ }
+ }
+
+ return false;
+}
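In rough terms, this inference gives every retainable declaration an ownership qualifier when the programmer wrote none, and rejects __autoreleasing where it cannot work. A sketch of the resulting behaviour under -fobjc-arc, as comments only:

    // NSString *title;                 // local variable: __strong is inferred
    // __autoreleasing NSError *err;    // fine for an ordinary local
    // static __autoreleasing id c;     // error: a global cannot be __autoreleasing (kind 1)
    // __block __autoreleasing id t;    // error: nor can a __block variable (kind 0)
    // __thread __strong id perThread;  // error: thread-local storage cannot carry ownership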
+
NamedDecl*
Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
QualType R, TypeSourceInfo *TInfo,
@@ -3630,6 +3713,11 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
+ // In auto-retain/release, infer strong retention for variables of
+ // retainable type.
+ if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
+ NewVD->setInvalidDecl();
+
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*)D.getAsmLabel()) {
// The parser guarantees this is a string.
@@ -4007,10 +4095,10 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(),
E = Paths.found_decls_end(); I != E; ++I) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
+ MD->addOverriddenMethod(OldMD->getCanonicalDecl());
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
- MD->addOverriddenMethod(OldMD->getCanonicalDecl());
AddedAny = true;
}
}
@@ -4035,7 +4123,7 @@ static void DiagnoseInvalidRedeclaration(Sema &S, FunctionDecl *NewFD) {
}
NamedDecl*
-Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
QualType R, TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
@@ -4611,9 +4699,18 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// A storage-class-specifier shall not be specified in an explicit
// specialization (14.7.3)
if (SC != SC_None) {
- Diag(NewFD->getLocation(),
- diag::err_explicit_specialization_storage_class)
- << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ if (SC != NewFD->getStorageClass())
+ Diag(NewFD->getLocation(),
+ diag::err_explicit_specialization_inconsistent_storage_class)
+ << SC
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+
+ else
+ Diag(NewFD->getLocation(),
+ diag::ext_explicit_specialization_storage_class)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
}
} else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) {
@@ -5208,12 +5305,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
if (!VDecl) {
- if (getLangOptions().CPlusPlus &&
- RealDecl->getLexicalDeclContext()->isRecord() &&
- isa<NamedDecl>(RealDecl))
- Diag(RealDecl->getLocation(), diag::err_member_initialization);
- else
- Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
+ assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here");
+ Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
RealDecl->setInvalidDecl();
return;
}
@@ -5232,6 +5325,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
VDecl->setTypeSourceInfo(DeducedType);
VDecl->setType(DeducedType->getType());
+ // In ARC, infer lifetime.
+ if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ VDecl->setInvalidDecl();
+
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDeclaration())
@@ -5372,15 +5469,23 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// We allow integer constant expressions in all cases.
} else if (T->isIntegralOrEnumerationType()) {
- if (!Init->isValueDependent()) {
- // Check whether the expression is a constant expression.
- llvm::APSInt Value;
- SourceLocation Loc;
- if (!Init->isIntegerConstantExpr(Value, Context, &Loc)) {
- Diag(Loc, diag::err_in_class_initializer_non_constant)
- << Init->getSourceRange();
- VDecl->setInvalidDecl();
- }
+ // Check whether the expression is a constant expression.
+ SourceLocation Loc;
+ if (Init->isValueDependent())
+ ; // Nothing to check.
+ else if (Init->isIntegerConstantExpr(Context, &Loc))
+ ; // Ok, it's an ICE!
+ else if (Init->isEvaluatable(Context)) {
+ // If we can constant fold the initializer through heroics, accept it,
+ // but report this as a use of an extension for -pedantic.
+ Diag(Loc, diag::ext_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ } else {
+ // Otherwise, this is some crazy unknown case. Report the issue at the
+ // location provided by the isIntegerConstantExpr failed check.
+ Diag(Loc, diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
}
// We allow floating-point constants as an extension in C++03, and
@@ -5466,7 +5571,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// Check any implicit conversions within the expression.
CheckImplicitConversions(Init, VDecl->getLocation());
-
+
+ if (!VDecl->isInvalidDecl())
+ checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
+
Init = MaybeCreateExprWithCleanups(Init);
// Attach the initializer to the decl.
VDecl->setInit(Init);
@@ -5735,6 +5843,23 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) {
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
+ // In ARC, don't allow jumps past the implicit initialization of a
+ // local retaining variable.
+ if (getLangOptions().ObjCAutoRefCount &&
+ var->hasLocalStorage()) {
+ switch (var->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+
// All the following checks are C++ only.
if (!getLangOptions().CPlusPlus) return;
@@ -5874,21 +5999,13 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
DiagnoseFunctionSpecifiers(D);
- TagDecl *OwnedDecl = 0;
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType parmDeclType = TInfo->getType();
if (getLangOptions().CPlusPlus) {
// Check that there are no default arguments inside the type of this
// parameter.
CheckExtraCXXDefaultArguments(D);
-
- if (OwnedDecl && OwnedDecl->isDefinition()) {
- // C++ [dcl.fct]p6:
- // Types shall not be defined in return or parameter types.
- Diag(OwnedDecl->getLocation(), diag::err_type_defined_in_param_type)
- << Context.getTypeDeclType(OwnedDecl);
- }
// Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
if (D.getCXXScopeSpec().isSet()) {
@@ -6003,7 +6120,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
// Warn if the return value is pass-by-value and larger than the specified
// threshold.
- if (ReturnTy->isPODType()) {
+ if (ReturnTy.isPODType(Context)) {
unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(D->getLocation(), diag::warn_return_value_size)
@@ -6014,7 +6131,7 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
// threshold.
for (; Param != ParamEnd; ++Param) {
QualType T = (*Param)->getType();
- if (!T->isPODType())
+ if (!T.isPODType(Context))
continue;
unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
@@ -6028,8 +6145,31 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
QualType T, TypeSourceInfo *TSInfo,
VarDecl::StorageClass StorageClass,
VarDecl::StorageClass StorageClassAsWritten) {
+ // In ARC, infer a lifetime qualifier for appropriate parameter types.
+ if (getLangOptions().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_None &&
+ T->isObjCLifetimeType()) {
+
+ Qualifiers::ObjCLifetime lifetime;
+
+ // Special cases for arrays:
+ // - if it's const, use __unsafe_unretained
+ // - otherwise, it's an error
+ if (T->isArrayType()) {
+ if (!T.isConstQualified()) {
+ Diag(NameLoc, diag::err_arc_array_param_no_ownership)
+ << TSInfo->getTypeLoc().getSourceRange();
+ }
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ lifetime = T->getObjCARCImplicitLifetime();
+ }
+ T = Context.getLifetimeQualifiedType(T, lifetime);
+ }
+
ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
- adjustParameterType(T), TSInfo,
+ Context.getAdjustedParameterType(T),
+ TSInfo,
StorageClass, StorageClassAsWritten,
0);
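The parameter-specific rule added above treats arrays specially, since an array parameter decays to a pointer and ARC cannot manage its elements through it. Roughly, with illustrative signatures:

    // void logAll(id objs[], unsigned n);        // error: array parameter needs explicit ownership
    // void logAll(id const objs[], unsigned n);  // accepted, treated as __unsafe_unretained
    // void setName(NSString *name);              // ordinary parameter: __strong is inferred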
@@ -6318,6 +6458,10 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Implements C++ [basic.start.main]p5 and C99 5.1.2.2.3.
FD->setHasImplicitReturnZero(true);
WP.disableCheckFallThrough();
+ } else if (FD->hasAttr<NakedAttr>()) {
+ // If the function is marked 'naked', don't complain about missing return
+ // statements.
+ WP.disableCheckFallThrough();
}
// MSVC permits the use of pure specifier (=0) on function definition,
@@ -6364,7 +6508,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// Verify that gotos and switch cases don't jump into scopes illegally.
if (getCurFunction()->NeedsScopeChecking() &&
!dcl->isInvalidDecl() &&
- !hasAnyErrorsInThisFunction())
+ !hasAnyUnrecoverableErrorsInThisFunction())
DiagnoseInvalidJumps(Body);
if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
@@ -6379,15 +6523,17 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
if (PP.getDiagnostics().hasErrorOccurred() ||
- PP.getDiagnostics().getSuppressAllDiagnostics())
+ PP.getDiagnostics().getSuppressAllDiagnostics()) {
ExprTemporaries.clear();
- else if (!isa<FunctionTemplateDecl>(dcl)) {
+ ExprNeedsCleanups = false;
+ } else if (!isa<FunctionTemplateDecl>(dcl)) {
// Since the body is valid, issue any analysis-based warnings that are
// enabled.
ActivePolicy = &WP;
}
assert(ExprTemporaries.empty() && "Leftover temporaries in function");
+ assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
}
if (!IsInstantiation)
@@ -6398,8 +6544,10 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (getDiagnostics().hasErrorOccurred())
+ if (getDiagnostics().hasErrorOccurred()) {
ExprTemporaries.clear();
+ ExprNeedsCleanups = false;
+ }
return dcl;
}
@@ -6439,6 +6587,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
Declarator D(DS, Declarator::BlockContext);
D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
0, 0, true, SourceLocation(),
+ SourceLocation(),
EST_None, SourceLocation(),
0, 0, 0, 0, Loc, Loc, D),
DS.getAttributes(),
@@ -6466,6 +6615,9 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/// These attributes can apply both to implicitly-declared builtins
/// (like __builtin___printf_chk) or to library-declared functions
/// like NSLog or printf.
+///
+/// We need to check for duplicate attributes both here and where user-written
+/// attributes are applied to declarations.
void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
if (FD->isInvalidDecl())
return;
@@ -6499,9 +6651,9 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
}
- if (Context.BuiltinInfo.isNoThrow(BuiltinID))
+ if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->getAttr<NoThrowAttr>())
FD->addAttr(::new (Context) NoThrowAttr(FD->getLocation(), Context));
- if (Context.BuiltinInfo.isConst(BuiltinID))
+ if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->getAttr<ConstAttr>())
FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
}
@@ -7728,6 +7880,11 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// FIXME: What to pass instead of TUScope?
ProcessDeclAttributes(TUScope, NewFD, *D);
+ // In auto-retain/release, infer strong retention for fields of
+ // retainable type.
+ if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
+ NewFD->setInvalidDecl();
+
if (T.isObjCGCWeak())
Diag(Loc, diag::warn_attribute_weak_on_field);
@@ -7761,6 +7918,21 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) {
member = CXXDestructor;
if (member != CXXInvalid) {
+ if (getLangOptions().ObjCAutoRefCount && RDecl->hasObjectMember()) {
+ // Objective-C++ ARC: it is an error to have a non-trivial field of
+ // a union. However, system headers in Objective-C programs
+ // occasionally have Objective-C lifetime objects within unions,
+ // and rather than cause the program to fail, we make those
+ // members unavailable.
+ SourceLocation Loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(Loc)) {
+ if (!FD->hasAttr<UnavailableAttr>())
+ FD->addAttr(new (Context) UnavailableAttr(Loc, Context,
+ "this system field has retaining ownership"));
+ return false;
+ }
+ }
+
Diag(FD->getLocation(), diag::err_illegal_union_or_anon_struct_member)
<< (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
DiagnoseNontrivial(RT, member);
@@ -7913,6 +8085,21 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
return;
}
}
+
+ if (EltTy->isObjCLifetimeType()) {
+ switch (EltTy.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ Diag((*fi)->getLocation(), diag::note_nontrivial_objc_ownership)
+ << QT << EltTy.getObjCLifetime();
+ return;
+ }
+ }
}
assert(0 && "found no explanation for non-trivial member");
@@ -8022,6 +8209,10 @@ Decl *Sema::ActOnIvar(Scope *S,
if (D.isInvalidType())
NewID->setInvalidDecl();
+ // In ARC, infer 'retaining' for ivars of retainable type.
+ if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
+ NewID->setInvalidDecl();
+
if (II) {
// FIXME: When interfaces are DeclContexts, we'll need to add
// these to the interface.
@@ -8093,6 +8284,7 @@ void Sema::ActOnFields(Scope* S,
llvm::SmallVector<FieldDecl*, 32> RecFields;
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+ bool ARCErrReported = false;
for (unsigned i = 0; i != NumFields; ++i) {
FieldDecl *FD = cast<FieldDecl>(Fields[i]);
@@ -8158,7 +8350,7 @@ void Sema::ActOnFields(Scope* S,
continue;
}
if (!FD->getType()->isDependentType() &&
- !Context.getBaseElementType(FD->getType())->isPODType()) {
+ !Context.getBaseElementType(FD->getType()).isPODType(Context)) {
Diag(FD->getLocation(), diag::err_flexible_array_has_nonpod_type)
<< FD->getDeclName() << FD->getType();
FD->setInvalidDecl();
@@ -8205,17 +8397,45 @@ void Sema::ActOnFields(Scope* S,
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
- } else if (getLangOptions().ObjC1 &&
+ }
+ else if (!getLangOptions().CPlusPlus) {
+ if (getLangOptions().ObjCAutoRefCount && Record && !ARCErrReported) {
+ // It's an error in ARC if a field has lifetime.
+ // We don't want to report this in a system header, though,
+ // so we just make the field unavailable.
+ // FIXME: that's really not sufficient; we need to make the type
+ // itself invalid to, say, initialize or copy.
+ QualType T = FD->getType();
+ Qualifiers::ObjCLifetime lifetime = T.getObjCLifetime();
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone) {
+ SourceLocation loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(loc)) {
+ if (!FD->hasAttr<UnavailableAttr>()) {
+ FD->addAttr(new (Context) UnavailableAttr(loc, Context,
+ "this system field has retaining ownership"));
+ }
+ } else {
+ Diag(FD->getLocation(), diag::err_arc_objc_object_in_struct);
+ }
+ ARCErrReported = true;
+ }
+ }
+ else if (getLangOptions().ObjC1 &&
getLangOptions().getGCMode() != LangOptions::NonGC &&
- Record &&
- (FD->getType()->isObjCObjectPointerType() ||
- FD->getType().isObjCGCStrong()))
- Record->setHasObjectMember(true);
- else if (Context.getAsArrayType(FD->getType())) {
- QualType BaseType = Context.getBaseElementType(FD->getType());
- if (Record && BaseType->isRecordType() &&
- BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
- Record->setHasObjectMember(true);
+ Record && !Record->hasObjectMember()) {
+ if (FD->getType()->isObjCObjectPointerType() ||
+ FD->getType().isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ else if (Context.getAsArrayType(FD->getType())) {
+ QualType BaseType = Context.getBaseElementType(FD->getType());
+ if (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ else if (BaseType->isObjCObjectPointerType() ||
+ BaseType.isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ }
+ }
}
// Keep track of the number of named members.
if (FD->getIdentifier())
@@ -8234,6 +8454,42 @@ void Sema::ActOnFields(Scope* S,
Convs->setAccess(I, (*I)->getAccess());
if (!CXXRecord->isDependentType()) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, copy assignment
+ // operator, and destructor are non-trivial.
+ //
+ // This rule is also handled by CXXRecordDecl::completeDefinition().
+ // However, here we check whether this particular class is only
+ // non-POD because of the presence of an Objective-C pointer member.
+ // If so, objects of this type cannot be shared between code compiled
+ // with automatic reference counting and code compiled with manual retain/release.
+ if (getLangOptions().ObjCAutoRefCount &&
+ CXXRecord->hasObjectMember() &&
+ CXXRecord->getLinkage() == ExternalLinkage) {
+ if (CXXRecord->isPOD()) {
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_non_pod_class_with_object_member)
+ << CXXRecord;
+ } else {
+ // FIXME: Fix-Its would be nice here, but finding a good location
+ // for them is going to be tricky.
+ if (CXXRecord->hasTrivialCopyConstructor())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 0;
+ if (CXXRecord->hasTrivialCopyAssignment())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 1;
+ if (CXXRecord->hasTrivialDestructor())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 2;
+ }
+ }
+
// Adjust user-defined destructor exception spec.
if (getLangOptions().CPlusPlus0x &&
CXXRecord->hasUserDeclaredDestructor())
@@ -8807,7 +9063,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
// Adjust the Expr initializer and type.
if (ECD->getInitExpr() &&
- !Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
+ !Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
CK_IntegralCast,
ECD->getInitExpr(),
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index 7f93ab72d6d1..61b7b3ee05e3 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
@@ -48,14 +49,14 @@ enum {
// Helper functions
//===----------------------------------------------------------------------===//
-static const FunctionType *getFunctionType(const Decl *d,
+static const FunctionType *getFunctionType(const Decl *D,
bool blocksToo = true) {
QualType Ty;
- if (const ValueDecl *decl = dyn_cast<ValueDecl>(d))
+ if (const ValueDecl *decl = dyn_cast<ValueDecl>(D))
Ty = decl->getType();
- else if (const FieldDecl *decl = dyn_cast<FieldDecl>(d))
+ else if (const FieldDecl *decl = dyn_cast<FieldDecl>(D))
Ty = decl->getType();
- else if (const TypedefNameDecl* decl = dyn_cast<TypedefNameDecl>(d))
+ else if (const TypedefNameDecl* decl = dyn_cast<TypedefNameDecl>(D))
Ty = decl->getUnderlyingType();
else
return 0;
@@ -73,46 +74,47 @@ static const FunctionType *getFunctionType(const Decl *d,
/// isFunction - Return true if the given decl has function
/// type (function or function-typed variable).
-static bool isFunction(const Decl *d) {
- return getFunctionType(d, false) != NULL;
+static bool isFunction(const Decl *D) {
+ return getFunctionType(D, false) != NULL;
}
/// isFunctionOrMethod - Return true if the given decl has function
/// type (function or function-typed variable) or an Objective-C
/// method.
-static bool isFunctionOrMethod(const Decl *d) {
- return isFunction(d)|| isa<ObjCMethodDecl>(d);
+static bool isFunctionOrMethod(const Decl *D) {
+ return isFunction(D)|| isa<ObjCMethodDecl>(D);
}
/// isFunctionOrMethodOrBlock - Return true if the given decl has function
/// type (function or function-typed variable) or an Objective-C
/// method or a block.
-static bool isFunctionOrMethodOrBlock(const Decl *d) {
- if (isFunctionOrMethod(d))
+static bool isFunctionOrMethodOrBlock(const Decl *D) {
+ if (isFunctionOrMethod(D))
return true;
// Checking for a block is more involved.
- if (const VarDecl *V = dyn_cast<VarDecl>(d)) {
+ if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
QualType Ty = V->getType();
return Ty->isBlockPointerType();
}
- return isa<BlockDecl>(d);
+ return isa<BlockDecl>(D);
}
/// Return true if the given decl has a declarator that should have
/// been processed by Sema::GetTypeForDeclarator.
-static bool hasDeclarator(const Decl *d) {
- // In some sense, TypedefNameDecl really *ought* to be a DeclaratorDecl.
- return isa<DeclaratorDecl>(d) || isa<BlockDecl>(d) || isa<TypedefNameDecl>(d);
+static bool hasDeclarator(const Decl *D) {
+ // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
+ return isa<DeclaratorDecl>(D) || isa<BlockDecl>(D) || isa<TypedefNameDecl>(D) ||
+ isa<ObjCPropertyDecl>(D);
}
/// hasFunctionProto - Return true if the given decl has argument
/// information. This decl should have already passed
/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
-static bool hasFunctionProto(const Decl *d) {
- if (const FunctionType *FnTy = getFunctionType(d))
+static bool hasFunctionProto(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
return isa<FunctionProtoType>(FnTy);
else {
- assert(isa<ObjCMethodDecl>(d) || isa<BlockDecl>(d));
+ assert(isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D));
return true;
}
}
@@ -120,42 +122,42 @@ static bool hasFunctionProto(const Decl *d) {
/// getFunctionOrMethodNumArgs - Return number of function or method
/// arguments. It is an error to call this on a K&R function (use
/// hasFunctionProto first).
-static unsigned getFunctionOrMethodNumArgs(const Decl *d) {
- if (const FunctionType *FnTy = getFunctionType(d))
+static unsigned getFunctionOrMethodNumArgs(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
return cast<FunctionProtoType>(FnTy)->getNumArgs();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
return BD->getNumParams();
- return cast<ObjCMethodDecl>(d)->param_size();
+ return cast<ObjCMethodDecl>(D)->param_size();
}
-static QualType getFunctionOrMethodArgType(const Decl *d, unsigned Idx) {
- if (const FunctionType *FnTy = getFunctionType(d))
+static QualType getFunctionOrMethodArgType(const Decl *D, unsigned Idx) {
+ if (const FunctionType *FnTy = getFunctionType(D))
return cast<FunctionProtoType>(FnTy)->getArgType(Idx);
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
return BD->getParamDecl(Idx)->getType();
- return cast<ObjCMethodDecl>(d)->param_begin()[Idx]->getType();
+ return cast<ObjCMethodDecl>(D)->param_begin()[Idx]->getType();
}
-static QualType getFunctionOrMethodResultType(const Decl *d) {
- if (const FunctionType *FnTy = getFunctionType(d))
+static QualType getFunctionOrMethodResultType(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
return cast<FunctionProtoType>(FnTy)->getResultType();
- return cast<ObjCMethodDecl>(d)->getResultType();
+ return cast<ObjCMethodDecl>(D)->getResultType();
}
-static bool isFunctionOrMethodVariadic(const Decl *d) {
- if (const FunctionType *FnTy = getFunctionType(d)) {
+static bool isFunctionOrMethodVariadic(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D)) {
const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
return proto->isVariadic();
- } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
return BD->isVariadic();
else {
- return cast<ObjCMethodDecl>(d)->isVariadic();
+ return cast<ObjCMethodDecl>(D)->isVariadic();
}
}
-static bool isInstanceMethod(const Decl *d) {
- if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(d))
+static bool isInstanceMethod(const Decl *D) {
+ if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D))
return MethodDecl->isInstance();
return false;
}
@@ -192,6 +194,16 @@ static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
}
+static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned int Num) {
+ if (Attr.getNumArgs() != Num) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Num;
+ return false;
+ }
+
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Attribute Implementations
//===----------------------------------------------------------------------===//
@@ -200,9 +212,9 @@ static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
// least add some helper functions to check most argument patterns (#
// and types of args).
-static void HandleExtVectorTypeAttr(Scope *scope, Decl *d,
- const AttributeList &Attr, Sema &S) {
- TypedefNameDecl *tDecl = dyn_cast<TypedefNameDecl>(d);
+static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+ TypedefNameDecl *tDecl = dyn_cast<TypedefNameDecl>(D);
if (tDecl == 0) {
S.Diag(Attr.getLoc(), diag::err_typecheck_ext_vector_not_typedef);
return;
@@ -217,13 +229,17 @@ static void HandleExtVectorTypeAttr(Scope *scope, Decl *d,
CXXScopeSpec SS;
UnqualifiedId id;
id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
- sizeExpr = S.ActOnIdExpression(scope, SS, id, false, false).takeAs<Expr>();
+
+ ExprResult Size = S.ActOnIdExpression(scope, SS, id, false, false);
+ if (Size.isInvalid())
+ return;
+
+ sizeExpr = Size.get();
} else {
// check the attribute arguments.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
+
sizeExpr = Attr.getArg(0);
}
@@ -239,16 +255,14 @@ static void HandleExtVectorTypeAttr(Scope *scope, Decl *d,
}
}
-static void HandlePackedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() > 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (TagDecl *TD = dyn_cast<TagDecl>(d))
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
TD->addAttr(::new (S.Context) PackedAttr(Attr.getLoc(), S.Context));
- else if (FieldDecl *FD = dyn_cast<FieldDecl>(d)) {
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
// If the alignment is less than or equal to 8 bits, the packed attribute
// has no effect.
if (!FD->getType()->isIncompleteType() &&
@@ -261,49 +275,45 @@ static void HandlePackedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
}
-static void HandleMsStructAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (TagDecl *TD = dyn_cast<TagDecl>(d))
+static void handleMsStructAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
TD->addAttr(::new (S.Context) MsStructAttr(Attr.getLoc(), S.Context));
else
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
}
-static void HandleIBAction(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleIBAction(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() > 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
// The IBAction attributes only apply to instance methods.
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d))
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
if (MD->isInstanceMethod()) {
- d->addAttr(::new (S.Context) IBActionAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) IBActionAttr(Attr.getLoc(), S.Context));
return;
}
S.Diag(Attr.getLoc(), diag::warn_attribute_ibaction) << Attr.getName();
}
-static void HandleIBOutlet(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleIBOutlet(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() > 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
// The IBOutlet attributes only apply to instance variables of
// Objective-C classes.
- if (isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d)) {
- d->addAttr(::new (S.Context) IBOutletAttr(Attr.getLoc(), S.Context));
+ if (isa<ObjCIvarDecl>(D) || isa<ObjCPropertyDecl>(D)) {
+ D->addAttr(::new (S.Context) IBOutletAttr(Attr.getLoc(), S.Context));
return;
}
S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
}
-static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleIBOutletCollection(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// The iboutletcollection attribute can have zero or one arguments.
if (Attr.getParameterName() && Attr.getNumArgs() > 0) {
@@ -313,17 +323,17 @@ static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
// The IBOutletCollection attributes only apply to instance variables of
// Objective-C classes.
- if (!(isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d))) {
+ if (!(isa<ObjCIvarDecl>(D) || isa<ObjCPropertyDecl>(D))) {
S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
return;
}
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(d))
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
S.Diag(Attr.getLoc(), diag::err_iboutletcollection_object_type)
<< VD->getType() << 0;
return;
}
- if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(d))
+ if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
S.Diag(Attr.getLoc(), diag::err_iboutletcollection_object_type)
<< PD->getType() << 1;
@@ -335,7 +345,7 @@ static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
II = &S.Context.Idents.get("id");
ParsedType TypeRep = S.getTypeName(*II, Attr.getLoc(),
- S.getScopeForContext(d->getDeclContext()->getParent()));
+ S.getScopeForContext(D->getDeclContext()->getParent()));
if (!TypeRep) {
S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << II;
return;
@@ -350,14 +360,29 @@ static void HandleIBOutletCollection(Decl *d, const AttributeList &Attr,
S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << II;
return;
}
- d->addAttr(::new (S.Context) IBOutletCollectionAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) IBOutletCollectionAttr(Attr.getLoc(), S.Context,
QT));
}
-static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void possibleTransparentUnionPointerType(QualType &T) {
+ if (const RecordType *UT = T->getAsUnionType())
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ RecordDecl *UD = UT->getDecl();
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ QualType QT = it->getType();
+ if (QT->isAnyPointerType() || QT->isBlockPointerType()) {
+ T = QT;
+ return;
+ }
+ }
+ }
+}
+
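// Illustrative example (not from this patch): possibleTransparentUnionPointerType
// lets the nonnull handler below look through a transparent union to its first
// pointer member. Type and function names here are hypothetical:
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } int_or_float __attribute__((transparent_union));
//
//   void consume(int_or_float p) __attribute__((nonnull(1)));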
+static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// GCC ignores the nonnull attribute on K&R style function prototypes, so we
// ignore it as well
- if (!isFunctionOrMethod(d) || !hasFunctionProto(d)) {
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
@@ -365,8 +390,8 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// In C++ the implicit 'this' function parameter also counts, and they are
// counted from one.
- bool HasImplicitThisParam = isInstanceMethod(d);
- unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
// The nonnull attribute only applies to pointers.
llvm::SmallVector<unsigned, 10> NonNullArgs;
@@ -405,7 +430,9 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// Is the function argument a pointer type?
- QualType T = getFunctionOrMethodArgType(d, x).getNonReferenceType();
+ QualType T = getFunctionOrMethodArgType(D, x).getNonReferenceType();
+ possibleTransparentUnionPointerType(T);
+
if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
// FIXME: Should also highlight argument in decl.
S.Diag(Attr.getLoc(), diag::warn_nonnull_pointers_only)
@@ -419,23 +446,11 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// If no arguments were specified to __attribute__((nonnull)) then all pointer
// arguments have a nonnull attribute.
if (NonNullArgs.empty()) {
- for (unsigned I = 0, E = getFunctionOrMethodNumArgs(d); I != E; ++I) {
- QualType T = getFunctionOrMethodArgType(d, I).getNonReferenceType();
+ for (unsigned I = 0, E = getFunctionOrMethodNumArgs(D); I != E; ++I) {
+ QualType T = getFunctionOrMethodArgType(D, I).getNonReferenceType();
+ possibleTransparentUnionPointerType(T);
if (T->isAnyPointerType() || T->isBlockPointerType())
NonNullArgs.push_back(I);
- else if (const RecordType *UT = T->getAsUnionType()) {
- if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
- RecordDecl *UD = UT->getDecl();
- for (RecordDecl::field_iterator it = UD->field_begin(),
- itend = UD->field_end(); it != itend; ++it) {
- T = it->getType();
- if (T->isAnyPointerType() || T->isBlockPointerType()) {
- NonNullArgs.push_back(I);
- break;
- }
- }
- }
- }
}
// No pointer arguments?
@@ -451,11 +466,11 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
unsigned* start = &NonNullArgs[0];
unsigned size = NonNullArgs.size();
llvm::array_pod_sort(start, start + size);
- d->addAttr(::new (S.Context) NonNullAttr(Attr.getLoc(), S.Context, start,
+ D->addAttr(::new (S.Context) NonNullAttr(Attr.getLoc(), S.Context, start,
size));
}
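// Illustrative example (not from this patch): the ownership attributes handled
// below name a resource class (e.g. "malloc") and, for takes/holds, the index
// of the pointer argument. Function names are hypothetical:
//
//   void *my_alloc(unsigned long n) __attribute__((ownership_returns(malloc)));
//   void my_free(void *p) __attribute__((ownership_takes(malloc, 1)));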
-static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
+static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
// This attribute must be applied to a function declaration.
// The first argument to the attribute must be a string,
// the name of the resource, for example "malloc".
@@ -500,7 +515,7 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
llvm_unreachable("Unknown ownership attribute");
}
- if (!isFunction(d) || !hasFunctionProto(d)) {
+ if (!isFunction(D) || !hasFunctionProto(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
<< AL.getName() << ExpectedFunction;
return;
@@ -508,8 +523,8 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
// In C++ the implicit 'this' function parameter also counts, and they are
// counted from one.
- bool HasImplicitThisParam = isInstanceMethod(d);
- unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
llvm::StringRef Module = AL.getParameterName()->getName();
@@ -552,7 +567,7 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
case OwnershipAttr::Takes:
case OwnershipAttr::Holds: {
// Is the function argument a pointer type?
- QualType T = getFunctionOrMethodArgType(d, x);
+ QualType T = getFunctionOrMethodArgType(D, x);
if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
// FIXME: Should also highlight argument in decl.
S.Diag(AL.getLoc(), diag::err_ownership_type)
@@ -584,8 +599,8 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
// Check we don't have a conflict with another ownership attribute.
for (specific_attr_iterator<OwnershipAttr>
- i = d->specific_attr_begin<OwnershipAttr>(),
- e = d->specific_attr_end<OwnershipAttr>();
+ i = D->specific_attr_begin<OwnershipAttr>(),
+ e = D->specific_attr_end<OwnershipAttr>();
i != e; ++i) {
if ((*i)->getOwnKind() != K) {
for (const unsigned *I = (*i)->args_begin(), *E = (*i)->args_end();
@@ -609,7 +624,7 @@ static void HandleOwnershipAttr(Decl *d, const AttributeList &AL, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) OwnershipAttr(AL.getLoc(), S.Context, K, Module,
+ D->addAttr(::new (S.Context) OwnershipAttr(AL.getLoc(), S.Context, K, Module,
start, size));
}
@@ -633,20 +648,20 @@ static bool hasEffectivelyInternalLinkage(NamedDecl *D) {
return false;
}
-static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
if (Attr.getNumArgs() > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
return;
}
- if (!isa<VarDecl>(d) && !isa<FunctionDecl>(d)) {
+ if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableOrFunction;
return;
}
- NamedDecl *nd = cast<NamedDecl>(d);
+ NamedDecl *nd = cast<NamedDecl>(D);
// gcc rejects
// class c {
@@ -658,7 +673,7 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// static int a __attribute__((weakref ("v2")));
// }
// we reject them
- const DeclContext *Ctx = d->getDeclContext()->getRedeclContext();
+ const DeclContext *Ctx = D->getDeclContext()->getRedeclContext();
if (!Ctx->isFileContext()) {
S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) <<
nd->getNameAsString();
@@ -704,14 +719,14 @@ static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// GCC will accept anything as the argument of weakref. Should we
// check for an existing decl?
- d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
Str->getString()));
}
- d->addAttr(::new (S.Context) WeakRefAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) WeakRefAttr(Attr.getLoc(), S.Context));
}
-static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
@@ -735,55 +750,52 @@ static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// FIXME: check if target symbol exists in current file
- d->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getLoc(), S.Context,
Str->getString()));
}
-static void HandleNakedAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<FunctionDecl>(d)) {
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) NakedAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) NakedAttr(Attr.getLoc(), S.Context));
}
-static void HandleAlwaysInlineAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleAlwaysInlineAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// Check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- if (!isa<FunctionDecl>(d)) {
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) AlwaysInlineAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) AlwaysInlineAttr(Attr.getLoc(), S.Context));
}
-static void HandleMallocAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleMallocAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(d)) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
QualType RetTy = FD->getResultType();
if (RetTy->isAnyPointerType() || RetTy->isBlockPointerType()) {
- d->addAttr(::new (S.Context) MallocAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) MallocAttr(Attr.getLoc(), S.Context));
return;
}
}
@@ -791,46 +803,44 @@ static void HandleMallocAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::warn_attribute_malloc_pointer_only);
}
-static void HandleMayAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleMayAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- d->addAttr(::new (S.Context) MayAliasAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) MayAliasAttr(Attr.getLoc(), S.Context));
}
-static void HandleNoCommonAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- assert(Attr.isInvalid() == false);
- if (isa<VarDecl>(d))
- d->addAttr(::new (S.Context) NoCommonAttr(Attr.getLoc(), S.Context));
+static void handleNoCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+ if (isa<VarDecl>(D))
+ D->addAttr(::new (S.Context) NoCommonAttr(Attr.getLoc(), S.Context));
else
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariable;
}
-static void HandleCommonAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- assert(Attr.isInvalid() == false);
- if (isa<VarDecl>(d))
- d->addAttr(::new (S.Context) CommonAttr(Attr.getLoc(), S.Context));
+static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+ if (isa<VarDecl>(D))
+ D->addAttr(::new (S.Context) CommonAttr(Attr.getLoc(), S.Context));
else
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariable;
}
-static void HandleNoReturnAttr(Decl *d, const AttributeList &attr, Sema &S) {
- if (hasDeclarator(d)) return;
+static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
+ if (hasDeclarator(D)) return;
if (S.CheckNoReturnAttr(attr)) return;
- if (!isa<ObjCMethodDecl>(d)) {
+ if (!isa<ObjCMethodDecl>(D)) {
S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< attr.getName() << ExpectedFunctionOrMethod;
return;
}
- d->addAttr(::new (S.Context) NoReturnAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) NoReturnAttr(attr.getLoc(), S.Context));
}
bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
@@ -843,19 +853,17 @@ bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
return false;
}
-static void HandleAnalyzerNoReturnAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// The checking path for 'noreturn' and 'analyzer_noreturn' are different
// because 'analyzer_noreturn' does not impact the type.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
- if (!isFunctionOrMethod(d) && !isa<BlockDecl>(d)) {
- ValueDecl *VD = dyn_cast<ValueDecl>(d);
+ if (!isFunctionOrMethod(D) && !isa<BlockDecl>(D)) {
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (VD == 0 || (!VD->getType()->isBlockPointerType()
&& !VD->getType()->isFunctionPointerType())) {
S.Diag(Attr.getLoc(),
@@ -866,12 +874,11 @@ static void HandleAnalyzerNoReturnAttr(Decl *d, const AttributeList &Attr,
}
}
- d->addAttr(::new (S.Context) AnalyzerNoReturnAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) AnalyzerNoReturnAttr(Attr.getLoc(), S.Context));
}
// PS3 PPU-specific.
-static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) {
/*
Returning a Vector Class in Registers
@@ -895,18 +902,18 @@ static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
return result; // This will be returned in a register
}
*/
- if (!isa<RecordDecl>(d)) {
+ if (!isa<RecordDecl>(D)) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedClass;
return;
}
- if (d->getAttr<VecReturnAttr>()) {
+ if (D->getAttr<VecReturnAttr>()) {
S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "vecreturn";
return;
}
- RecordDecl *record = cast<RecordDecl>(d);
+ RecordDecl *record = cast<RecordDecl>(D);
int count = 0;
if (!isa<CXXRecordDecl>(record)) {
@@ -928,11 +935,11 @@ static void HandleVecReturnAttr(Decl *d, const AttributeList &Attr,
count++;
}
- d->addAttr(::new (S.Context) VecReturnAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) VecReturnAttr(Attr.getLoc(), S.Context));
}
-static void HandleDependencyAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (!isFunctionOrMethod(d) && !isa<ParmVarDecl>(d)) {
+static void handleDependencyAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!isFunctionOrMethod(D) && !isa<ParmVarDecl>(D)) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunctionMethodOrParameter;
return;
@@ -940,45 +947,45 @@ static void HandleDependencyAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// FIXME: Actually store the attribute on the declaration
}
-static void HandleUnusedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- if (!isa<VarDecl>(d) && !isa<ObjCIvarDecl>(d) && !isFunctionOrMethod(d) &&
- !isa<TypeDecl>(d) && !isa<LabelDecl>(d)) {
+ if (!isa<VarDecl>(D) && !isa<ObjCIvarDecl>(D) && !isFunctionOrMethod(D) &&
+ !isa<TypeDecl>(D) && !isa<LabelDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableFunctionOrLabel;
return;
}
- d->addAttr(::new (S.Context) UnusedAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) UnusedAttr(Attr.getLoc(), S.Context));
}
-static void HandleUsedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- if (const VarDecl *VD = dyn_cast<VarDecl>(d)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasLocalStorage() || VD->hasExternalStorage()) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "used";
return;
}
- } else if (!isFunctionOrMethod(d)) {
+ } else if (!isFunctionOrMethod(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableOrFunction;
return;
}
- d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
}
-static void HandleConstructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
@@ -998,17 +1005,17 @@ static void HandleConstructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
priority = Idx.getZExtValue();
}
- if (!isa<FunctionDecl>(d)) {
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) ConstructorAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) ConstructorAttr(Attr.getLoc(), S.Context,
priority));
}
-static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
@@ -1028,17 +1035,17 @@ static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
priority = Idx.getZExtValue();
}
- if (!isa<FunctionDecl>(d)) {
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) DestructorAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) DestructorAttr(Attr.getLoc(), S.Context,
priority));
}
-static void HandleDeprecatedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
unsigned NumArgs = Attr.getNumArgs();
if (NumArgs > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
@@ -1057,10 +1064,10 @@ static void HandleDeprecatedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
Str = SE->getString();
}
- d->addAttr(::new (S.Context) DeprecatedAttr(Attr.getLoc(), S.Context, Str));
+ D->addAttr(::new (S.Context) DeprecatedAttr(Attr.getLoc(), S.Context, Str));
}
-static void HandleUnavailableAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleUnavailableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
unsigned NumArgs = Attr.getNumArgs();
if (NumArgs > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
@@ -1078,11 +1085,23 @@ static void HandleUnavailableAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
Str = SE->getString();
}
- d->addAttr(::new (S.Context) UnavailableAttr(Attr.getLoc(), S.Context, Str));
+ D->addAttr(::new (S.Context) UnavailableAttr(Attr.getLoc(), S.Context, Str));
+}
+
+static void handleArcWeakrefUnavailableAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ArcWeakrefUnavailableAttr(
+ Attr.getLoc(), S.Context));
}
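// Illustrative example (not from this patch): the new handler above backs the
// ARC migration attribute; assuming the spelling that matches the attribute
// class name, it would be written on an Objective-C interface as:
//
//   __attribute__((objc_arc_weak_reference_unavailable))
//   @interface MyLegacyClass : NSObject
//   @end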
-static void HandleAvailabilityAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleAvailabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
IdentifierInfo *Platform = Attr.getParameterName();
SourceLocation PlatformLoc = Attr.getParameterLoc();
@@ -1126,7 +1145,7 @@ static void HandleAvailabilityAttr(Decl *d, const AttributeList &Attr,
return;
}
- d->addAttr(::new (S.Context) AvailabilityAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) AvailabilityAttr(Attr.getLoc(), S.Context,
Platform,
Introduced.Version,
Deprecated.Version,
@@ -1134,12 +1153,10 @@ static void HandleAvailabilityAttr(Decl *d, const AttributeList &Attr,
IsUnavailable));
}
-static void HandleVisibilityAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
Expr *Arg = Attr.getArg(0);
Arg = Arg->IgnoreParenCasts();
@@ -1167,30 +1184,30 @@ static void HandleVisibilityAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) VisibilityAttr(Attr.getLoc(), S.Context, type));
+ D->addAttr(::new (S.Context) VisibilityAttr(Attr.getLoc(), S.Context, type));
}
-static void HandleObjCMethodFamilyAttr(Decl *decl, const AttributeList &attr,
- Sema &S) {
+static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
+ const AttributeList &Attr) {
ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(decl);
if (!method) {
- S.Diag(attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
<< ExpectedMethod;
return;
}
- if (attr.getNumArgs() != 0 || !attr.getParameterName()) {
- if (!attr.getParameterName() && attr.getNumArgs() == 1) {
- S.Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ if (Attr.getNumArgs() != 0 || !Attr.getParameterName()) {
+ if (!Attr.getParameterName() && Attr.getNumArgs() == 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
<< "objc_method_family" << 1;
} else {
- S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
}
- attr.setInvalid();
+ Attr.setInvalid();
return;
}
- llvm::StringRef param = attr.getParameterName()->getName();
+ llvm::StringRef param = Attr.getParameterName()->getName();
ObjCMethodFamilyAttr::FamilyKind family;
if (param == "none")
family = ObjCMethodFamilyAttr::OMF_None;
@@ -1207,20 +1224,26 @@ static void HandleObjCMethodFamilyAttr(Decl *decl, const AttributeList &attr,
else {
// Just warn and ignore it. This is future-proof against new
// families being used in system headers.
- S.Diag(attr.getParameterLoc(), diag::warn_unknown_method_family);
+ S.Diag(Attr.getParameterLoc(), diag::warn_unknown_method_family);
return;
}
- decl->addAttr(new (S.Context) ObjCMethodFamilyAttr(attr.getLoc(),
- S.Context, family));
+ if (family == ObjCMethodFamilyAttr::OMF_init &&
+ !method->getResultType()->isObjCObjectPointerType()) {
+ S.Diag(method->getLocation(), diag::err_init_method_bad_return_type)
+ << method->getResultType();
+ // Ignore the attribute.
+ return;
+ }
+
+ method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getLoc(),
+ S.Context, family));
}
-static void HandleObjCExceptionAttr(Decl *D, const AttributeList &Attr,
- Sema &S) {
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+static void handleObjCExceptionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
ObjCInterfaceDecl *OCI = dyn_cast<ObjCInterfaceDecl>(D);
if (OCI == 0) {
@@ -1231,7 +1254,7 @@ static void HandleObjCExceptionAttr(Decl *D, const AttributeList &Attr,
D->addAttr(::new (S.Context) ObjCExceptionAttr(Attr.getLoc(), S.Context));
}
-static void HandleObjCNSObject(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
if (Attr.getNumArgs() != 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
@@ -1248,7 +1271,7 @@ static void HandleObjCNSObject(Decl *D, const AttributeList &Attr, Sema &S) {
}
static void
-HandleOverloadableAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+handleOverloadableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (Attr.getNumArgs() != 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
@@ -1262,7 +1285,7 @@ HandleOverloadableAttr(Decl *D, const AttributeList &Attr, Sema &S) {
D->addAttr(::new (S.Context) OverloadableAttr(Attr.getLoc(), S.Context));
}
-static void HandleBlocksAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!Attr.getParameterName()) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
<< "blocks" << 1;
@@ -1283,10 +1306,10 @@ static void HandleBlocksAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) BlocksAttr(Attr.getLoc(), S.Context, type));
+ D->addAttr(::new (S.Context) BlocksAttr(Attr.getLoc(), S.Context, type));
}
-static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() > 2) {
S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 2;
@@ -1333,7 +1356,7 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(d)) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
const FunctionType *FT = FD->getType()->getAs<FunctionType>();
assert(FT && "FunctionDecl has non-function type?");
@@ -1346,19 +1369,19 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
return;
}
- } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d)) {
+ } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (!MD->isVariadic()) {
S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
return;
}
- } else if (isa<BlockDecl>(d)) {
+ } else if (isa<BlockDecl>(D)) {
// Note! BlockDecl is typeless. Variadic diagnostics will be issued by the
// caller.
;
- } else if (const VarDecl *V = dyn_cast<VarDecl>(d)) {
+ } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
QualType Ty = V->getType();
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
- const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(d)
+ const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(D)
: Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
if (!cast<FunctionProtoType>(FT)->isVariadic()) {
int m = Ty->isFunctionPointerType() ? 0 : 1;
@@ -1375,16 +1398,14 @@ static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
<< Attr.getName() << ExpectedFunctionMethodOrBlock;
return;
}
- d->addAttr(::new (S.Context) SentinelAttr(Attr.getLoc(), S.Context, sentinel,
+ D->addAttr(::new (S.Context) SentinelAttr(Attr.getLoc(), S.Context, sentinel,
nullPos));
}
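// Illustrative example (not from this patch): the sentinel attribute handled
// above requires variadic calls to end with a NULL sentinel. Hypothetical use:
//
//   void exec_list(const char *first, ...) __attribute__((sentinel));
//   // callers must terminate the list: exec_list("a", "b", (char *)0);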
-static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
if (!isFunction(D) && !isa<ObjCMethodDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
@@ -1407,36 +1428,35 @@ static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S)
D->addAttr(::new (S.Context) WarnUnusedResultAttr(Attr.getLoc(), S.Context));
}
-static void HandleWeakAttr(Decl *d, const AttributeList &attr, Sema &S) {
+static void handleWeakAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (attr.hasParameterOrArguments()) {
- S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- if (!isa<VarDecl>(d) && !isa<FunctionDecl>(d)) {
- S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << attr.getName() << ExpectedVariableOrFunction;
+ if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
return;
}
- NamedDecl *nd = cast<NamedDecl>(d);
+ NamedDecl *nd = cast<NamedDecl>(D);
// 'weak' only applies to declarations with external linkage.
if (hasEffectivelyInternalLinkage(nd)) {
- S.Diag(attr.getLoc(), diag::err_attribute_weak_static);
+ S.Diag(Attr.getLoc(), diag::err_attribute_weak_static);
return;
}
- nd->addAttr(::new (S.Context) WeakAttr(attr.getLoc(), S.Context));
+ nd->addAttr(::new (S.Context) WeakAttr(Attr.getLoc(), S.Context));
}
-static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
+
// weak_import only applies to variable & function declarations.
bool isDef = false;
@@ -1459,13 +1479,11 @@ static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
D->addAttr(::new (S.Context) WeakImportAttr(Attr.getLoc(), S.Context));
}
-static void HandleReqdWorkGroupSize(Decl *D, const AttributeList &Attr,
- Sema &S) {
+static void handleReqdWorkGroupSize(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// Attribute has 3 arguments.
- if (Attr.getNumArgs() != 3) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 3))
return;
- }
unsigned WGSize[3];
for (unsigned i = 0; i < 3; ++i) {
@@ -1484,12 +1502,10 @@ static void HandleReqdWorkGroupSize(Decl *D, const AttributeList &Attr,
WGSize[2]));
}
-static void HandleSectionAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Attribute has one argument: the section name string.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
// Make sure that there is a string literal as the section's single
// argument.
@@ -1519,37 +1535,45 @@ static void HandleSectionAttr(Decl *D, const AttributeList &Attr, Sema &S) {
}
-static void HandleNothrowAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleNothrowAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
-
- d->addAttr(::new (S.Context) NoThrowAttr(Attr.getLoc(), S.Context));
+
+ if (NoThrowAttr *Existing = D->getAttr<NoThrowAttr>()) {
+ if (Existing->getLocation().isInvalid())
+ Existing->setLocation(Attr.getLoc());
+ } else {
+ D->addAttr(::new (S.Context) NoThrowAttr(Attr.getLoc(), S.Context));
+ }
}
-static void HandleConstAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleConstAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- d->addAttr(::new (S.Context) ConstAttr(Attr.getLoc(), S.Context));
+ if (ConstAttr *Existing = D->getAttr<ConstAttr>()) {
+ if (Existing->getLocation().isInvalid())
+ Existing->setLocation(Attr.getLoc());
+ } else {
+ D->addAttr(::new (S.Context) ConstAttr(Attr.getLoc(), S.Context));
+ }
}
-static void HandlePureAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handlePureAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- d->addAttr(::new (S.Context) PureAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) PureAttr(Attr.getLoc(), S.Context));
}
-static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!Attr.getParameterName()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
return;
@@ -1560,7 +1584,7 @@ static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- VarDecl *VD = dyn_cast<VarDecl>(d);
+ VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD || !VD->hasLocalStorage()) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "cleanup";
@@ -1605,18 +1629,17 @@ static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) CleanupAttr(Attr.getLoc(), S.Context, FD));
+ D->addAttr(::new (S.Context) CleanupAttr(Attr.getLoc(), S.Context, FD));
S.MarkDeclarationReferenced(Attr.getParameterLoc(), FD);
}
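// Illustrative example (not from this patch): format_arg(idx) marks a function
// whose result is derived from the format string passed as argument idx, as in
// a gettext-style wrapper (hypothetical name):
//
//   extern const char *my_gettext(const char *fmt) __attribute__((format_arg(1)));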
/// Handle __attribute__((format_arg((idx)))) attribute based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
- if (!isFunctionOrMethod(d) || !hasFunctionProto(d)) {
+
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
@@ -1624,8 +1647,8 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// In C++ the implicit 'this' function parameter also counts, and they are
// counted from one.
- bool HasImplicitThisParam = isInstanceMethod(d);
- unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
unsigned FirstIdx = 1;
// checks for the 2nd argument
@@ -1656,7 +1679,7 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// make sure the format string is really a string
- QualType Ty = getFunctionOrMethodArgType(d, ArgIdx);
+ QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
bool not_nsstring_type = !isNSStringType(Ty, S.Context);
if (not_nsstring_type &&
@@ -1669,7 +1692,7 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
<< IdxExpr->getSourceRange();
return;
}
- Ty = getFunctionOrMethodResultType(d);
+ Ty = getFunctionOrMethodResultType(D);
if (!isNSStringType(Ty, S.Context) &&
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
@@ -1681,7 +1704,7 @@ static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) FormatArgAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) FormatArgAttr(Attr.getLoc(), S.Context,
Idx.getZExtValue()));
}
@@ -1722,19 +1745,19 @@ static FormatAttrKind getFormatAttrKind(llvm::StringRef Format) {
/// Handle __attribute__((init_priority(priority))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
-static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleInitPriorityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
if (!S.getLangOptions().CPlusPlus) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
return;
}
- if (!isa<VarDecl>(d) || S.getCurFunctionOrMethodDecl()) {
+ if (!isa<VarDecl>(D) || S.getCurFunctionOrMethodDecl()) {
S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
Attr.setInvalid();
return;
}
- QualType T = dyn_cast<VarDecl>(d)->getType();
+ QualType T = dyn_cast<VarDecl>(D)->getType();
if (S.Context.getAsArrayType(T))
T = S.Context.getBaseElementType(T);
if (!T->getAs<RecordType>()) {
@@ -1765,13 +1788,13 @@ static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr,
Attr.setInvalid();
return;
}
- d->addAttr(::new (S.Context) InitPriorityAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) InitPriorityAttr(Attr.getLoc(), S.Context,
prioritynum));
}
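// Illustrative example (not from this patch): the format attribute handled
// below in its common printf form; the indices are 1-based and name the format
// string and the first variadic argument (function name hypothetical):
//
//   void my_log(int level, const char *fmt, ...)
//       __attribute__((format(printf, 2, 3)));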
/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!Attr.getParameterName()) {
S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
@@ -1784,7 +1807,7 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- if (!isFunctionOrMethodOrBlock(d) || !hasFunctionProto(d)) {
+ if (!isFunctionOrMethodOrBlock(D) || !hasFunctionProto(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
@@ -1792,8 +1815,8 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// In C++ the implicit 'this' function parameter also counts, and they are
// counted from one.
- bool HasImplicitThisParam = isInstanceMethod(d);
- unsigned NumArgs = getFunctionOrMethodNumArgs(d) + HasImplicitThisParam;
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
unsigned FirstIdx = 1;
llvm::StringRef Format = Attr.getParameterName()->getName();
@@ -1844,7 +1867,7 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
}
// make sure the format string is really a string
- QualType Ty = getFunctionOrMethodArgType(d, ArgIdx);
+ QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
if (Kind == CFStringFormat) {
if (!isCFStringType(Ty, S.Context)) {
@@ -1881,10 +1904,10 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// check if the function is variadic if the 3rd argument is non-zero
if (FirstArg != 0) {
- if (isFunctionOrMethodVariadic(d)) {
+ if (isFunctionOrMethodVariadic(D)) {
++NumArgs; // +1 for ...
} else {
- S.Diag(d->getLocation(), diag::err_format_attribute_requires_variadic);
+ S.Diag(D->getLocation(), diag::err_format_attribute_requires_variadic);
return;
}
}
@@ -1904,26 +1927,42 @@ static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) FormatAttr(Attr.getLoc(), S.Context, Format,
+ // Check whether we already have an equivalent format attribute.
+ for (specific_attr_iterator<FormatAttr>
+ i = D->specific_attr_begin<FormatAttr>(),
+ e = D->specific_attr_end<FormatAttr>();
+ i != e ; ++i) {
+ FormatAttr *f = *i;
+ if (f->getType() == Format &&
+ f->getFormatIdx() == (int)Idx.getZExtValue() &&
+ f->getFirstArg() == (int)FirstArg.getZExtValue()) {
+ // If we don't have a valid location for this attribute, adopt the
+ // location.
+ if (f->getLocation().isInvalid())
+ f->setLocation(Attr.getLoc());
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) FormatAttr(Attr.getLoc(), S.Context, Format,
Idx.getZExtValue(),
FirstArg.getZExtValue()));
}
-static void HandleTransparentUnionAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleTransparentUnionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
+
// Try to find the underlying union declaration.
RecordDecl *RD = 0;
- TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(d);
+ TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
if (TD && TD->getUnderlyingType()->isUnionType())
RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
else
- RD = dyn_cast<RecordDecl>(d);
+ RD = dyn_cast<RecordDecl>(D);
if (!RD || !RD->isUnion()) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
@@ -1977,12 +2016,11 @@ static void HandleTransparentUnionAttr(Decl *d, const AttributeList &Attr,
RD->addAttr(::new (S.Context) TransparentUnionAttr(Attr.getLoc(), S.Context));
}
-static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
+
Expr *ArgExpr = Attr.getArg(0);
StringLiteral *SE = dyn_cast<StringLiteral>(ArgExpr);
@@ -1992,11 +2030,11 @@ static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(ArgExpr->getLocStart(), diag::err_attribute_not_string) <<"annotate";
return;
}
- d->addAttr(::new (S.Context) AnnotateAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) AnnotateAttr(Attr.getLoc(), S.Context,
SE->getString()));
}
-static void HandleAlignedAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
if (Attr.getNumArgs() > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
@@ -2045,21 +2083,20 @@ void Sema::AddAlignedAttr(SourceLocation AttrLoc, Decl *D, TypeSourceInfo *TS) {
return;
}
-/// HandleModeAttr - This attribute modifies the width of a decl with primitive
+/// handleModeAttr - This attribute modifies the width of a decl with primitive
/// type.
///
/// Despite what would be logical, the mode attribute is a decl attribute, not a
/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
/// HImode, not an intermediate pointer.
-static void HandleModeAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// This attribute isn't documented, but glibc uses it. It changes
// the width of an int or unsigned int to the specified size.
// Check that there aren't any arguments
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
+
IdentifierInfo *Name = Attr.getParameterName();
if (!Name) {
@@ -2216,57 +2253,53 @@ static void HandleModeAttr(Decl *D, const AttributeList &Attr, Sema &S) {
cast<ValueDecl>(D)->setType(NewTy);
}
-static void HandleNoDebugAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() > 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isFunctionOrMethod(d)) {
+ if (!isFunctionOrMethod(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) NoDebugAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) NoDebugAttr(Attr.getLoc(), S.Context));
}
-static void HandleNoInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleNoInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<FunctionDecl>(d)) {
+
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) NoInlineAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) NoInlineAttr(Attr.getLoc(), S.Context));
}
-static void HandleNoInstrumentFunctionAttr(Decl *d, const AttributeList &Attr,
- Sema &S) {
+static void handleNoInstrumentFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<FunctionDecl>(d)) {
+
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) NoInstrumentFunctionAttr(Attr.getLoc(),
+ D->addAttr(::new (S.Context) NoInstrumentFunctionAttr(Attr.getLoc(),
S.Context));
}
-static void HandleConstantAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleConstantAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CUDA) {
// check the attribute arguments.
if (Attr.hasParameterOrArguments()) {
@@ -2274,19 +2307,19 @@ static void HandleConstantAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- if (!isa<VarDecl>(d)) {
+ if (!isa<VarDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariable;
return;
}
- d->addAttr(::new (S.Context) CUDAConstantAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CUDAConstantAttr(Attr.getLoc(), S.Context));
} else {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "constant";
}
}
-static void HandleDeviceAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleDeviceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CUDA) {
// check the attribute arguments.
if (Attr.getNumArgs() != 0) {
@@ -2294,33 +2327,31 @@ static void HandleDeviceAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- if (!isa<FunctionDecl>(d) && !isa<VarDecl>(d)) {
+ if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariableOrFunction;
return;
}
- d->addAttr(::new (S.Context) CUDADeviceAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CUDADeviceAttr(Attr.getLoc(), S.Context));
} else {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "device";
}
}
-static void HandleGlobalAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CUDA) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<FunctionDecl>(d)) {
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- FunctionDecl *FD = cast<FunctionDecl>(d);
+ FunctionDecl *FD = cast<FunctionDecl>(D);
if (!FD->getResultType()->isVoidType()) {
TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
if (FunctionTypeLoc* FTL = dyn_cast<FunctionTypeLoc>(&TL)) {
@@ -2335,60 +2366,56 @@ static void HandleGlobalAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) CUDAGlobalAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CUDAGlobalAttr(Attr.getLoc(), S.Context));
} else {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "global";
}
}
-static void HandleHostAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleHostAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CUDA) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<FunctionDecl>(d)) {
+
+ if (!isa<FunctionDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
return;
}
- d->addAttr(::new (S.Context) CUDAHostAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CUDAHostAttr(Attr.getLoc(), S.Context));
} else {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "host";
}
}
-static void HandleSharedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleSharedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.CUDA) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- if (!isa<VarDecl>(d)) {
+
+ if (!isa<VarDecl>(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedVariable;
return;
}
- d->addAttr(::new (S.Context) CUDASharedAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CUDASharedAttr(Attr.getLoc(), S.Context));
} else {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "shared";
}
}
-static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ if (!checkAttributeNumArgs(S, Attr, 0))
return;
- }
- FunctionDecl *Fn = dyn_cast<FunctionDecl>(d);
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(D);
if (Fn == 0) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunction;
@@ -2400,47 +2427,47 @@ static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- d->addAttr(::new (S.Context) GNUInlineAttr(Attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) GNUInlineAttr(Attr.getLoc(), S.Context));
}
-static void HandleCallConvAttr(Decl *d, const AttributeList &attr, Sema &S) {
- if (hasDeclarator(d)) return;
+static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
- // Diagnostic is emitted elsewhere: here we store the (valid) attr
+ // Diagnostic is emitted elsewhere: here we store the (valid) Attr
// in the Decl node for syntactic reasoning, e.g., pretty-printing.
CallingConv CC;
- if (S.CheckCallingConvAttr(attr, CC))
+ if (S.CheckCallingConvAttr(Attr, CC))
return;
- if (!isa<ObjCMethodDecl>(d)) {
- S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << attr.getName() << ExpectedFunctionOrMethod;
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
return;
}
- switch (attr.getKind()) {
+ switch (Attr.getKind()) {
case AttributeList::AT_fastcall:
- d->addAttr(::new (S.Context) FastCallAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) FastCallAttr(Attr.getLoc(), S.Context));
return;
case AttributeList::AT_stdcall:
- d->addAttr(::new (S.Context) StdCallAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) StdCallAttr(Attr.getLoc(), S.Context));
return;
case AttributeList::AT_thiscall:
- d->addAttr(::new (S.Context) ThisCallAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) ThisCallAttr(Attr.getLoc(), S.Context));
return;
case AttributeList::AT_cdecl:
- d->addAttr(::new (S.Context) CDeclAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) CDeclAttr(Attr.getLoc(), S.Context));
return;
case AttributeList::AT_pascal:
- d->addAttr(::new (S.Context) PascalAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) PascalAttr(Attr.getLoc(), S.Context));
return;
case AttributeList::AT_pcs: {
- Expr *Arg = attr.getArg(0);
+ Expr *Arg = Attr.getArg(0);
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
if (Str == 0 || Str->isWide()) {
- S.Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
<< "pcs" << 1;
- attr.setInvalid();
+ Attr.setInvalid();
return;
}
@@ -2451,12 +2478,12 @@ static void HandleCallConvAttr(Decl *d, const AttributeList &attr, Sema &S) {
else if (StrRef == "aapcs-vfp")
PCS = PcsAttr::AAPCS_VFP;
else {
- S.Diag(attr.getLoc(), diag::err_invalid_pcs);
- attr.setInvalid();
+ S.Diag(Attr.getLoc(), diag::err_invalid_pcs);
+ Attr.setInvalid();
return;
}
- d->addAttr(::new (S.Context) PcsAttr(attr.getLoc(), S.Context, PCS));
+ D->addAttr(::new (S.Context) PcsAttr(Attr.getLoc(), S.Context, PCS));
}
default:
llvm_unreachable("unexpected attribute kind");
@@ -2464,9 +2491,9 @@ static void HandleCallConvAttr(Decl *d, const AttributeList &attr, Sema &S) {
}
}
-static void HandleOpenCLKernelAttr(Decl *d, const AttributeList &Attr, Sema &S){
- assert(Attr.isInvalid() == false);
- d->addAttr(::new (S.Context) OpenCLKernelAttr(Attr.getLoc(), S.Context));
+static void handleOpenCLKernelAttr(Sema &S, Decl *D, const AttributeList &Attr){
+ assert(!Attr.isInvalid());
+ D->addAttr(::new (S.Context) OpenCLKernelAttr(Attr.getLoc(), S.Context));
}
bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
@@ -2515,63 +2542,63 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
return false;
}
-static void HandleRegparmAttr(Decl *d, const AttributeList &attr, Sema &S) {
- if (hasDeclarator(d)) return;
+static void handleRegparmAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
unsigned numParams;
- if (S.CheckRegparmAttr(attr, numParams))
+ if (S.CheckRegparmAttr(Attr, numParams))
return;
- if (!isa<ObjCMethodDecl>(d)) {
- S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << attr.getName() << ExpectedFunctionOrMethod;
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
return;
}
- d->addAttr(::new (S.Context) RegparmAttr(attr.getLoc(), S.Context, numParams));
+ D->addAttr(::new (S.Context) RegparmAttr(Attr.getLoc(), S.Context, numParams));
}
/// Checks a regparm attribute, returning true if it is ill-formed and
/// otherwise setting numParams to the appropriate value.
-bool Sema::CheckRegparmAttr(const AttributeList &attr, unsigned &numParams) {
- if (attr.isInvalid())
+bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
+ if (Attr.isInvalid())
return true;
- if (attr.getNumArgs() != 1) {
- Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
- attr.setInvalid();
+ if (Attr.getNumArgs() != 1) {
+ Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
return true;
}
- Expr *NumParamsExpr = attr.getArg(0);
+ Expr *NumParamsExpr = Attr.getArg(0);
llvm::APSInt NumParams(32);
if (NumParamsExpr->isTypeDependent() || NumParamsExpr->isValueDependent() ||
!NumParamsExpr->isIntegerConstantExpr(NumParams, Context)) {
- Diag(attr.getLoc(), diag::err_attribute_argument_not_int)
+ Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
<< "regparm" << NumParamsExpr->getSourceRange();
- attr.setInvalid();
+ Attr.setInvalid();
return true;
}
if (Context.Target.getRegParmMax() == 0) {
- Diag(attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
<< NumParamsExpr->getSourceRange();
- attr.setInvalid();
+ Attr.setInvalid();
return true;
}
numParams = NumParams.getZExtValue();
if (numParams > Context.Target.getRegParmMax()) {
- Diag(attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
<< Context.Target.getRegParmMax() << NumParamsExpr->getSourceRange();
- attr.setInvalid();
+ Attr.setInvalid();
return true;
}
return false;
}
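For illustration (hypothetical declaration; on i386 the register-parameter maximum is 3), this is the kind of use CheckRegparmAttr bounds-checks before handleRegparmAttr attaches the attribute:

    // Pass the first three integer arguments in registers instead of on the stack.
    int sum3(int a, int b, int c) __attribute__((regparm(3)));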
-static void HandleLaunchBoundsAttr(Decl *d, const AttributeList &Attr, Sema &S){
+static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr){
if (S.LangOpts.CUDA) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
@@ -2580,7 +2607,7 @@ static void HandleLaunchBoundsAttr(Decl *d, const AttributeList &Attr, Sema &S){
return;
}
- if (!isFunctionOrMethod(d)) {
+ if (!isFunctionOrMethod(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << ExpectedFunctionOrMethod;
return;
@@ -2608,7 +2635,7 @@ static void HandleLaunchBoundsAttr(Decl *d, const AttributeList &Attr, Sema &S){
}
}
- d->addAttr(::new (S.Context) CUDALaunchBoundsAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) CUDALaunchBoundsAttr(Attr.getLoc(), S.Context,
MaxThreads.getZExtValue(),
MinBlocks.getZExtValue()));
} else {
@@ -2627,16 +2654,16 @@ static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) {
return type->isPointerType() || isValidSubjectOfNSAttribute(S, type);
}
-static void HandleNSConsumedAttr(Decl *d, const AttributeList &attr, Sema &S) {
- ParmVarDecl *param = dyn_cast<ParmVarDecl>(d);
+static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ ParmVarDecl *param = dyn_cast<ParmVarDecl>(D);
if (!param) {
- S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << SourceRange(attr.getLoc()) << attr.getName() << ExpectedParameter;
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << SourceRange(Attr.getLoc()) << Attr.getName() << ExpectedParameter;
return;
}
bool typeOK, cf;
- if (attr.getKind() == AttributeList::AT_ns_consumed) {
+ if (Attr.getKind() == AttributeList::AT_ns_consumed) {
typeOK = isValidSubjectOfNSAttribute(S, param->getType());
cf = false;
} else {
@@ -2645,47 +2672,52 @@ static void HandleNSConsumedAttr(Decl *d, const AttributeList &attr, Sema &S) {
}
if (!typeOK) {
- S.Diag(d->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
- << SourceRange(attr.getLoc()) << attr.getName() << cf;
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << SourceRange(Attr.getLoc()) << Attr.getName() << cf;
return;
}
if (cf)
- param->addAttr(::new (S.Context) CFConsumedAttr(attr.getLoc(), S.Context));
+ param->addAttr(::new (S.Context) CFConsumedAttr(Attr.getLoc(), S.Context));
else
- param->addAttr(::new (S.Context) NSConsumedAttr(attr.getLoc(), S.Context));
+ param->addAttr(::new (S.Context) NSConsumedAttr(Attr.getLoc(), S.Context));
}
-static void HandleNSConsumesSelfAttr(Decl *d, const AttributeList &attr,
- Sema &S) {
- if (!isa<ObjCMethodDecl>(d)) {
- S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << SourceRange(attr.getLoc()) << attr.getName() << ExpectedMethod;
+static void handleNSConsumesSelfAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << SourceRange(Attr.getLoc()) << Attr.getName() << ExpectedMethod;
return;
}
- d->addAttr(::new (S.Context) NSConsumesSelfAttr(attr.getLoc(), S.Context));
+ D->addAttr(::new (S.Context) NSConsumesSelfAttr(Attr.getLoc(), S.Context));
}
-static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &attr,
- Sema &S) {
+static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
QualType returnType;
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d))
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
returnType = MD->getResultType();
- else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(d))
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
+ returnType = PD->getType();
+ else if (S.getLangOptions().ObjCAutoRefCount && hasDeclarator(D) &&
+ (Attr.getKind() == AttributeList::AT_ns_returns_retained))
+ return; // ignore: was handled as a type attribute
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
returnType = FD->getResultType();
else {
- S.Diag(d->getLocStart(), diag::warn_attribute_wrong_decl_type)
- << SourceRange(attr.getLoc()) << attr.getName()
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << SourceRange(Attr.getLoc()) << Attr.getName()
<< ExpectedFunctionOrMethod;
return;
}
bool typeOK;
bool cf;
- switch (attr.getKind()) {
+ switch (Attr.getKind()) {
default: llvm_unreachable("invalid ownership attribute"); return;
case AttributeList::AT_ns_returns_autoreleased:
case AttributeList::AT_ns_returns_retained:
@@ -2702,39 +2734,95 @@ static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &attr,
}
if (!typeOK) {
- S.Diag(d->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
- << SourceRange(attr.getLoc())
- << attr.getName() << isa<ObjCMethodDecl>(d) << cf;
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
+ << SourceRange(Attr.getLoc())
+ << Attr.getName() << isa<ObjCMethodDecl>(D) << cf;
return;
}
- switch (attr.getKind()) {
+ switch (Attr.getKind()) {
default:
assert(0 && "invalid ownership attribute");
return;
case AttributeList::AT_ns_returns_autoreleased:
- d->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(attr.getLoc(),
+ D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(Attr.getLoc(),
S.Context));
return;
case AttributeList::AT_cf_returns_not_retained:
- d->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(attr.getLoc(),
+ D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(Attr.getLoc(),
S.Context));
return;
case AttributeList::AT_ns_returns_not_retained:
- d->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(attr.getLoc(),
+ D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(Attr.getLoc(),
S.Context));
return;
case AttributeList::AT_cf_returns_retained:
- d->addAttr(::new (S.Context) CFReturnsRetainedAttr(attr.getLoc(),
+ D->addAttr(::new (S.Context) CFReturnsRetainedAttr(Attr.getLoc(),
S.Context));
return;
case AttributeList::AT_ns_returns_retained:
- d->addAttr(::new (S.Context) NSReturnsRetainedAttr(attr.getLoc(),
+ D->addAttr(::new (S.Context) NSReturnsRetainedAttr(Attr.getLoc(),
S.Context));
return;
};
}
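For illustration, the consumed/returns-retained handlers above attach ownership annotations that the static analyzer and ARC read; a minimal sketch on an invented CF-style API (MyRef, MyCreate, and MyAdopt are hypothetical names):

    typedef struct MyRefImpl *MyRef;                              // opaque pointer type
    MyRef MyCreate(void) __attribute__((cf_returns_retained));    // caller owns the result
    void  MyAdopt(MyRef r __attribute__((cf_consumed)));          // callee consumes its argument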
+static void handleObjCOwnershipAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ SourceLocation L = Attr.getLoc();
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << SourceRange(L, L) << Attr.getName() << 12 /* variable */;
+}
+
+static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<VarDecl>(D) && !isa<FieldDecl>(D)) {
+ SourceLocation L = Attr.getLoc();
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << SourceRange(L, L) << Attr.getName() << 12 /* variable */;
+ return;
+ }
+
+ ValueDecl *vd = cast<ValueDecl>(D);
+ QualType type = vd->getType();
+
+ if (!type->isDependentType() &&
+ !type->isObjCLifetimeType()) {
+ S.Diag(Attr.getLoc(), diag::err_objc_precise_lifetime_bad_type)
+ << type;
+ return;
+ }
+
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+
+ // If we have no lifetime yet, check the lifetime we're presumably
+ // going to infer.
+ if (lifetime == Qualifiers::OCL_None && !type->isDependentType())
+ lifetime = type->getObjCARCImplicitLifetime();
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ assert(type->isDependentType() &&
+ "didn't infer lifetime for non-dependent type?");
+ break;
+
+ case Qualifiers::OCL_Weak: // meaningful
+ case Qualifiers::OCL_Strong: // meaningful
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ S.Diag(Attr.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
+ << (lifetime == Qualifiers::OCL_Autoreleasing);
+ break;
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCPreciseLifetimeAttr(Attr.getLoc(), S.Context));
+}
+
static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
return Attr.getKind() == AttributeList::AT_dllimport ||
Attr.getKind() == AttributeList::AT_dllexport ||
@@ -2745,13 +2833,12 @@ static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
// Microsoft specific attribute handlers.
//===----------------------------------------------------------------------===//
-static void HandleUuidAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (S.LangOpts.Microsoft || S.LangOpts.Borland) {
// check the attribute arguments.
- if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ if (!checkAttributeNumArgs(S, Attr, 1))
return;
- }
+
Expr *Arg = Attr.getArg(0);
StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
if (Str == 0 || Str->isWide()) {
@@ -2794,7 +2881,7 @@ static void HandleUuidAttr(Decl *d, const AttributeList &Attr, Sema &S) {
I++;
}
- d->addAttr(::new (S.Context) UuidAttr(Attr.getLoc(), S.Context,
+ D->addAttr(::new (S.Context) UuidAttr(Attr.getLoc(), S.Context,
Str->getString()));
} else
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "uuid";
@@ -2804,24 +2891,24 @@ static void HandleUuidAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
-static void ProcessNonInheritableDeclAttr(Scope *scope, Decl *D,
- const AttributeList &Attr, Sema &S) {
+static void ProcessNonInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
switch (Attr.getKind()) {
- case AttributeList::AT_device: HandleDeviceAttr (D, Attr, S); break;
- case AttributeList::AT_host: HandleHostAttr (D, Attr, S); break;
- case AttributeList::AT_overloadable:HandleOverloadableAttr(D, Attr, S); break;
+ case AttributeList::AT_device: handleDeviceAttr (S, D, Attr); break;
+ case AttributeList::AT_host: handleHostAttr (S, D, Attr); break;
+ case AttributeList::AT_overloadable:handleOverloadableAttr(S, D, Attr); break;
default:
break;
}
}
-static void ProcessInheritableDeclAttr(Scope *scope, Decl *D,
- const AttributeList &Attr, Sema &S) {
+static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
switch (Attr.getKind()) {
- case AttributeList::AT_IBAction: HandleIBAction(D, Attr, S); break;
- case AttributeList::AT_IBOutlet: HandleIBOutlet(D, Attr, S); break;
+ case AttributeList::AT_IBAction: handleIBAction(S, D, Attr); break;
+ case AttributeList::AT_IBOutlet: handleIBOutlet(S, D, Attr); break;
case AttributeList::AT_IBOutletCollection:
- HandleIBOutletCollection(D, Attr, S); break;
+ handleIBOutletCollection(S, D, Attr); break;
case AttributeList::AT_address_space:
case AttributeList::AT_opencl_image_access:
case AttributeList::AT_objc_gc:
@@ -2837,100 +2924,108 @@ static void ProcessInheritableDeclAttr(Scope *scope, Decl *D,
// Ignore, this is a non-inheritable attribute, handled
// by ProcessNonInheritableDeclAttr.
break;
- case AttributeList::AT_alias: HandleAliasAttr (D, Attr, S); break;
- case AttributeList::AT_aligned: HandleAlignedAttr (D, Attr, S); break;
+ case AttributeList::AT_alias: handleAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_aligned: handleAlignedAttr (S, D, Attr); break;
case AttributeList::AT_always_inline:
- HandleAlwaysInlineAttr (D, Attr, S); break;
+ handleAlwaysInlineAttr (S, D, Attr); break;
case AttributeList::AT_analyzer_noreturn:
- HandleAnalyzerNoReturnAttr (D, Attr, S); break;
- case AttributeList::AT_annotate: HandleAnnotateAttr (D, Attr, S); break;
- case AttributeList::AT_availability:HandleAvailabilityAttr(D, Attr, S); break;
+ handleAnalyzerNoReturnAttr (S, D, Attr); break;
+ case AttributeList::AT_annotate: handleAnnotateAttr (S, D, Attr); break;
+ case AttributeList::AT_availability:handleAvailabilityAttr(S, D, Attr); break;
case AttributeList::AT_carries_dependency:
- HandleDependencyAttr (D, Attr, S); break;
- case AttributeList::AT_common: HandleCommonAttr (D, Attr, S); break;
- case AttributeList::AT_constant: HandleConstantAttr (D, Attr, S); break;
- case AttributeList::AT_constructor: HandleConstructorAttr (D, Attr, S); break;
- case AttributeList::AT_deprecated: HandleDeprecatedAttr (D, Attr, S); break;
- case AttributeList::AT_destructor: HandleDestructorAttr (D, Attr, S); break;
+ handleDependencyAttr (S, D, Attr); break;
+ case AttributeList::AT_common: handleCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_constant: handleConstantAttr (S, D, Attr); break;
+ case AttributeList::AT_constructor: handleConstructorAttr (S, D, Attr); break;
+ case AttributeList::AT_deprecated: handleDeprecatedAttr (S, D, Attr); break;
+ case AttributeList::AT_destructor: handleDestructorAttr (S, D, Attr); break;
case AttributeList::AT_ext_vector_type:
- HandleExtVectorTypeAttr(scope, D, Attr, S);
+ handleExtVectorTypeAttr(S, scope, D, Attr);
break;
- case AttributeList::AT_format: HandleFormatAttr (D, Attr, S); break;
- case AttributeList::AT_format_arg: HandleFormatArgAttr (D, Attr, S); break;
- case AttributeList::AT_global: HandleGlobalAttr (D, Attr, S); break;
- case AttributeList::AT_gnu_inline: HandleGNUInlineAttr (D, Attr, S); break;
+ case AttributeList::AT_format: handleFormatAttr (S, D, Attr); break;
+ case AttributeList::AT_format_arg: handleFormatArgAttr (S, D, Attr); break;
+ case AttributeList::AT_global: handleGlobalAttr (S, D, Attr); break;
+ case AttributeList::AT_gnu_inline: handleGNUInlineAttr (S, D, Attr); break;
case AttributeList::AT_launch_bounds:
- HandleLaunchBoundsAttr(D, Attr, S);
+ handleLaunchBoundsAttr(S, D, Attr);
break;
- case AttributeList::AT_mode: HandleModeAttr (D, Attr, S); break;
- case AttributeList::AT_malloc: HandleMallocAttr (D, Attr, S); break;
- case AttributeList::AT_may_alias: HandleMayAliasAttr (D, Attr, S); break;
- case AttributeList::AT_nocommon: HandleNoCommonAttr (D, Attr, S); break;
- case AttributeList::AT_nonnull: HandleNonNullAttr (D, Attr, S); break;
+ case AttributeList::AT_mode: handleModeAttr (S, D, Attr); break;
+ case AttributeList::AT_malloc: handleMallocAttr (S, D, Attr); break;
+ case AttributeList::AT_may_alias: handleMayAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_nocommon: handleNoCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_nonnull: handleNonNullAttr (S, D, Attr); break;
case AttributeList::AT_ownership_returns:
case AttributeList::AT_ownership_takes:
case AttributeList::AT_ownership_holds:
- HandleOwnershipAttr (D, Attr, S); break;
- case AttributeList::AT_naked: HandleNakedAttr (D, Attr, S); break;
- case AttributeList::AT_noreturn: HandleNoReturnAttr (D, Attr, S); break;
- case AttributeList::AT_nothrow: HandleNothrowAttr (D, Attr, S); break;
- case AttributeList::AT_shared: HandleSharedAttr (D, Attr, S); break;
- case AttributeList::AT_vecreturn: HandleVecReturnAttr (D, Attr, S); break;
+ handleOwnershipAttr (S, D, Attr); break;
+ case AttributeList::AT_naked: handleNakedAttr (S, D, Attr); break;
+ case AttributeList::AT_noreturn: handleNoReturnAttr (S, D, Attr); break;
+ case AttributeList::AT_nothrow: handleNothrowAttr (S, D, Attr); break;
+ case AttributeList::AT_shared: handleSharedAttr (S, D, Attr); break;
+ case AttributeList::AT_vecreturn: handleVecReturnAttr (S, D, Attr); break;
+
+ case AttributeList::AT_objc_ownership:
+ handleObjCOwnershipAttr(S, D, Attr); break;
+ case AttributeList::AT_objc_precise_lifetime:
+ handleObjCPreciseLifetimeAttr(S, D, Attr); break;
// Checker-specific.
case AttributeList::AT_cf_consumed:
- case AttributeList::AT_ns_consumed: HandleNSConsumedAttr (D, Attr, S); break;
+ case AttributeList::AT_ns_consumed: handleNSConsumedAttr (S, D, Attr); break;
case AttributeList::AT_ns_consumes_self:
- HandleNSConsumesSelfAttr(D, Attr, S); break;
+ handleNSConsumesSelfAttr(S, D, Attr); break;
case AttributeList::AT_ns_returns_autoreleased:
case AttributeList::AT_ns_returns_not_retained:
case AttributeList::AT_cf_returns_not_retained:
case AttributeList::AT_ns_returns_retained:
case AttributeList::AT_cf_returns_retained:
- HandleNSReturnsRetainedAttr(D, Attr, S); break;
+ handleNSReturnsRetainedAttr(S, D, Attr); break;
case AttributeList::AT_reqd_wg_size:
- HandleReqdWorkGroupSize(D, Attr, S); break;
+ handleReqdWorkGroupSize(S, D, Attr); break;
case AttributeList::AT_init_priority:
- HandleInitPriorityAttr(D, Attr, S); break;
+ handleInitPriorityAttr(S, D, Attr); break;
- case AttributeList::AT_packed: HandlePackedAttr (D, Attr, S); break;
- case AttributeList::AT_MsStruct: HandleMsStructAttr (D, Attr, S); break;
- case AttributeList::AT_section: HandleSectionAttr (D, Attr, S); break;
- case AttributeList::AT_unavailable: HandleUnavailableAttr (D, Attr, S); break;
- case AttributeList::AT_unused: HandleUnusedAttr (D, Attr, S); break;
- case AttributeList::AT_used: HandleUsedAttr (D, Attr, S); break;
- case AttributeList::AT_visibility: HandleVisibilityAttr (D, Attr, S); break;
- case AttributeList::AT_warn_unused_result: HandleWarnUnusedResult(D,Attr,S);
+ case AttributeList::AT_packed: handlePackedAttr (S, D, Attr); break;
+ case AttributeList::AT_MsStruct: handleMsStructAttr (S, D, Attr); break;
+ case AttributeList::AT_section: handleSectionAttr (S, D, Attr); break;
+ case AttributeList::AT_unavailable: handleUnavailableAttr (S, D, Attr); break;
+ case AttributeList::AT_arc_weakref_unavailable:
+ handleArcWeakrefUnavailableAttr (S, D, Attr);
break;
- case AttributeList::AT_weak: HandleWeakAttr (D, Attr, S); break;
- case AttributeList::AT_weakref: HandleWeakRefAttr (D, Attr, S); break;
- case AttributeList::AT_weak_import: HandleWeakImportAttr (D, Attr, S); break;
+ case AttributeList::AT_unused: handleUnusedAttr (S, D, Attr); break;
+ case AttributeList::AT_used: handleUsedAttr (S, D, Attr); break;
+ case AttributeList::AT_visibility: handleVisibilityAttr (S, D, Attr); break;
+ case AttributeList::AT_warn_unused_result: handleWarnUnusedResult(S, D, Attr);
+ break;
+ case AttributeList::AT_weak: handleWeakAttr (S, D, Attr); break;
+ case AttributeList::AT_weakref: handleWeakRefAttr (S, D, Attr); break;
+ case AttributeList::AT_weak_import: handleWeakImportAttr (S, D, Attr); break;
case AttributeList::AT_transparent_union:
- HandleTransparentUnionAttr(D, Attr, S);
+ handleTransparentUnionAttr(S, D, Attr);
break;
case AttributeList::AT_objc_exception:
- HandleObjCExceptionAttr(D, Attr, S);
+ handleObjCExceptionAttr(S, D, Attr);
break;
case AttributeList::AT_objc_method_family:
- HandleObjCMethodFamilyAttr(D, Attr, S);
+ handleObjCMethodFamilyAttr(S, D, Attr);
break;
- case AttributeList::AT_nsobject: HandleObjCNSObject (D, Attr, S); break;
- case AttributeList::AT_blocks: HandleBlocksAttr (D, Attr, S); break;
- case AttributeList::AT_sentinel: HandleSentinelAttr (D, Attr, S); break;
- case AttributeList::AT_const: HandleConstAttr (D, Attr, S); break;
- case AttributeList::AT_pure: HandlePureAttr (D, Attr, S); break;
- case AttributeList::AT_cleanup: HandleCleanupAttr (D, Attr, S); break;
- case AttributeList::AT_nodebug: HandleNoDebugAttr (D, Attr, S); break;
- case AttributeList::AT_noinline: HandleNoInlineAttr (D, Attr, S); break;
- case AttributeList::AT_regparm: HandleRegparmAttr (D, Attr, S); break;
+ case AttributeList::AT_nsobject: handleObjCNSObject (S, D, Attr); break;
+ case AttributeList::AT_blocks: handleBlocksAttr (S, D, Attr); break;
+ case AttributeList::AT_sentinel: handleSentinelAttr (S, D, Attr); break;
+ case AttributeList::AT_const: handleConstAttr (S, D, Attr); break;
+ case AttributeList::AT_pure: handlePureAttr (S, D, Attr); break;
+ case AttributeList::AT_cleanup: handleCleanupAttr (S, D, Attr); break;
+ case AttributeList::AT_nodebug: handleNoDebugAttr (S, D, Attr); break;
+ case AttributeList::AT_noinline: handleNoInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_regparm: handleRegparmAttr (S, D, Attr); break;
case AttributeList::IgnoredAttribute:
// Just ignore
break;
case AttributeList::AT_no_instrument_function: // Interacts with -pg.
- HandleNoInstrumentFunctionAttr(D, Attr, S);
+ handleNoInstrumentFunctionAttr(S, D, Attr);
break;
case AttributeList::AT_stdcall:
case AttributeList::AT_cdecl:
@@ -2938,13 +3033,13 @@ static void ProcessInheritableDeclAttr(Scope *scope, Decl *D,
case AttributeList::AT_thiscall:
case AttributeList::AT_pascal:
case AttributeList::AT_pcs:
- HandleCallConvAttr(D, Attr, S);
+ handleCallConvAttr(S, D, Attr);
break;
case AttributeList::AT_opencl_kernel_function:
- HandleOpenCLKernelAttr(D, Attr, S);
+ handleOpenCLKernelAttr(S, D, Attr);
break;
case AttributeList::AT_uuid:
- HandleUuidAttr(D, Attr, S);
+ handleUuidAttr(S, D, Attr);
break;
default:
// Ask target about the attribute.
@@ -2960,8 +3055,8 @@ static void ProcessInheritableDeclAttr(Scope *scope, Decl *D,
/// the attribute applies to decls. If the attribute is a type attribute, just
/// silently ignore it if a GNU attribute. FIXME: Applying a C++0x attribute to
/// the wrong thing is illegal (C++0x [dcl.attr.grammar]/4).
-static void ProcessDeclAttribute(Scope *scope, Decl *D,
- const AttributeList &Attr, Sema &S,
+static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr,
bool NonInheritable, bool Inheritable) {
if (Attr.isInvalid())
return;
@@ -2971,10 +3066,10 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
return;
if (NonInheritable)
- ProcessNonInheritableDeclAttr(scope, D, Attr, S);
+ ProcessNonInheritableDeclAttr(S, scope, D, Attr);
if (Inheritable)
- ProcessInheritableDeclAttr(scope, D, Attr, S);
+ ProcessInheritableDeclAttr(S, scope, D, Attr);
}
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
@@ -2983,7 +3078,7 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
const AttributeList *AttrList,
bool NonInheritable, bool Inheritable) {
for (const AttributeList* l = AttrList; l; l = l->getNext()) {
- ProcessDeclAttribute(S, D, *l, *this, NonInheritable, Inheritable);
+ ProcessDeclAttribute(*this, S, D, *l, NonInheritable, Inheritable);
}
// GCC accepts
@@ -3085,6 +3180,32 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
}
+/// Is the given declaration allowed to use a forbidden type?
+static bool isForbiddenTypeAllowed(Sema &S, Decl *decl) {
+ // Private ivars are always okay. Unfortunately, people don't
+ // always properly make their ivars private, even in system headers.
+ // Plus we need to make fields okay, too.
+ if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl))
+ return false;
+
+ // Require it to be declared in a system header.
+ return S.Context.getSourceManager().isInSystemHeader(decl->getLocation());
+}
+
+/// Handle a delayed forbidden-type diagnostic.
+static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
+ Decl *decl) {
+ if (decl && isForbiddenTypeAllowed(S, decl)) {
+ decl->addAttr(new (S.Context) UnavailableAttr(diag.Loc, S.Context,
+ "this system declaration uses an unsupported type"));
+ return;
+ }
+
+ S.Diag(diag.Loc, diag.getForbiddenTypeDiagnostic())
+ << diag.getForbiddenTypeOperand() << diag.getForbiddenTypeArgument();
+ diag.Triggered = true;
+}
+
// This duplicates a vector push_back but hides the need to know the
// size of the type.
void Sema::DelayedDiagnostics::add(const DelayedDiagnostic &diag) {
@@ -3126,7 +3247,7 @@ void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
// We only want to actually emit delayed diagnostics when we
// successfully parsed a decl.
- if (decl) {
+ if (decl && !decl->isInvalidDecl()) {
// We emit all the active diagnostics, not just those starting
// from the saved state. The idea is this: we get one push for a
// decl spec and another for each declarator; in a decl group like:
@@ -3147,6 +3268,10 @@ void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
case DelayedDiagnostic::Access:
S.HandleDelayedAccessCheck(diag, decl);
break;
+
+ case DelayedDiagnostic::ForbiddenType:
+ handleDelayedForbiddenType(S, diag, decl);
+ break;
}
}
}
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index ce99efbd0bd2..d793daf9d826 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -185,9 +185,9 @@ void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) {
// FIXME:
//
// C++0x [except.spec]p14:
- // [An] implicit exception-specification specifies the type-id T if and
- // only if T is allowed by the exception-specification of a function directly
- // invoked by f’s implicit definition; f shall allow all exceptions if any
+ // [An] implicit exception-specification specifies the type-id T if and
+ // only if T is allowed by the exception-specification of a function directly
+ // invoked by f's implicit definition; f shall allow all exceptions if any
// function it directly invokes allows all exceptions, and f shall allow no
// exceptions if every function it directly invokes allows no exceptions.
//
@@ -762,13 +762,6 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
QualType NewBaseType
= Context.getCanonicalType(Bases[idx]->getType());
NewBaseType = NewBaseType.getLocalUnqualifiedType();
- if (!Class->hasObjectMember()) {
- if (const RecordType *FDTTy =
- NewBaseType.getTypePtr()->getAs<RecordType>())
- if (FDTTy->getDecl()->hasObjectMember())
- Class->setHasObjectMember(true);
- }
-
if (KnownBaseTypes[NewBaseType]) {
// C++ [class.mi]p3:
// A class shall not be specified as a direct base class of a
@@ -1086,14 +1079,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
assert(!DS.isFriendSpecified());
assert(!Init || !HasDeferredInit);
- bool isFunc = false;
- if (D.isFunctionDeclarator())
- isFunc = true;
- else if (D.getNumTypeObjects() == 0 &&
- D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename) {
- QualType TDType = GetTypeFromParser(DS.getRepAsType());
- isFunc = TDType->isFunctionType();
- }
+ bool isFunc = D.isDeclarationOfFunction();
// C++ 9.2p6: A member shall not be declared to have automatic storage
// duration (auto, register) or with the extern storage-class-specifier.
@@ -1440,25 +1426,27 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
}
// If no results were found, try to correct typos.
+ TypoCorrection Corr;
if (R.empty() && BaseType.isNull() &&
- CorrectTypo(R, S, &SS, ClassDecl, 0, CTC_NoKeywords) &&
- R.isSingleResult()) {
- if (FieldDecl *Member = R.getAsSingle<FieldDecl>()) {
+ (Corr = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ ClassDecl, false, CTC_NoKeywords))) {
+ std::string CorrectedStr(Corr.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corr.getQuoted(getLangOptions()));
+ if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
if (Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl)) {
// We have found a non-static data member with a similar
// name to what was typed; complain and initialize that
// member.
Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
- << MemberOrBase << true << R.getLookupName()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- R.getLookupName().getAsString());
+ << MemberOrBase << true << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
Diag(Member->getLocation(), diag::note_previous_decl)
- << Member->getDeclName();
+ << CorrectedQuotedStr;
return BuildMemberInitializer(Member, (Expr**)Args, NumArgs, IdLoc,
LParenLoc, RParenLoc);
}
- } else if (TypeDecl *Type = R.getAsSingle<TypeDecl>()) {
+ } else if (TypeDecl *Type = Corr.getCorrectionDeclAs<TypeDecl>()) {
const CXXBaseSpecifier *DirectBaseSpec;
const CXXBaseSpecifier *VirtualBaseSpec;
if (FindBaseInitializer(*this, ClassDecl,
@@ -1468,9 +1456,8 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
// similar name to what was typed; complain and initialize
// that base class.
Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
- << MemberOrBase << false << R.getLookupName()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- R.getLookupName().getAsString());
+ << MemberOrBase << false << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
const CXXBaseSpecifier *BaseSpec = DirectBaseSpec? DirectBaseSpec
: VirtualBaseSpec;
@@ -1617,14 +1604,10 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr **Args,
// Can't check initialization for a member of dependent type or when
// any of the arguments are type-dependent expressions.
Init = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc);
-
- // Erase any temporaries within this evaluation context; we're not
- // going to track them in the AST, since we'll be rebuilding the
- // ASTs during template instantiation.
- ExprTemporaries.erase(
- ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
- ExprTemporaries.end());
+ RParenLoc,
+ Member->getType().getNonReferenceType());
+
+ DiscardCleanupsInEvaluationContext();
} else {
// Initialize the member.
InitializedEntity MemberEntity =
@@ -1658,8 +1641,9 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr **Args,
// initializer. However, deconstructing the ASTs is a dicey process,
// and this approach is far more likely to get the corner cases right.
if (CurContext->isDependentContext())
- Init = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc);
+ Init = new (Context) ParenListExpr(
+ Context, LParenLoc, Args, NumArgs, RParenLoc,
+ Member->getType().getNonReferenceType());
else
Init = MemberInit.get();
}
@@ -1715,22 +1699,7 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo,
if (DelegationInit.isInvalid())
return true;
- // If we are in a dependent context, template instantiation will
- // perform this type-checking again. Just save the arguments that we
- // received in a ParenListExpr.
- // FIXME: This isn't quite ideal, since our ASTs don't capture all
- // of the information that we have about the base
- // initializer. However, deconstructing the ASTs is a dicey process,
- // and this approach is far more likely to get the corner cases right.
- if (CurContext->isDependentContext()) {
- ExprResult Init
- = Owned(new (Context) ParenListExpr(Context, LParenLoc, Args,
- NumArgs, RParenLoc));
- return new (Context) CXXCtorInitializer(Context, Loc, LParenLoc,
- Constructor, Init.takeAs<Expr>(),
- RParenLoc);
- }
-
+ assert(!CurContext->isDependentContext());
return new (Context) CXXCtorInitializer(Context, Loc, LParenLoc, Constructor,
DelegationInit.takeAs<Expr>(),
RParenLoc);
@@ -1815,14 +1784,9 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// any of the arguments are type-dependent expressions.
ExprResult BaseInit
= Owned(new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc));
+ RParenLoc, BaseType));
- // Erase any temporaries within this evaluation context; we're not
- // going to track them in the AST, since we'll be rebuilding the
- // ASTs during template instantiation.
- ExprTemporaries.erase(
- ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
- ExprTemporaries.end());
+ DiscardCleanupsInEvaluationContext();
return new (Context) CXXCtorInitializer(Context, BaseTInfo,
/*IsVirtual=*/false,
@@ -1878,7 +1842,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
if (CurContext->isDependentContext()) {
ExprResult Init
= Owned(new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
- RParenLoc));
+ RParenLoc, BaseType));
return new (Context) CXXCtorInitializer(Context, BaseTInfo,
BaseSpec->isVirtual(),
LParenLoc,
@@ -1989,6 +1953,11 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (ImplicitInitKind == IIK_Copy) {
ParmVarDecl *Param = Constructor->getParamDecl(0);
QualType ParamType = Param->getType().getNonReferenceType();
+
+ // Suppress copying zero-width bitfields.
+ if (const Expr *Width = Field->getBitWidth())
+ if (Width->EvaluateAsInt(SemaRef.Context) == 0)
+ return false;
Expr *MemberExprBase =
DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(), Param,
@@ -2134,6 +2103,20 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
}
}
+ if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ FieldBaseElementType->isObjCRetainableType() &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // Instant objects:
+ // Default-initialize Objective-C pointers to NULL.
+ CXXMemberInit
+ = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
+ Loc, Loc,
+ new (SemaRef.Context) ImplicitValueInitExpr(Field->getType()),
+ Loc);
+ return false;
+ }
+
// Nothing to initialize.
CXXMemberInit = 0;
return false;
@@ -2207,11 +2190,8 @@ static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
}
}
- // Fallthrough and construct a default initializer for the union as
- // a whole, which can call its default constructor if such a thing exists
- // (C++0x perhaps). FIXME: It's not clear that this is the correct
- // behavior going forward with C++0x, when anonymous unions there are
- // finalized, we should revisit this.
+ // FIXME: C++0x unrestricted unions might call a default constructor here.
+ return false;
} else {
// For structs, we simply descend through to initialize all members where
// necessary.
@@ -2259,11 +2239,10 @@ Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
return false;
}
-bool
-Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
- CXXCtorInitializer **Initializers,
- unsigned NumInitializers,
- bool AnyErrors) {
+bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Initializers,
+ unsigned NumInitializers,
+ bool AnyErrors) {
if (Constructor->getDeclContext()->isDependentContext()) {
// Just store the initializers as written, they will be checked during
// instantiation.
@@ -3405,7 +3384,7 @@ void Sema::CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *DD) {
bool Sema::ShouldDeleteDefaultConstructor(CXXConstructorDecl *CD) {
CXXRecordDecl *RD = CD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x)
+ if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
return false;
SourceLocation Loc = CD->getLocation();
@@ -3586,7 +3565,7 @@ bool Sema::ShouldDeleteDefaultConstructor(CXXConstructorDecl *CD) {
bool Sema::ShouldDeleteCopyConstructor(CXXConstructorDecl *CD) {
CXXRecordDecl *RD = CD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x)
+ if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
return false;
SourceLocation Loc = CD->getLocation();
@@ -3636,7 +3615,7 @@ bool Sema::ShouldDeleteCopyConstructor(CXXConstructorDecl *CD) {
// resolution, as applied to B's [copy] constructor, results in an
// ambiguity or a function that is deleted or inaccessible from the
// defaulted constructor
- CXXConstructorDecl *BaseCtor = LookupCopyConstructor(BaseDecl, ArgQuals);
+ CXXConstructorDecl *BaseCtor = LookupCopyingConstructor(BaseDecl, ArgQuals);
if (!BaseCtor || BaseCtor->isDeleted())
return true;
if (CheckConstructorAccess(Loc, BaseCtor, BaseCtor->getAccess(), PDiag()) !=
@@ -3664,7 +3643,7 @@ bool Sema::ShouldDeleteCopyConstructor(CXXConstructorDecl *CD) {
// resolution, as applied to B's [copy] constructor, results in an
// ambiguity or a function that is deleted or inaccessible from the
// defaulted constructor
- CXXConstructorDecl *BaseCtor = LookupCopyConstructor(BaseDecl, ArgQuals);
+ CXXConstructorDecl *BaseCtor = LookupCopyingConstructor(BaseDecl, ArgQuals);
if (!BaseCtor || BaseCtor->isDeleted())
return true;
if (CheckConstructorAccess(Loc, BaseCtor, BaseCtor->getAccess(), PDiag()) !=
@@ -3726,8 +3705,8 @@ bool Sema::ShouldDeleteCopyConstructor(CXXConstructorDecl *CD) {
// cannot be [copied] because overload resolution, as applied to B's
// [copy] constructor, results in an ambiguity or a function that is
// deleted or inaccessible from the defaulted constructor
- CXXConstructorDecl *FieldCtor = LookupCopyConstructor(FieldRecord,
- ArgQuals);
+ CXXConstructorDecl *FieldCtor = LookupCopyingConstructor(FieldRecord,
+ ArgQuals);
if (!FieldCtor || FieldCtor->isDeleted())
return true;
if (CheckConstructorAccess(Loc, FieldCtor, FieldCtor->getAccess(),
@@ -3742,7 +3721,7 @@ bool Sema::ShouldDeleteCopyConstructor(CXXConstructorDecl *CD) {
bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
CXXRecordDecl *RD = MD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x)
+ if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
return false;
SourceLocation Loc = MD->getLocation();
@@ -3752,19 +3731,15 @@ bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
bool Union = RD->isUnion();
- bool ConstArg =
- MD->getParamDecl(0)->getType()->getPointeeType().isConstQualified();
+ unsigned ArgQuals =
+ MD->getParamDecl(0)->getType()->getPointeeType().isConstQualified() ?
+ Qualifiers::Const : 0;
// We do this because we should never actually use an anonymous
// union's constructor.
if (Union && RD->isAnonymousStructOrUnion())
return false;
- DeclarationName OperatorName =
- Context.DeclarationNames.getCXXOperatorName(OO_Equal);
- LookupResult R(*this, OperatorName, Loc, LookupOrdinaryName);
- R.suppressDiagnostics();
-
// FIXME: We should put some diagnostic logic right into this function.
// C++0x [class.copy]/11
@@ -3786,37 +3761,11 @@ bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
// resolution, as applied to B's [copy] assignment operator, results in
// an ambiguity or a function that is deleted or inaccessible from the
// assignment operator
-
- LookupQualifiedName(R, BaseDecl, false);
-
- // Filter out any result that isn't a copy-assignment operator.
- LookupResult::Filter F = R.makeFilter();
- while (F.hasNext()) {
- NamedDecl *D = F.next();
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isCopyAssignmentOperator())
- continue;
-
- F.erase();
- }
- F.done();
-
- // Build a fake argument expression
- QualType ArgType = BaseType;
- QualType ThisType = BaseType;
- if (ConstArg)
- ArgType.addConst();
- Expr *Args[] = { new (Context) OpaqueValueExpr(Loc, ThisType, VK_LValue)
- , new (Context) OpaqueValueExpr(Loc, ArgType, VK_LValue)
- };
-
- OverloadCandidateSet OCS((Loc));
- OverloadCandidateSet::iterator Best;
-
- AddFunctionCandidates(R.asUnresolvedSet(), Args, 2, OCS);
-
- if (OCS.BestViableFunction(*this, Loc, Best, false) !=
- OR_Success)
+ CXXMethodDecl *CopyOper = LookupCopyingAssignment(BaseDecl, ArgQuals, false,
+ 0);
+ if (!CopyOper || CopyOper->isDeleted())
+ return true;
+ if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
return true;
}
@@ -3831,37 +3780,11 @@ bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
// resolution, as applied to B's [copy] assignment operator, results in
// an ambiguity or a function that is deleted or inaccessible from the
// assignment operator
-
- LookupQualifiedName(R, BaseDecl, false);
-
- // Filter out any result that isn't a copy-assignment operator.
- LookupResult::Filter F = R.makeFilter();
- while (F.hasNext()) {
- NamedDecl *D = F.next();
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isCopyAssignmentOperator())
- continue;
-
- F.erase();
- }
- F.done();
-
- // Build a fake argument expression
- QualType ArgType = BaseType;
- QualType ThisType = BaseType;
- if (ConstArg)
- ArgType.addConst();
- Expr *Args[] = { new (Context) OpaqueValueExpr(Loc, ThisType, VK_LValue)
- , new (Context) OpaqueValueExpr(Loc, ArgType, VK_LValue)
- };
-
- OverloadCandidateSet OCS((Loc));
- OverloadCandidateSet::iterator Best;
-
- AddFunctionCandidates(R.asUnresolvedSet(), Args, 2, OCS);
-
- if (OCS.BestViableFunction(*this, Loc, Best, false) !=
- OR_Success)
+ CXXMethodDecl *CopyOper = LookupCopyingAssignment(BaseDecl, ArgQuals, false,
+ 0);
+ if (!CopyOper || CopyOper->isDeleted())
+ return true;
+ if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
return true;
}
@@ -3908,37 +3831,12 @@ bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
return true;
}
- LookupQualifiedName(R, FieldRecord, false);
-
- // Filter out any result that isn't a copy-assignment operator.
- LookupResult::Filter F = R.makeFilter();
- while (F.hasNext()) {
- NamedDecl *D = F.next();
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isCopyAssignmentOperator())
- continue;
-
- F.erase();
- }
- F.done();
-
- // Build a fake argument expression
- QualType ArgType = FieldType;
- QualType ThisType = FieldType;
- if (ConstArg)
- ArgType.addConst();
- Expr *Args[] = { new (Context) OpaqueValueExpr(Loc, ThisType, VK_LValue)
- , new (Context) OpaqueValueExpr(Loc, ArgType, VK_LValue)
- };
-
- OverloadCandidateSet OCS((Loc));
- OverloadCandidateSet::iterator Best;
-
- AddFunctionCandidates(R.asUnresolvedSet(), Args, 2, OCS);
-
- if (OCS.BestViableFunction(*this, Loc, Best, false) !=
- OR_Success)
- return true;
+ CXXMethodDecl *CopyOper = LookupCopyingAssignment(FieldRecord, ArgQuals,
+ false, 0);
+ if (!CopyOper || CopyOper->isDeleted())
+ return false;
+ if (CheckDirectMemberAccess(Loc, CopyOper, PDiag()) != AR_accessible)
+ return false;
}
}
@@ -3948,7 +3846,7 @@ bool Sema::ShouldDeleteCopyAssignmentOperator(CXXMethodDecl *MD) {
bool Sema::ShouldDeleteDestructor(CXXDestructorDecl *DD) {
CXXRecordDecl *RD = DD->getParent();
assert(!RD->isDependentType() && "do deletion after instantiation");
- if (!LangOpts.CPlusPlus0x)
+ if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
return false;
SourceLocation Loc = DD->getLocation();
@@ -4806,6 +4704,13 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
// Make our StdNamespace cache point at the first real definition of the
// "std" namespace.
StdNamespace = Namespc;
+
+ // Add this instance of "std" to the set of known namespaces
+ KnownNamespaces[Namespc] = false;
+ } else if (!Namespc->isInline()) {
+ // Since this is an "original" namespace, add it to the known set of
+ // namespaces if it is not an inline namespace.
+ KnownNamespaces[Namespc] = false;
}
PushOnScopeChains(Namespc, DeclRegionScope);
@@ -4941,6 +4846,39 @@ static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
}
}
+static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+ R.clear();
+ if (TypoCorrection Corrected = S.CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), Sc, &SS, NULL,
+ false, S.CTC_NoKeywords, NULL)) {
+ if (Corrected.getCorrectionDeclAs<NamespaceDecl>() ||
+ Corrected.getCorrectionDeclAs<NamespaceAliasDecl>()) {
+ std::string CorrectedStr(Corrected.getAsString(S.getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(S.getLangOptions()));
+ if (DeclContext *DC = S.computeDeclContext(SS, false))
+ S.Diag(IdentLoc, diag::err_using_directive_member_suggest)
+ << Ident << DC << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+ else
+ S.Diag(IdentLoc, diag::err_using_directive_suggest)
+ << Ident << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+
+ S.Diag(Corrected.getCorrectionDecl()->getLocation(),
+ diag::note_namespace_defined_here) << CorrectedQuotedStr;
+
+ Ident = Corrected.getCorrectionAsIdentifierInfo();
+ R.addDecl(Corrected.getCorrectionDecl());
+ return true;
+ }
+ R.setLookupName(Ident);
+ }
+ return false;
+}
+
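For illustration, TryNamespaceTypoCorrection is what now turns a misspelled namespace name in a using-directive or namespace alias into err_using_directive_suggest plus a fix-it; a deliberately misspelled sketch (namespace names are hypothetical):

    namespace widgets { int count; }
    using namespace widgts;    // diagnosed with a suggestion to use 'widgets'
    namespace w = widgts;      // same correction path, now shared with ActOnNamespaceAliasDef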
Decl *Sema::ActOnUsingDirective(Scope *S,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
@@ -4969,6 +4907,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
return 0;
if (R.empty()) {
+ R.clear();
// Allow "using namespace std;" or "using namespace ::std;" even if
// "std" hasn't been defined yet, for GCC compatibility.
if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
@@ -4978,27 +4917,7 @@ Decl *Sema::ActOnUsingDirective(Scope *S,
R.resolveKind();
}
// Otherwise, attempt typo correction.
- else if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false,
- CTC_NoKeywords, 0)) {
- if (R.getAsSingle<NamespaceDecl>() ||
- R.getAsSingle<NamespaceAliasDecl>()) {
- if (DeclContext *DC = computeDeclContext(SS, false))
- Diag(IdentLoc, diag::err_using_directive_member_suggest)
- << NamespcName << DC << Corrected << SS.getRange()
- << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
- else
- Diag(IdentLoc, diag::err_using_directive_suggest)
- << NamespcName << Corrected
- << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
- Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here)
- << Corrected;
-
- NamespcName = Corrected.getAsIdentifierInfo();
- } else {
- R.clear();
- R.setLookupName(NamespcName);
- }
- }
+ else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
}
if (!R.empty()) {
@@ -5065,6 +4984,7 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
switch (Name.getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
case UnqualifiedId::IK_Identifier:
case UnqualifiedId::IK_OperatorFunctionId:
case UnqualifiedId::IK_LiteralOperatorId:
@@ -5916,30 +5836,7 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S,
return 0;
if (R.empty()) {
- if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false,
- CTC_NoKeywords, 0)) {
- if (R.getAsSingle<NamespaceDecl>() ||
- R.getAsSingle<NamespaceAliasDecl>()) {
- if (DeclContext *DC = computeDeclContext(SS, false))
- Diag(IdentLoc, diag::err_using_directive_member_suggest)
- << Ident << DC << Corrected << SS.getRange()
- << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
- else
- Diag(IdentLoc, diag::err_using_directive_suggest)
- << Ident << Corrected
- << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
-
- Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here)
- << Corrected;
-
- Ident = Corrected.getAsIdentifierInfo();
- } else {
- R.clear();
- R.setLookupName(Ident);
- }
- }
-
- if (R.empty()) {
+ if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange();
return 0;
}
@@ -5982,6 +5879,8 @@ Sema::ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
ImplicitExceptionSpecification ExceptSpec(Context);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
// Direct base-class constructors.
for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
@@ -6355,7 +6254,9 @@ Sema::ComputeDefaultedDtorExceptionSpec(CXXRecordDecl *ClassDecl) {
// An implicitly declared special member function (Clause 12) shall have
// an exception-specification.
ImplicitExceptionSpecification ExceptSpec(Context);
-
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
// Direct base-class destructors.
for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
BEnd = ClassDecl->bases_end();
@@ -6687,61 +6588,12 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
Loc, Copy.take());
}
-/// \brief Determine whether the given class has a copy assignment operator
-/// that accepts a const-qualified argument.
-static bool hasConstCopyAssignment(Sema &S, const CXXRecordDecl *CClass) {
- CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(CClass);
-
- if (!Class->hasDeclaredCopyAssignment())
- S.DeclareImplicitCopyAssignment(Class);
-
- QualType ClassType = S.Context.getCanonicalType(S.Context.getTypeDeclType(Class));
- DeclarationName OpName
- = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal);
-
- DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = Class->lookup(OpName); Op != OpEnd; ++Op) {
- // C++ [class.copy]p9:
- // A user-declared copy assignment operator is a non-static non-template
- // member function of class X with exactly one parameter of type X, X&,
- // const X&, volatile X& or const volatile X&.
- const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
- if (!Method)
- continue;
-
- if (Method->isStatic())
- continue;
- if (Method->getPrimaryTemplate())
- continue;
- const FunctionProtoType *FnType =
- Method->getType()->getAs<FunctionProtoType>();
- assert(FnType && "Overloaded operator has no prototype.");
- // Don't assert on this; an invalid decl might have been left in the AST.
- if (FnType->getNumArgs() != 1 || FnType->isVariadic())
- continue;
- bool AcceptsConst = true;
- QualType ArgType = FnType->getArgType(0);
- if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()){
- ArgType = Ref->getPointeeType();
- // Is it a non-const lvalue reference?
- if (!ArgType.isConstQualified())
- AcceptsConst = false;
- }
- if (!S.Context.hasSameUnqualifiedType(ArgType, ClassType))
- continue;
-
- // We have a single argument of type cv X or cv X&, i.e. we've found the
- // copy assignment operator. Return whether it accepts const arguments.
- return AcceptsConst;
- }
- assert(Class->isInvalidDecl() &&
- "No copy assignment operator declared in valid code.");
- return false;
-}
-
std::pair<Sema::ImplicitExceptionSpecification, bool>
Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
CXXRecordDecl *ClassDecl) {
+ if (ClassDecl->isInvalidDecl())
+ return std::make_pair(ImplicitExceptionSpecification(Context), false);
+
// C++ [class.copy]p10:
// If the class definition does not explicitly declare a copy
// assignment operator, one is declared implicitly.
@@ -6759,11 +6611,28 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ // We'll handle this below
+ if (LangOpts.CPlusPlus0x && Base->isVirtual())
+ continue;
+
assert(!Base->getType()->isDependentType() &&
"Cannot generate implicit members for class with dependent bases.");
- const CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- HasConstCopyAssignment = hasConstCopyAssignment(*this, BaseClassDecl);
+ CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
+ LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
+ }
+
+ // In C++0x, the above citation has "or virtual" added
+ if (LangOpts.CPlusPlus0x) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ assert(!Base->getType()->isDependentType() &&
+ "Cannot generate implicit members for class with dependent bases.");
+ CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
+ LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
+ }
}
// -- for all the nonstatic data members of X that are of a class
@@ -6775,10 +6644,9 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
HasConstCopyAssignment && Field != FieldEnd;
++Field) {
QualType FieldType = Context.getBaseElementType((*Field)->getType());
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- const CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
- HasConstCopyAssignment = hasConstCopyAssignment(*this, FieldClassDecl);
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ LookupCopyingAssignment(FieldClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
}
}
@@ -6790,36 +6658,48 @@ Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
// C++ [except.spec]p14:
// An implicitly declared special member function (Clause 12) shall have an
// exception-specification. [...]
+
+ // It is unspecified whether an implicit copy assignment operator deduplicates
+ // the calls it makes to the assignment operators of its virtual bases. As
+ // such, this exception specification is effectively unspecified.
+ // Based on a similar decision made for constness in C++0x, we're erring on
+ // the side of assuming such calls to be made regardless of whether they
+ // actually happen.
ImplicitExceptionSpecification ExceptSpec(Context);
+ unsigned ArgQuals = HasConstCopyAssignment ? Qualifiers::Const : 0;
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
BaseEnd = ClassDecl->bases_end();
Base != BaseEnd; ++Base) {
+ if (Base->isVirtual())
+ continue;
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
-
- if (!BaseClassDecl->hasDeclaredCopyAssignment())
- DeclareImplicitCopyAssignment(BaseClassDecl);
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
- if (CXXMethodDecl *CopyAssign
- = BaseClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment))
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
ExceptSpec.CalledDecl(CopyAssign);
}
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd;
++Field) {
QualType FieldType = Context.getBaseElementType((*Field)->getType());
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
-
- if (!FieldClassDecl->hasDeclaredCopyAssignment())
- DeclareImplicitCopyAssignment(FieldClassDecl);
-
- if (CXXMethodDecl *CopyAssign
- = FieldClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment))
- ExceptSpec.CalledDecl(CopyAssign);
- }
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXMethodDecl *CopyAssign =
+ LookupCopyingAssignment(FieldClassDecl, ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
}
return std::make_pair(ExceptSpec, HasConstCopyAssignment);
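
As a rough illustration of the [except.spec]p14 computation above (a sketch with hypothetical types, assuming C++03-style dynamic exception specifications): the implicit copy assignment operator's specification is derived from the operators it would call on bases and members.

  struct Quiet {
    Quiet &operator=(const Quiet &) throw();   // promises not to throw
  };

  struct Loud {
    Loud &operator=(const Loud &);             // may throw anything
  };

  struct X : Loud {
    Quiet q;
    // The implicit X::operator= calls both operators above, so it allows
    // whatever Loud::operator= allows; it would only be treated as
    // non-throwing if every called operator= were non-throwing.
  };
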
@@ -6875,7 +6755,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
PushOnScopeChains(CopyAssignment, S, false);
ClassDecl->addDecl(CopyAssignment);
- if (ShouldDeleteCopyAssignmentOperator(CopyAssignment))
+ // C++0x [class.copy]p18:
+ // ... If the class definition declares a move constructor or move
+ // assignment operator, the implicitly declared copy assignment operator is
+ // defined as deleted; ...
+ if (ClassDecl->hasUserDeclaredMoveConstructor() ||
+ ClassDecl->hasUserDeclaredMoveAssignment() ||
+ ShouldDeleteCopyAssignmentOperator(CopyAssignment))
CopyAssignment->setDeletedAsWritten();
AddOverriddenMethods(ClassDecl, CopyAssignment);
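
A minimal C++0x example of the [class.copy]p18 rule applied here (illustrative only; the later copy constructor hunk applies the analogous p7 rule):

  struct Movable {
    Movable(Movable &&);   // user-declared move constructor
  };

  void f(Movable &a, const Movable &b) {
    a = b;   // ill-formed: the implicitly declared copy assignment
             // operator is defined as deleted
  }
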
@@ -7014,6 +6900,11 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
Invalid = true;
continue;
}
+
+ // Suppress assigning zero-width bitfields.
+ if (const Expr *Width = Field->getBitWidth())
+ if (Width->EvaluateAsInt(Context) == 0)
+ continue;
QualType FieldType = Field->getType().getNonReferenceType();
if (FieldType->isIncompleteArrayType()) {
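
For reference, the kind of member the new check skips (a sketch; a zero-width bit-field is unnamed, forces alignment, and holds no value, so there is nothing to assign):

  struct Flags {
    unsigned a : 3;
    unsigned   : 0;   // zero-width bit-field: padding/alignment only
    unsigned b : 5;
  };
  // The synthesized Flags::operator= assigns 'a' and 'b' and emits
  // nothing for the unnamed zero-width member.
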
@@ -7041,10 +6932,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// explicit assignments, do so. This optimization only applies for arrays
// of scalars and arrays of class type with trivial copy-assignment
// operators.
- if (FieldType->isArrayType() &&
- (!BaseType->isRecordType() ||
- cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl())
- ->hasTrivialCopyAssignment())) {
+ if (FieldType->isArrayType() &&
+ BaseType.hasTrivialCopyAssignment(Context)) {
// Compute the size of the memory buffer to be copied.
QualType SizeType = Context.getSizeType();
llvm::APInt Size(Context.getTypeSize(SizeType),
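
A sketch of the case this optimization targets, with hypothetical types: an array member whose element type is a scalar or a class with trivial copy assignment can be copied with one memcpy of the whole array instead of per-element assignments.

  struct Pod { int x, y; };   // trivial copy assignment

  struct Buffer {
    int raw[64];
    Pod items[16];
    // The defaulted Buffer::operator= may copy each array member with a
    // single memcpy rather than 64 + 16 element-wise assignments.
  };
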
@@ -7179,6 +7068,9 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
std::pair<Sema::ImplicitExceptionSpecification, bool>
Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
+ if (ClassDecl->isInvalidDecl())
+ return std::make_pair(ImplicitExceptionSpecification(Context), false);
+
// C++ [class.copy]p5:
// The implicitly-declared copy constructor for a class X will
// have the form
@@ -7202,8 +7094,8 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- LookupCopyConstructor(BaseClassDecl, Qualifiers::Const,
- &HasConstCopyConstructor);
+ LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
}
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
@@ -7212,8 +7104,8 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
++Base) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- LookupCopyConstructor(BaseClassDecl, Qualifiers::Const,
- &HasConstCopyConstructor);
+ LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
}
// -- for all the nonstatic data members of X that are of a
@@ -7226,8 +7118,8 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
++Field) {
QualType FieldType = Context.getBaseElementType((*Field)->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
- LookupCopyConstructor(FieldClassDecl, Qualifiers::Const,
- &HasConstCopyConstructor);
+ LookupCopyingConstructor(FieldClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
}
}
// Otherwise, the implicitly declared copy constructor will have
@@ -7251,7 +7143,7 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (CXXConstructorDecl *CopyConstructor =
- LookupCopyConstructor(BaseClassDecl, Quals))
+ LookupCopyingConstructor(BaseClassDecl, Quals))
ExceptSpec.CalledDecl(CopyConstructor);
}
for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
@@ -7261,7 +7153,7 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (CXXConstructorDecl *CopyConstructor =
- LookupCopyConstructor(BaseClassDecl, Quals))
+ LookupCopyingConstructor(BaseClassDecl, Quals))
ExceptSpec.CalledDecl(CopyConstructor);
}
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
@@ -7271,7 +7163,7 @@ Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
QualType FieldType = Context.getBaseElementType((*Field)->getType());
if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
if (CXXConstructorDecl *CopyConstructor =
- LookupCopyConstructor(FieldClassDecl, Quals))
+ LookupCopyingConstructor(FieldClassDecl, Quals))
ExceptSpec.CalledDecl(CopyConstructor);
}
}
@@ -7334,7 +7226,13 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
PushOnScopeChains(CopyConstructor, S, false);
ClassDecl->addDecl(CopyConstructor);
- if (ShouldDeleteCopyConstructor(CopyConstructor))
+ // C++0x [class.copy]p7:
+ // ... If the class definition declares a move constructor or move
+ // assignment operator, the implicitly declared copy constructor is defined as
+ // deleted; ...
+ if (ClassDecl->hasUserDeclaredMoveConstructor() ||
+ ClassDecl->hasUserDeclaredMoveAssignment() ||
+ ShouldDeleteCopyConstructor(CopyConstructor))
CopyConstructor->setDeletedAsWritten();
return CopyConstructor;
@@ -7523,6 +7421,10 @@ void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
VDecl->setTypeSourceInfo(DeducedType);
VDecl->setType(DeducedType->getType());
+ // In ARC, infer lifetime.
+ if (getLangOptions().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ VDecl->setInvalidDecl();
+
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDeclaration())
@@ -7605,9 +7507,9 @@ void Sema::AddCXXDirectInitializerToDecl(Decl *RealDecl,
// Store the initialization expressions as a ParenListExpr.
unsigned NumExprs = Exprs.size();
- VDecl->setInit(new (Context) ParenListExpr(Context, LParenLoc,
- (Expr **)Exprs.release(),
- NumExprs, RParenLoc));
+ VDecl->setInit(new (Context) ParenListExpr(
+ Context, LParenLoc, (Expr **)Exprs.release(), NumExprs, RParenLoc,
+ VDecl->getType().getNonReferenceType()));
return;
}
@@ -8143,10 +8045,8 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
Diag(Loc, diag::err_objc_object_catch);
Invalid = true;
} else if (T->isObjCObjectPointerType()) {
- if (!getLangOptions().ObjCNonFragileABI) {
- Diag(Loc, diag::err_objc_pointer_cxx_catch_fragile);
- Invalid = true;
- }
+ if (!getLangOptions().ObjCNonFragileABI)
+ Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile);
}
}
@@ -8154,7 +8054,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
ExDeclType, TInfo, SC_None, SC_None);
ExDecl->setExceptionVariable(true);
- if (!Invalid) {
+ if (!Invalid && !ExDeclType->isDependentType()) {
if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
// C++ [except.handle]p16:
// The object declared in an exception-declaration or, if the
@@ -9020,25 +8920,16 @@ DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
// new class or enumeration.
assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
"Parser allowed 'typedef' as storage class of condition decl.");
-
- TagDecl *OwnedTag = 0;
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag);
- QualType Ty = TInfo->getType();
-
- if (Ty->isFunctionType()) { // The declarator shall not specify a function...
- // We exit without creating a CXXConditionDeclExpr because a FunctionDecl
- // would be created and CXXConditionDeclExpr wants a VarDecl.
- Diag(D.getIdentifierLoc(), diag::err_invalid_use_of_function_type)
- << D.getSourceRange();
- return DeclResult();
- } else if (OwnedTag && OwnedTag->isDefinition()) {
- // The type-specifier-seq shall not declare a new class or enumeration.
- Diag(OwnedTag->getLocation(), diag::err_type_defined_in_condition);
- }
-
+
Decl *Dcl = ActOnDeclarator(S, D);
if (!Dcl)
- return DeclResult();
+ return true;
+
+ if (isa<FunctionDecl>(Dcl)) { // The declarator shall not specify a function.
+ Diag(Dcl->getLocation(), diag::err_invalid_use_of_function_type)
+ << D.getSourceRange();
+ return true;
+ }
return Dcl;
}
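
Roughly the construct this check rejects (an illustrative sketch; the names are hypothetical): a condition may declare a variable, but its declarator must not declare a function.

  void g(int (*pf)()) {
    if (int ok = pf())       // fine: the condition declares a variable
      ;
    // if (int bad() = 0) ;  // rejected: the condition declarator would
    //                       // declare a function, not a variable
  }
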
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index de9097e98b4f..aa8152b03b0a 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -16,14 +16,96 @@
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Sema/DeclSpec.h"
#include "llvm/ADT/DenseSet.h"
using namespace clang;
+/// Check whether the given method, which must be in the 'init'
+/// family, is a valid member of that family.
+///
+/// \param receiverTypeIfCall - if null, check this as if declaring it;
+/// if non-null, check this as if making a call to it with the given
+/// receiver type
+///
+/// \return true to indicate that there was an error and appropriate
+/// actions were taken
+bool Sema::checkInitMethod(ObjCMethodDecl *method,
+ QualType receiverTypeIfCall) {
+ if (method->isInvalidDecl()) return true;
+
+ // This castAs is safe: methods that don't return an object
+ // pointer won't be inferred as inits and will reject an explicit
+ // objc_method_family(init).
+
+ // We ignore protocols here. Should we? What about Class?
+
+ const ObjCObjectType *result = method->getResultType()
+ ->castAs<ObjCObjectPointerType>()->getObjectType();
+
+ if (result->isObjCId()) {
+ return false;
+ } else if (result->isObjCClass()) {
+ // fall through: always an error
+ } else {
+ ObjCInterfaceDecl *resultClass = result->getInterface();
+ assert(resultClass && "unexpected object type!");
+
+ // It's okay for the result type to still be a forward declaration
+ // if we're checking an interface declaration.
+ if (resultClass->isForwardDecl()) {
+ if (receiverTypeIfCall.isNull() &&
+ !isa<ObjCImplementationDecl>(method->getDeclContext()))
+ return false;
+
+ // Otherwise, we try to compare class types.
+ } else {
+ // If this method was declared in a protocol, we can't check
+ // anything unless we have a receiver type that's an interface.
+ const ObjCInterfaceDecl *receiverClass = 0;
+ if (isa<ObjCProtocolDecl>(method->getDeclContext())) {
+ if (receiverTypeIfCall.isNull())
+ return false;
+
+ receiverClass = receiverTypeIfCall->castAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+
+ // This can be null for calls to e.g. id<Foo>.
+ if (!receiverClass) return false;
+ } else {
+ receiverClass = method->getClassInterface();
+ assert(receiverClass && "method not associated with a class!");
+ }
+
+ // If either class is a subclass of the other, it's fine.
+ if (receiverClass->isSuperClassOf(resultClass) ||
+ resultClass->isSuperClassOf(receiverClass))
+ return false;
+ }
+ }
+
+ SourceLocation loc = method->getLocation();
+
+ // If we're in a system header, and this is not a call, just make
+ // the method unusable.
+ if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
+ method->addAttr(new (Context) UnavailableAttr(loc, Context,
+ "init method returns a type unrelated to its receiver type"));
+ return true;
+ }
+
+ // Otherwise, it's an error.
+ Diag(loc, diag::err_arc_init_method_unrelated_result_type);
+ method->setInvalidDecl();
+ return true;
+}
+
bool Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden,
bool IsImplementation) {
@@ -36,7 +118,7 @@ bool Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
QualType ResultType = NewMethod->getResultType();
SourceRange ResultTypeRange;
if (const TypeSourceInfo *ResultTypeInfo
- = NewMethod->getResultTypeSourceInfo())
+ = NewMethod->getResultTypeSourceInfo())
ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
// Figure out which class this method is part of, if any.
@@ -73,7 +155,15 @@ bool Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
return false;
}
-
+/// \brief Check for consistency between a given method declaration and the
+/// methods it overrides within the class hierarchy.
+///
+/// This method walks the inheritance hierarchy starting at the given
+/// declaration context (\p DC), invoking Sema::CheckObjCMethodOverride() with
+/// the given new method (\p NewMethod) and any method it directly overrides
+/// in the hierarchy. Sema::CheckObjCMethodOverride() is responsible for
+/// checking consistency, e.g., among return types for methods that return a
+/// related result type.
static bool CheckObjCMethodOverrides(Sema &S, ObjCMethodDecl *NewMethod,
DeclContext *DC,
bool SkipCurrent = true) {
@@ -99,10 +189,10 @@ static bool CheckObjCMethodOverrides(Sema &S, ObjCMethodDecl *NewMethod,
if (CheckObjCMethodOverrides(S, NewMethod, Category, false))
return true;
}
-
+
// Look through protocols.
for (ObjCList<ObjCProtocolDecl>::iterator I = Class->protocol_begin(),
- IEnd = Class->protocol_end();
+ IEnd = Class->protocol_end();
I != IEnd; ++I)
if (CheckObjCMethodOverrides(S, NewMethod, *I, false))
return true;
@@ -115,7 +205,7 @@ static bool CheckObjCMethodOverrides(Sema &S, ObjCMethodDecl *NewMethod,
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(DC)) {
// Look through protocols.
for (ObjCList<ObjCProtocolDecl>::iterator I = Category->protocol_begin(),
- IEnd = Category->protocol_end();
+ IEnd = Category->protocol_end();
I != IEnd; ++I)
if (CheckObjCMethodOverrides(S, NewMethod, *I, false))
return true;
@@ -126,7 +216,7 @@ static bool CheckObjCMethodOverrides(Sema &S, ObjCMethodDecl *NewMethod,
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(DC)) {
// Look through protocols.
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocol->protocol_begin(),
- IEnd = Protocol->protocol_end();
+ IEnd = Protocol->protocol_end();
I != IEnd; ++I)
if (CheckObjCMethodOverrides(S, NewMethod, *I, false))
return true;
@@ -141,13 +231,13 @@ bool Sema::CheckObjCMethodOverrides(ObjCMethodDecl *NewMethod,
DeclContext *DC) {
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(DC))
return ::CheckObjCMethodOverrides(*this, NewMethod, Class);
-
+
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(DC))
return ::CheckObjCMethodOverrides(*this, NewMethod, Category);
-
+
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(DC))
return ::CheckObjCMethodOverrides(*this, NewMethod, Protocol);
-
+
if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(DC))
return ::CheckObjCMethodOverrides(*this, NewMethod,
Impl->getClassInterface());
@@ -159,6 +249,55 @@ bool Sema::CheckObjCMethodOverrides(ObjCMethodDecl *NewMethod,
return ::CheckObjCMethodOverrides(*this, NewMethod, CurContext);
}
+/// \brief Check a method declaration for compatibility with the Objective-C
+/// ARC conventions.
+static bool CheckARCMethodDecl(Sema &S, ObjCMethodDecl *method) {
+ ObjCMethodFamily family = method->getMethodFamily();
+ switch (family) {
+ case OMF_None:
+ case OMF_dealloc:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ return false;
+
+ case OMF_init:
+ // If the method doesn't obey the init rules, don't bother annotating it.
+ if (S.checkInitMethod(method, QualType()))
+ return true;
+
+ method->addAttr(new (S.Context) NSConsumesSelfAttr(SourceLocation(),
+ S.Context));
+
+ // Don't add a second copy of this attribute, but otherwise don't
+ // let it be suppressed.
+ if (method->hasAttr<NSReturnsRetainedAttr>())
+ return false;
+ break;
+
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (method->hasAttr<NSReturnsRetainedAttr>() ||
+ method->hasAttr<NSReturnsNotRetainedAttr>() ||
+ method->hasAttr<NSReturnsAutoreleasedAttr>())
+ return false;
+ break;
+
+ case OMF_performSelector:
+ // We don't annotate performSelector methods.
+ return true;
+
+ }
+
+ method->addAttr(new (S.Context) NSReturnsRetainedAttr(SourceLocation(),
+ S.Context));
+ return false;
+}
+
static void DiagnoseObjCImplementedDeprecations(Sema &S,
NamedDecl *ND,
SourceLocation ImplLoc,
@@ -212,6 +351,31 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
if ((*PI)->getIdentifier())
PushOnScopeChains(*PI, FnBodyScope);
}
+
+ // In ARC, disallow definition of retain/release/autorelease/retainCount
+ if (getLangOptions().ObjCAutoRefCount) {
+ switch (MDecl->getMethodFamily()) {
+ case OMF_retain:
+ case OMF_retainCount:
+ case OMF_release:
+ case OMF_autorelease:
+ Diag(MDecl->getLocation(), diag::err_arc_illegal_method_def)
+ << MDecl->getSelector();
+ break;
+
+ case OMF_None:
+ case OMF_dealloc:
+ case OMF_alloc:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_copy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_performSelector:
+ break;
+ }
+ }
+
// Warn on implementing deprecated methods under
// -Wdeprecated-implementations flag.
if (ObjCInterfaceDecl *IC = MDecl->getClassInterface())
@@ -284,9 +448,10 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
if (!PrevDecl) {
// Try to correct for a typo in the superclass name.
- LookupResult R(*this, SuperName, SuperLoc, LookupOrdinaryName);
- if (CorrectTypo(R, TUScope, 0, 0, false, CTC_NoKeywords) &&
- (PrevDecl = R.getAsSingle<ObjCInterfaceDecl>())) {
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName, TUScope,
+ NULL, NULL, false, CTC_NoKeywords);
+ if ((PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
Diag(SuperLoc, diag::err_undef_superclass_suggest)
<< SuperName << ClassName << PrevDecl->getDeclName();
Diag(PrevDecl->getLocation(), diag::note_previous_decl)
@@ -333,10 +498,13 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
if (!SuperClassDecl)
Diag(SuperLoc, diag::err_undef_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
- else if (SuperClassDecl->isForwardDecl())
- Diag(SuperLoc, diag::err_undef_superclass)
+ else if (SuperClassDecl->isForwardDecl()) {
+ Diag(SuperLoc, diag::err_forward_superclass)
<< SuperClassDecl->getDeclName() << ClassName
<< SourceRange(AtInterfaceLoc, ClassLoc);
+ Diag(SuperClassDecl->getLocation(), diag::note_forward_class);
+ SuperClassDecl = 0;
+ }
}
IDecl->setSuperClass(SuperClassDecl);
IDecl->setSuperClassLoc(SuperLoc);
@@ -494,12 +662,12 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
ProtocolId[i].second);
if (!PDecl) {
- LookupResult R(*this, ProtocolId[i].first, ProtocolId[i].second,
- LookupObjCProtocolName);
- if (CorrectTypo(R, TUScope, 0, 0, false, CTC_NoKeywords) &&
- (PDecl = R.getAsSingle<ObjCProtocolDecl>())) {
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
+ LookupObjCProtocolName, TUScope, NULL, NULL, false, CTC_NoKeywords);
+ if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>())) {
Diag(ProtocolId[i].second, diag::err_undeclared_protocol_suggest)
- << ProtocolId[i].first << R.getLookupName();
+ << ProtocolId[i].first << Corrected.getCorrection();
Diag(PDecl->getLocation(), diag::note_previous_decl)
<< PDecl->getDeclName();
}
@@ -736,20 +904,20 @@ Decl *Sema::ActOnStartClassImplementation(
} else {
// We did not find anything with the name ClassName; try to correct for
// typos in the class name.
- LookupResult R(*this, ClassName, ClassLoc, LookupOrdinaryName);
- if (CorrectTypo(R, TUScope, 0, 0, false, CTC_NoKeywords) &&
- (IDecl = R.getAsSingle<ObjCInterfaceDecl>())) {
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
+ NULL, NULL, false, CTC_NoKeywords);
+ if ((IDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>())) {
// Suggest the (potentially) correct interface name. However, put the
// fix-it hint itself in a separate note, since changing the name in
// the warning would make the fix-it change semantics. However, don't
// provide a code-modification hint or use the typo name for recovery,
// because this is just a warning. The program may actually be correct.
+ DeclarationName CorrectedName = Corrected.getCorrection();
Diag(ClassLoc, diag::warn_undef_interface_suggest)
- << ClassName << R.getLookupName();
- Diag(IDecl->getLocation(), diag::note_previous_decl)
- << R.getLookupName()
- << FixItHint::CreateReplacement(ClassLoc,
- R.getLookupName().getAsString());
+ << ClassName << CorrectedName;
+ Diag(IDecl->getLocation(), diag::note_previous_decl) << CorrectedName
+ << FixItHint::CreateReplacement(ClassLoc, CorrectedName.getAsString());
IDecl = 0;
} else {
Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
@@ -915,6 +1083,9 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
bool &IncompleteImpl, unsigned DiagID) {
+ // No point warning about a missing definition of a method that is 'unavailable'.
+ if (method->hasAttr<UnavailableAttr>())
+ return;
if (!IncompleteImpl) {
Diag(ImpLoc, diag::warn_incomplete_impl);
IncompleteImpl = true;
@@ -1091,11 +1262,84 @@ static void CheckMethodOverrideParam(Sema &S,
S.Diag(IfaceVar->getLocation(), diag::note_previous_definition)
<< getTypeRange(IfaceVar->getTypeSourceInfo());
}
-
+
+/// In ARC, check whether the conventional meanings of the two methods
+/// match. If they don't, it's a hard error.
+static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
+ ObjCMethodDecl *decl) {
+ ObjCMethodFamily implFamily = impl->getMethodFamily();
+ ObjCMethodFamily declFamily = decl->getMethodFamily();
+ if (implFamily == declFamily) return false;
+
+ // Since conventions are sorted by selector, the only possibility is
+ // that the types differ enough to cause one selector or the other
+ // to fall out of the family.
+ assert(implFamily == OMF_None || declFamily == OMF_None);
+
+ // No further diagnostics required on invalid declarations.
+ if (impl->isInvalidDecl() || decl->isInvalidDecl()) return true;
+
+ const ObjCMethodDecl *unmatched = impl;
+ ObjCMethodFamily family = declFamily;
+ unsigned errorID = diag::err_arc_lost_method_convention;
+ unsigned noteID = diag::note_arc_lost_method_convention;
+ if (declFamily == OMF_None) {
+ unmatched = decl;
+ family = implFamily;
+ errorID = diag::err_arc_gained_method_convention;
+ noteID = diag::note_arc_gained_method_convention;
+ }
+
+ // Indexes into a %select clause in the diagnostic.
+ enum FamilySelector {
+ F_alloc, F_copy, F_mutableCopy = F_copy, F_init, F_new
+ };
+ FamilySelector familySelector = FamilySelector();
+
+ switch (family) {
+ case OMF_None: llvm_unreachable("logic error, no method convention");
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_retainCount:
+ case OMF_self:
+ case OMF_performSelector:
+ // Mismatches for these methods don't change ownership
+ // conventions, so we don't care.
+ return false;
+
+ case OMF_init: familySelector = F_init; break;
+ case OMF_alloc: familySelector = F_alloc; break;
+ case OMF_copy: familySelector = F_copy; break;
+ case OMF_mutableCopy: familySelector = F_mutableCopy; break;
+ case OMF_new: familySelector = F_new; break;
+ }
+
+ enum ReasonSelector { R_NonObjectReturn, R_UnrelatedReturn };
+ ReasonSelector reasonSelector;
+
+ // The only reason these methods don't fall within their families is
+ // due to unusual result types.
+ if (unmatched->getResultType()->isObjCObjectPointerType()) {
+ reasonSelector = R_UnrelatedReturn;
+ } else {
+ reasonSelector = R_NonObjectReturn;
+ }
+
+ S.Diag(impl->getLocation(), errorID) << familySelector << reasonSelector;
+ S.Diag(decl->getLocation(), noteID) << familySelector << reasonSelector;
+
+ return true;
+}
void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl) {
+ if (getLangOptions().ObjCAutoRefCount &&
+ checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl))
+ return;
+
CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
IsProtocolMethodDecl);
@@ -1204,7 +1448,7 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, IDecl);
}
-/// MatchAllMethodDeclarations - Check methods declaraed in interface or
+/// MatchAllMethodDeclarations - Check methods declared in interface
+/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
///
void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
@@ -1422,48 +1666,117 @@ Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
return CDecl;
}
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *left, const Type *right);
+
+static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
+ QualType leftQT, QualType rightQT) {
+ const Type *left =
+ Context.getCanonicalType(leftQT).getUnqualifiedType().getTypePtr();
+ const Type *right =
+ Context.getCanonicalType(rightQT).getUnqualifiedType().getTypePtr();
+
+ if (left == right) return true;
+
+ // If we're doing a strict match, the types have to match exactly.
+ if (strategy == Sema::MMS_strict) return false;
+
+ if (left->isIncompleteType() || right->isIncompleteType()) return false;
+
+ // Otherwise, use this absurdly complicated algorithm to try to
+ // validate the basic, low-level compatibility of the two types.
+
+ // As a minimum, require the sizes and alignments to match.
+ if (Context.getTypeInfo(left) != Context.getTypeInfo(right))
+ return false;
+
+ // Consider all the kinds of non-dependent canonical types:
+ // - functions and arrays aren't possible as return and parameter types
+
+ // - vector types of equal size can be arbitrarily mixed
+ if (isa<VectorType>(left)) return isa<VectorType>(right);
+ if (isa<VectorType>(right)) return false;
+
+ // - references should only match references of identical type
+ // - structs, unions, and Objective-C objects must match more-or-less
+ // exactly
+ // - everything else should be a scalar
+ if (!left->isScalarType() || !right->isScalarType())
+ return tryMatchRecordTypes(Context, strategy, left, right);
+
+ // Make scalars agree in kind, except count bools as chars.
+ Type::ScalarTypeKind leftSK = left->getScalarTypeKind();
+ Type::ScalarTypeKind rightSK = right->getScalarTypeKind();
+ if (leftSK == Type::STK_Bool) leftSK = Type::STK_Integral;
+ if (rightSK == Type::STK_Bool) rightSK = Type::STK_Integral;
+
+ // Note that data member pointers and function member pointers don't
+ // intermix because of the size differences.
+
+ return (leftSK == rightSK);
+}
+
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *lt, const Type *rt) {
+ assert(lt && rt && lt != rt);
+
+ if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false;
+ RecordDecl *left = cast<RecordType>(lt)->getDecl();
+ RecordDecl *right = cast<RecordType>(rt)->getDecl();
+
+ // Require union-hood to match.
+ if (left->isUnion() != right->isUnion()) return false;
+
+ // Require an exact match if either is non-POD.
+ if ((isa<CXXRecordDecl>(left) && !cast<CXXRecordDecl>(left)->isPOD()) ||
+ (isa<CXXRecordDecl>(right) && !cast<CXXRecordDecl>(right)->isPOD()))
+ return false;
+
+ // Require size and alignment to match.
+ if (Context.getTypeInfo(lt) != Context.getTypeInfo(rt)) return false;
+
+ // Require fields to match.
+ RecordDecl::field_iterator li = left->field_begin(), le = left->field_end();
+ RecordDecl::field_iterator ri = right->field_begin(), re = right->field_end();
+ for (; li != le && ri != re; ++li, ++ri) {
+ if (!matchTypes(Context, strategy, li->getType(), ri->getType()))
+ return false;
+ }
+ return (li == le && ri == re);
+}
/// MatchTwoMethodDeclarations - Checks whether two methods have matching
/// types; returns true if they do and false otherwise.
/// TODO: Handle protocol lists, such as id<p1,p2>, in type comparisons.
-bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
- const ObjCMethodDecl *PrevMethod,
- bool matchBasedOnSizeAndAlignment,
- bool matchBasedOnStrictEqulity) {
- QualType T1 = Context.getCanonicalType(Method->getResultType());
- QualType T2 = Context.getCanonicalType(PrevMethod->getResultType());
-
- if (T1 != T2) {
- // The result types are different.
- if (!matchBasedOnSizeAndAlignment || matchBasedOnStrictEqulity)
- return false;
- // Incomplete types don't have a size and alignment.
- if (T1->isIncompleteType() || T2->isIncompleteType())
- return false;
- // Check is based on size and alignment.
- if (Context.getTypeInfo(T1) != Context.getTypeInfo(T2))
- return false;
- }
+bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
+ const ObjCMethodDecl *right,
+ MethodMatchStrategy strategy) {
+ if (!matchTypes(Context, strategy,
+ left->getResultType(), right->getResultType()))
+ return false;
- ObjCMethodDecl::param_iterator ParamI = Method->param_begin(),
- E = Method->param_end();
- ObjCMethodDecl::param_iterator PrevI = PrevMethod->param_begin();
+ if (getLangOptions().ObjCAutoRefCount &&
+ (left->hasAttr<NSReturnsRetainedAttr>()
+ != right->hasAttr<NSReturnsRetainedAttr>() ||
+ left->hasAttr<NSConsumesSelfAttr>()
+ != right->hasAttr<NSConsumesSelfAttr>()))
+ return false;
- for (; ParamI != E; ++ParamI, ++PrevI) {
- assert(PrevI != PrevMethod->param_end() && "Param mismatch");
- T1 = Context.getCanonicalType((*ParamI)->getType());
- T2 = Context.getCanonicalType((*PrevI)->getType());
- if (T1 != T2) {
- // The result types are different.
- if (!matchBasedOnSizeAndAlignment || matchBasedOnStrictEqulity)
- return false;
- // Incomplete types don't have a size and alignment.
- if (T1->isIncompleteType() || T2->isIncompleteType())
- return false;
- // Check is based on size and alignment.
- if (Context.getTypeInfo(T1) != Context.getTypeInfo(T2))
- return false;
- }
+ ObjCMethodDecl::param_iterator
+ li = left->param_begin(), le = left->param_end(), ri = right->param_begin();
+
+ for (; li != le; ++li, ++ri) {
+ assert(ri != right->param_end() && "Param mismatch");
+ ParmVarDecl *lparm = *li, *rparm = *ri;
+
+ if (!matchTypes(Context, strategy, lparm->getType(), rparm->getType()))
+ return false;
+
+ if (getLangOptions().ObjCAutoRefCount &&
+ lparm->hasAttr<NSConsumedAttr>() != rparm->hasAttr<NSConsumedAttr>())
+ return false;
}
return true;
}
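
A rough illustration of how the loose strategy above classifies a few corresponding parameter types (hypothetical typedef pairs, assuming a typical LP64 target; the strict strategy instead requires the canonical types to be identical):

  typedef int          L1; typedef unsigned int R1;  // match: same size, both integral scalars
  typedef bool         L2; typedef char         R2;  // match: bool is folded into the integral kind
  typedef int          L3; typedef float        R3;  // differ: same size, different scalar kinds
  typedef long         L4; typedef int          R4;  // differ: sizes already disagree
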
@@ -1505,8 +1818,10 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
// We've seen a method with this name, see if we have already seen this type
// signature.
- for (ObjCMethodList *List = &Entry; List; List = List->Next)
- if (MatchTwoMethodDeclarations(Method, List->Method)) {
+ for (ObjCMethodList *List = &Entry; List; List = List->Next) {
+ bool match = MatchTwoMethodDeclarations(Method, List->Method);
+
+ if (match) {
ObjCMethodDecl *PrevObjCMethod = List->Method;
PrevObjCMethod->setDefined(impl);
// If a method is deprecated, push it in the global pool.
@@ -1523,6 +1838,7 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
}
return;
}
+ }
// We have a new signature for an existing method - add it.
// This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
@@ -1530,6 +1846,25 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
Entry.Next = new (Mem) ObjCMethodList(Method, Entry.Next);
}
+/// Determines if this is an "acceptable" loose mismatch in the global
+/// method pool. This exists mostly as a hack to get around certain
+/// global mismatches which we can't afford to turn into warnings or errors.
+/// Really, what we want is a way to take a method out of the global
+/// method pool.
+static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen,
+ ObjCMethodDecl *other) {
+ if (!chosen->isInstanceMethod())
+ return false;
+
+ Selector sel = chosen->getSelector();
+ if (!sel.isUnarySelector() || sel.getNameForSlot(0) != "length")
+ return false;
+
+ // Don't complain about mismatches for -length if the method we
+ // chose has an integral result type.
+ return (chosen->getResultType()->isIntegerType());
+}
+
ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance) {
@@ -1543,32 +1878,52 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
- bool strictSelectorMatch = receiverIdOrClass && warn &&
- (Diags.getDiagnosticLevel(diag::warn_strict_multiple_method_decl,
- R.getBegin()) !=
- Diagnostic::Ignored);
if (warn && MethList.Method && MethList.Next) {
- bool issueWarning = false;
+ bool issueDiagnostic = false, issueError = false;
+
+ // We support a warning which complains about *any* difference in
+ // method signature.
+ bool strictSelectorMatch =
+ (receiverIdOrClass && warn &&
+ (Diags.getDiagnosticLevel(diag::warn_strict_multiple_method_decl,
+ R.getBegin()) !=
+ Diagnostic::Ignored));
if (strictSelectorMatch)
for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next) {
- // This checks if the methods differ in type mismatch.
- if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method, false, true))
- issueWarning = true;
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method,
+ MMS_strict)) {
+ issueDiagnostic = true;
+ break;
+ }
}
- if (!issueWarning)
+ // If we didn't see any strict differences, we won't see any loose
+ // differences. In ARC, however, we also need to check for loose
+ // mismatches, because most of them are errors.
+ if (!strictSelectorMatch ||
+ (issueDiagnostic && getLangOptions().ObjCAutoRefCount))
for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next) {
- // This checks if the methods differ by size & alignment.
- if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method, true))
- issueWarning = true;
+ // This checks whether the methods differ by a loose type mismatch.
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method,
+ MMS_loose) &&
+ !isAcceptableMethodMismatch(MethList.Method, Next->Method)) {
+ issueDiagnostic = true;
+ if (getLangOptions().ObjCAutoRefCount)
+ issueError = true;
+ break;
+ }
}
- if (issueWarning) {
- if (strictSelectorMatch)
+ if (issueDiagnostic) {
+ if (issueError)
+ Diag(R.getBegin(), diag::err_arc_multiple_method_decl) << Sel << R;
+ else if (strictSelectorMatch)
Diag(R.getBegin(), diag::warn_strict_multiple_method_decl) << Sel << R;
else
Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
- Diag(MethList.Method->getLocStart(), diag::note_using)
+
+ Diag(MethList.Method->getLocStart(),
+ issueError ? diag::note_possibility : diag::note_using)
<< MethList.Method->getSourceRange();
for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
Diag(Next->Method->getLocStart(), diag::note_also_found)
@@ -1796,6 +2151,7 @@ void Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
DefaultSynthesizeProperties(S, IC, IDecl);
ImplMethodsVsClassMethods(S, IC, IDecl);
AtomicPropertySetterGetterRules(IC, IDecl);
+ DiagnoseOwningPropertyGetterSynthesis(IC);
if (LangOpts.ObjCNonFragileABI2)
while (IDecl->getSuperClass()) {
@@ -1968,7 +2324,7 @@ Decl *Sema::ActOnMethodDeclaration(
} else {
ArgType = GetTypeFromParser(ArgInfo[i].Type, &DI);
// Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
- ArgType = adjustParameterType(ArgType);
+ ArgType = Context.getAdjustedParameterType(ArgType);
}
LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc,
@@ -2015,7 +2371,7 @@ Decl *Sema::ActOnMethodDeclaration(
ArgType = Context.getObjCIdType();
else
// Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
- ArgType = adjustParameterType(ArgType);
+ ArgType = Context.getAdjustedParameterType(ArgType);
if (ArgType->isObjCObjectType()) {
Diag(Param->getLocation(),
diag::err_object_cannot_be_passed_returned_by_value)
@@ -2104,7 +2460,11 @@ Decl *Sema::ActOnMethodDeclaration(
mergeObjCMethodDecls(ObjCMethod, InterfaceMD);
}
- if (!ObjCMethod->hasRelatedResultType() &&
+ bool ARCError = false;
+ if (getLangOptions().ObjCAutoRefCount)
+ ARCError = CheckARCMethodDecl(*this, ObjCMethod);
+
+ if (!ObjCMethod->hasRelatedResultType() && !ARCError &&
getLangOptions().ObjCInferRelatedResultType) {
bool InferRelatedResultType = false;
switch (ObjCMethod->getMethodFamily()) {
@@ -2114,6 +2474,7 @@ Decl *Sema::ActOnMethodDeclaration(
case OMF_mutableCopy:
case OMF_release:
case OMF_retainCount:
+ case OMF_performSelector:
break;
case OMF_alloc:
@@ -2255,15 +2616,8 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
if (getLangOptions().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
- TagDecl *OwnedDecl = 0;
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType ExceptionType = TInfo->getType();
-
- if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) {
- // Objective-C++: Types shall not be defined in exception types.
- Diag(OwnedDecl->getLocation(), diag::err_type_defined_in_param_type)
- << Context.getTypeDeclType(OwnedDecl);
- }
VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType,
D.getSourceRange().getBegin(),
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 0549e9499560..5efc36559d97 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -87,7 +87,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->isDeleted()) {
Diag(Loc, diag::err_deleted_function_use);
- Diag(D->getLocation(), diag::note_unavailable_here) << true;
+ Diag(D->getLocation(), diag::note_unavailable_here) << 1 << true;
return true;
}
}
@@ -104,17 +104,20 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
break;
case AR_Unavailable:
- if (Message.empty()) {
- if (!UnknownObjCClass)
- Diag(Loc, diag::err_unavailable) << D->getDeclName();
- else
- Diag(Loc, diag::warn_unavailable_fwdclass_message)
- << D->getDeclName();
+ if (cast<Decl>(CurContext)->getAvailability() != AR_Unavailable) {
+ if (Message.empty()) {
+ if (!UnknownObjCClass)
+ Diag(Loc, diag::err_unavailable) << D->getDeclName();
+ else
+ Diag(Loc, diag::warn_unavailable_fwdclass_message)
+ << D->getDeclName();
+ }
+ else
+ Diag(Loc, diag::err_unavailable_message)
+ << D->getDeclName() << Message;
+ Diag(D->getLocation(), diag::note_unavailable_here)
+ << isa<FunctionDecl>(D) << false;
}
- else
- Diag(Loc, diag::err_unavailable_message)
- << D->getDeclName() << Message;
- Diag(D->getLocation(), diag::note_unavailable_here) << 0;
break;
}
@@ -437,8 +440,12 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
/// will warn if the resulting type is not a POD type, and rejects ObjC
/// interfaces passed by value.
ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
- FunctionDecl *FDecl) {
- ExprResult ExprRes = DefaultArgumentPromotion(E);
+ FunctionDecl *FDecl) {
+ ExprResult ExprRes = CheckPlaceholderExpr(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+
+ ExprRes = DefaultArgumentPromotion(E);
if (ExprRes.isInvalid())
return ExprError();
E = ExprRes.take();
@@ -456,7 +463,7 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
<< E->getType() << CT))
return ExprError();
- if (!E->getType()->isPODType()) {
+ if (!E->getType().isPODType(Context)) {
// C++0x [expr.call]p7:
// Passing a potentially-evaluated argument of class type (Clause 9)
// having a non-trivial copy constructor, a non-trivial move constructor,
@@ -471,6 +478,11 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
TrivialEnough = true;
}
}
+
+ if (!TrivialEnough &&
+ getLangOptions().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType())
+ TrivialEnough = true;
if (TrivialEnough) {
// Nothing to diagnose. This is okay.
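
A sketch of the C++ side of this check (illustrative, with hypothetical declarations): passing a non-POD class type through an ellipsis is diagnosed unless one of the "trivial enough" escapes applies; the new ARC clause above additionally exempts Objective-C lifetime-qualified types.

  #include <string>

  void log_all(const char *fmt, ...);

  void f(const std::string &name) {
    log_all("%s", name.c_str());   // fine: a plain pointer is passed
    log_all("%s", name);           // diagnosed: non-POD class type through '...'
  }
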
@@ -1004,7 +1016,6 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
// Pass &StringTokLocs[0], StringTokLocs.size() to factory!
return Owned(StringLiteral::Create(Context, Literal.GetString(),
- Literal.GetStringLength(),
Literal.AnyWide, Literal.Pascal, StrTy,
&StringTokLocs[0],
StringTokLocs.size()));
@@ -1271,125 +1282,6 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
return Owned(E);
}
-static ExprResult
-BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
- const CXXScopeSpec &SS, FieldDecl *Field,
- DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo);
-
-ExprResult
-Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
- SourceLocation loc,
- IndirectFieldDecl *indirectField,
- Expr *baseObjectExpr,
- SourceLocation opLoc) {
- // First, build the expression that refers to the base object.
-
- bool baseObjectIsPointer = false;
- Qualifiers baseQuals;
-
- // Case 1: the base of the indirect field is not a field.
- VarDecl *baseVariable = indirectField->getVarDecl();
- CXXScopeSpec EmptySS;
- if (baseVariable) {
- assert(baseVariable->getType()->isRecordType());
-
- // In principle we could have a member access expression that
- // accesses an anonymous struct/union that's a static member of
- // the base object's class. However, under the current standard,
- // static data members cannot be anonymous structs or unions.
- // Supporting this is as easy as building a MemberExpr here.
- assert(!baseObjectExpr && "anonymous struct/union is static data member?");
-
- DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
-
- ExprResult result =
- BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
- if (result.isInvalid()) return ExprError();
-
- baseObjectExpr = result.take();
- baseObjectIsPointer = false;
- baseQuals = baseObjectExpr->getType().getQualifiers();
-
- // Case 2: the base of the indirect field is a field and the user
- // wrote a member expression.
- } else if (baseObjectExpr) {
- // The caller provided the base object expression. Determine
- // whether its a pointer and whether it adds any qualifiers to the
- // anonymous struct/union fields we're looking into.
- QualType objectType = baseObjectExpr->getType();
-
- if (const PointerType *ptr = objectType->getAs<PointerType>()) {
- baseObjectIsPointer = true;
- objectType = ptr->getPointeeType();
- } else {
- baseObjectIsPointer = false;
- }
- baseQuals = objectType.getQualifiers();
-
- // Case 3: the base of the indirect field is a field and we should
- // build an implicit member access.
- } else {
- // We've found a member of an anonymous struct/union that is
- // inside a non-anonymous struct/union, so in a well-formed
- // program our base object expression is "this".
- QualType ThisTy = getAndCaptureCurrentThisType();
- if (ThisTy.isNull()) {
- Diag(loc, diag::err_invalid_member_use_in_static_method)
- << indirectField->getDeclName();
- return ExprError();
- }
-
- // Our base object expression is "this".
- baseObjectExpr =
- new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
- baseObjectIsPointer = true;
- baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers();
- }
-
- // Build the implicit member references to the field of the
- // anonymous struct/union.
- Expr *result = baseObjectExpr;
- IndirectFieldDecl::chain_iterator
- FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
-
- // Build the first member access in the chain with full information.
- if (!baseVariable) {
- FieldDecl *field = cast<FieldDecl>(*FI);
-
- // FIXME: use the real found-decl info!
- DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
-
- // Make a nameInfo that properly uses the anonymous name.
- DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
-
- result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
- EmptySS, field, foundDecl,
- memberNameInfo).take();
- baseObjectIsPointer = false;
-
- // FIXME: check qualified member access
- }
-
- // In all cases, we should now skip the first declaration in the chain.
- ++FI;
-
- while (FI != FEnd) {
- FieldDecl *field = cast<FieldDecl>(*FI++);
-
- // FIXME: these are somewhat meaningless
- DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
- DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
-
- result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
- (FI == FEnd? SS : EmptySS), field,
- foundDecl, memberNameInfo)
- .take();
- }
-
- return Owned(result);
-}
-
/// Decomposes the given name into a DeclarationNameInfo, its location, and
/// possibly a list of template arguments.
///
@@ -1399,215 +1291,30 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
/// This actually loses a lot of source location information for
/// non-standard name kinds; we should consider preserving that in
/// some way.
-static void DecomposeUnqualifiedId(Sema &SemaRef,
- const UnqualifiedId &Id,
- TemplateArgumentListInfo &Buffer,
- DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *&TemplateArgs) {
+void Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
+ TemplateArgumentListInfo &Buffer,
+ DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *&TemplateArgs) {
if (Id.getKind() == UnqualifiedId::IK_TemplateId) {
Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc);
Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(SemaRef,
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
Id.TemplateId->getTemplateArgs(),
Id.TemplateId->NumArgs);
- SemaRef.translateTemplateArguments(TemplateArgsPtr, Buffer);
+ translateTemplateArguments(TemplateArgsPtr, Buffer);
TemplateArgsPtr.release();
TemplateName TName = Id.TemplateId->Template.get();
SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc;
- NameInfo = SemaRef.Context.getNameForTemplate(TName, TNameLoc);
+ NameInfo = Context.getNameForTemplate(TName, TNameLoc);
TemplateArgs = &Buffer;
} else {
- NameInfo = SemaRef.GetNameFromUnqualifiedId(Id);
+ NameInfo = GetNameFromUnqualifiedId(Id);
TemplateArgs = 0;
}
}
-/// Determines if the given class is provably not derived from all of
-/// the prospective base classes.
-static bool IsProvablyNotDerivedFrom(Sema &SemaRef,
- CXXRecordDecl *Record,
- const llvm::SmallPtrSet<CXXRecordDecl*, 4> &Bases) {
- if (Bases.count(Record->getCanonicalDecl()))
- return false;
-
- RecordDecl *RD = Record->getDefinition();
- if (!RD) return false;
- Record = cast<CXXRecordDecl>(RD);
-
- for (CXXRecordDecl::base_class_iterator I = Record->bases_begin(),
- E = Record->bases_end(); I != E; ++I) {
- CanQualType BaseT = SemaRef.Context.getCanonicalType((*I).getType());
- CanQual<RecordType> BaseRT = BaseT->getAs<RecordType>();
- if (!BaseRT) return false;
-
- CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
- if (!IsProvablyNotDerivedFrom(SemaRef, BaseRecord, Bases))
- return false;
- }
-
- return true;
-}
-
-enum IMAKind {
- /// The reference is definitely not an instance member access.
- IMA_Static,
-
- /// The reference may be an implicit instance member access.
- IMA_Mixed,
-
- /// The reference may be to an instance member, but it is invalid if
- /// so, because the context is not an instance method.
- IMA_Mixed_StaticContext,
-
- /// The reference may be to an instance member, but it is invalid if
- /// so, because the context is from an unrelated class.
- IMA_Mixed_Unrelated,
-
- /// The reference is definitely an implicit instance member access.
- IMA_Instance,
-
- /// The reference may be to an unresolved using declaration.
- IMA_Unresolved,
-
- /// The reference may be to an unresolved using declaration and the
- /// context is not an instance method.
- IMA_Unresolved_StaticContext,
-
- /// All possible referrents are instance members and the current
- /// context is not an instance method.
- IMA_Error_StaticContext,
-
- /// All possible referrents are instance members of an unrelated
- /// class.
- IMA_Error_Unrelated
-};
-
-/// The given lookup names class member(s) and is not being used for
-/// an address-of-member expression. Classify the type of access
-/// according to whether it's possible that this reference names an
-/// instance member. This is best-effort; it is okay to
-/// conservatively answer "yes", in which case some errors will simply
-/// not be caught until template-instantiation.
-static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
- Scope *CurScope,
- const LookupResult &R) {
- assert(!R.empty() && (*R.begin())->isCXXClassMember());
-
- DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
-
- bool isStaticContext =
- (!isa<CXXMethodDecl>(DC) ||
- cast<CXXMethodDecl>(DC)->isStatic());
-
- // C++0x [expr.prim]p4:
- // Otherwise, if a member-declarator declares a non-static data member
- // of a class X, the expression this is a prvalue of type "pointer to X"
- // within the optional brace-or-equal-initializer.
- if (CurScope->getFlags() & Scope::ThisScope)
- isStaticContext = false;
-
- if (R.isUnresolvableResult())
- return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved;
-
- // Collect all the declaring classes of instance members we find.
- bool hasNonInstance = false;
- bool hasField = false;
- llvm::SmallPtrSet<CXXRecordDecl*, 4> Classes;
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
- NamedDecl *D = *I;
-
- if (D->isCXXInstanceMember()) {
- if (dyn_cast<FieldDecl>(D))
- hasField = true;
-
- CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
- Classes.insert(R->getCanonicalDecl());
- }
- else
- hasNonInstance = true;
- }
-
- // If we didn't find any instance members, it can't be an implicit
- // member reference.
- if (Classes.empty())
- return IMA_Static;
-
- // If the current context is not an instance method, it can't be
- // an implicit member reference.
- if (isStaticContext) {
- if (hasNonInstance)
- return IMA_Mixed_StaticContext;
-
- if (SemaRef.getLangOptions().CPlusPlus0x && hasField) {
- // C++0x [expr.prim.general]p10:
- // An id-expression that denotes a non-static data member or non-static
- // member function of a class can only be used:
- // (...)
- // - if that id-expression denotes a non-static data member and it appears in an unevaluated operand.
- const Sema::ExpressionEvaluationContextRecord& record = SemaRef.ExprEvalContexts.back();
- bool isUnevaluatedExpression = record.Context == Sema::Unevaluated;
- if (isUnevaluatedExpression)
- return IMA_Mixed_StaticContext;
- }
-
- return IMA_Error_StaticContext;
- }
-
- CXXRecordDecl *contextClass;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
- contextClass = MD->getParent()->getCanonicalDecl();
- else
- contextClass = cast<CXXRecordDecl>(DC);
-
- // [class.mfct.non-static]p3:
- // ...is used in the body of a non-static member function of class X,
- // if name lookup (3.4.1) resolves the name in the id-expression to a
- // non-static non-type member of some class C [...]
- // ...if C is not X or a base class of X, the class member access expression
- // is ill-formed.
- if (R.getNamingClass() &&
- contextClass != R.getNamingClass()->getCanonicalDecl() &&
- contextClass->isProvablyNotDerivedFrom(R.getNamingClass()))
- return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
-
- // If we can prove that the current context is unrelated to all the
- // declaring classes, it can't be an implicit member reference (in
- // which case it's an error if any of those members are selected).
- if (IsProvablyNotDerivedFrom(SemaRef, contextClass, Classes))
- return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
-
- return (hasNonInstance ? IMA_Mixed : IMA_Instance);
-}
-
-/// Diagnose a reference to a field with no object available.
-static void DiagnoseInstanceReference(Sema &SemaRef,
- const CXXScopeSpec &SS,
- NamedDecl *rep,
- const DeclarationNameInfo &nameInfo) {
- SourceLocation Loc = nameInfo.getLoc();
- SourceRange Range(Loc);
- if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
-
- if (isa<FieldDecl>(rep) || isa<IndirectFieldDecl>(rep)) {
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(SemaRef.CurContext)) {
- if (MD->isStatic()) {
- // "invalid use of member 'x' in static member function"
- SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
- << Range << nameInfo.getName();
- return;
- }
- }
-
- SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
- << nameInfo.getName() << Range;
- return;
- }
-
- SemaRef.Diag(Loc, diag::err_member_call_without_object) << Range;
-}
-
/// Diagnose an empty lookup.
///
/// \return false if new lookup candidates were found
@@ -1690,39 +1397,43 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
}
// We didn't find anything, so try to correct for a typo.
- DeclarationName Corrected;
- if (S && (Corrected = CorrectTypo(R, S, &SS, 0, false, CTC))) {
- if (!R.empty()) {
- if (isa<ValueDecl>(*R.begin()) || isa<FunctionTemplateDecl>(*R.begin())) {
+ TypoCorrection Corrected;
+ if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
+ S, &SS, NULL, false, CTC))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
+ R.setLookupName(Corrected.getCorrection());
+
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ R.addDecl(ND);
+ if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
if (SS.isEmpty())
- Diag(R.getNameLoc(), diagnostic_suggest) << Name << R.getLookupName()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- R.getLookupName().getAsString());
+ Diag(R.getNameLoc(), diagnostic_suggest) << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
else
Diag(R.getNameLoc(), diag::err_no_member_suggest)
- << Name << computeDeclContext(SS, false) << R.getLookupName()
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- R.getLookupName().getAsString());
- if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ if (ND)
Diag(ND->getLocation(), diag::note_previous_decl)
- << ND->getDeclName();
+ << CorrectedQuotedStr;
// Tell the callee to try to recover.
return false;
}
- if (isa<TypeDecl>(*R.begin()) || isa<ObjCInterfaceDecl>(*R.begin())) {
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) {
// FIXME: If we ended up with a typo for a type name or
// Objective-C class name, we're in trouble because the parser
// is in the wrong place to recover. Suggest the typo
// correction, but don't make it a fix-it since we're not going
// to recover well anyway.
if (SS.isEmpty())
- Diag(R.getNameLoc(), diagnostic_suggest) << Name << R.getLookupName();
+ Diag(R.getNameLoc(), diagnostic_suggest) << Name << CorrectedQuotedStr;
else
Diag(R.getNameLoc(), diag::err_no_member_suggest)
- << Name << computeDeclContext(SS, false) << R.getLookupName()
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange();
// Don't try to recover; it won't work.
@@ -1732,15 +1443,15 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// FIXME: We found a keyword. Suggest it, but don't provide a fix-it
// because we aren't able to recover.
if (SS.isEmpty())
- Diag(R.getNameLoc(), diagnostic_suggest) << Name << Corrected;
+ Diag(R.getNameLoc(), diagnostic_suggest) << Name << CorrectedQuotedStr;
else
Diag(R.getNameLoc(), diag::err_no_member_suggest)
- << Name << computeDeclContext(SS, false) << Corrected
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
<< SS.getRange();
return true;
}
- R.clear();
}
+ R.clear();
// Emit a special diagnostic for failed member lookups.
// FIXME: computing the declaration context might fail here (?)
@@ -1856,7 +1567,7 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
// Decompose the UnqualifiedId into the following data.
DeclarationNameInfo NameInfo;
const TemplateArgumentListInfo *TemplateArgs;
- DecomposeUnqualifiedId(*this, Id, TemplateArgsBuffer, NameInfo, TemplateArgs);
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs);
DeclarationName Name = NameInfo.getName();
IdentifierInfo *II = Name.getAsIdentifierInfo();
@@ -1892,7 +1603,9 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
bool IvarLookupFollowUp = false;
// Perform the required lookup.
- LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ LookupResult R(*this, NameInfo,
+ (Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam)
+ ? LookupObjCImplicitSelfParam : LookupOrdinaryName);
if (TemplateArgs) {
// Lookup the template name again to correctly establish the context in
// which it was found. This is really unfortunate as we already did the
@@ -2032,38 +1745,6 @@ ExprResult Sema::ActOnIdExpression(Scope *S,
return BuildDeclarationNameExpr(SS, R, ADL);
}
-/// Builds an expression which might be an implicit member expression.
-ExprResult
-Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
- LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs) {
- switch (ClassifyImplicitMemberAccess(*this, CurScope, R)) {
- case IMA_Instance:
- return BuildImplicitMemberExpr(SS, R, TemplateArgs, true);
-
- case IMA_Mixed:
- case IMA_Mixed_Unrelated:
- case IMA_Unresolved:
- return BuildImplicitMemberExpr(SS, R, TemplateArgs, false);
-
- case IMA_Static:
- case IMA_Mixed_StaticContext:
- case IMA_Unresolved_StaticContext:
- if (TemplateArgs)
- return BuildTemplateIdExpr(SS, R, false, *TemplateArgs);
- return BuildDeclarationNameExpr(SS, R, false);
-
- case IMA_Error_StaticContext:
- case IMA_Error_Unrelated:
- DiagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
- R.getLookupNameInfo());
- return ExprError();
- }
-
- llvm_unreachable("unexpected instance member access kind");
- return ExprError();
-}
-
/// BuildQualifiedDeclarationNameExpr - Build a C++ qualified
/// declaration name, generally during template instantiation.
/// There's a large number of things which don't need to be done along
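Illustration (not part of this patch) of what an "implicit member expression" is, i.e. the instance vs. static cases the removed classification above distinguishes; the type below is invented.

    struct Counter {
      int value;

      int get() const { return value; }   // instance context: 'value' becomes (*this).value,
                                          // an implicit member access through 'this'
      static int zero() { return 0; }     // static context: a plain declaration reference;
                                          // no implicit 'this' is available
    };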
@@ -2155,6 +1836,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo &II = Context.Idents.get("self");
UnqualifiedId SelfName;
SelfName.setIdentifier(&II, SourceLocation());
+ SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam);
CXXScopeSpec SelfScopeSpec;
ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec,
SelfName, false, false);
@@ -2166,27 +1848,6 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
return ExprError();
MarkDeclarationReferenced(Loc, IV);
- Expr *base = SelfExpr.take();
- base = base->IgnoreParenImpCasts();
- if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(base)) {
- const NamedDecl *ND = DE->getDecl();
- if (!isa<ImplicitParamDecl>(ND)) {
- // relax the rule such that it is allowed to have a shadow 'self'
- // where stand-alone ivar can be found in this 'self' object.
- // This is to match gcc's behavior.
- ObjCInterfaceDecl *selfIFace = 0;
- if (const ObjCObjectPointerType *OPT =
- base->getType()->getAsObjCInterfacePointerType())
- selfIFace = OPT->getInterfaceDecl();
- if (!selfIFace ||
- !selfIFace->lookupInstanceVariable(IV->getIdentifier())) {
- Diag(Loc, diag::error_implicit_ivar_access)
- << IV->getDeclName();
- Diag(ND->getLocation(), diag::note_declared_at);
- return ExprError();
- }
- }
- }
return Owned(new (Context)
ObjCIvarRefExpr(IV, IV->getType(), Loc,
SelfExpr.take(), true, true));
@@ -2385,120 +2046,6 @@ Sema::PerformObjectMemberConversion(Expr *From,
VK, &BasePath);
}
-/// \brief Build a MemberExpr AST node.
-static MemberExpr *BuildMemberExpr(ASTContext &C, Expr *Base, bool isArrow,
- const CXXScopeSpec &SS, ValueDecl *Member,
- DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo,
- QualType Ty,
- ExprValueKind VK, ExprObjectKind OK,
- const TemplateArgumentListInfo *TemplateArgs = 0) {
- return MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C),
- Member, FoundDecl, MemberNameInfo,
- TemplateArgs, Ty, VK, OK);
-}
-
-static ExprResult
-BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
- const CXXScopeSpec &SS, FieldDecl *Field,
- DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo) {
- // x.a is an l-value if 'a' has a reference type. Otherwise:
- // x.a is an l-value/x-value/pr-value if the base is (and note
- // that *x is always an l-value), except that if the base isn't
- // an ordinary object then we must have an rvalue.
- ExprValueKind VK = VK_LValue;
- ExprObjectKind OK = OK_Ordinary;
- if (!IsArrow) {
- if (BaseExpr->getObjectKind() == OK_Ordinary)
- VK = BaseExpr->getValueKind();
- else
- VK = VK_RValue;
- }
- if (VK != VK_RValue && Field->isBitField())
- OK = OK_BitField;
-
- // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
- QualType MemberType = Field->getType();
- if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
- MemberType = Ref->getPointeeType();
- VK = VK_LValue;
- } else {
- QualType BaseType = BaseExpr->getType();
- if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
-
- Qualifiers BaseQuals = BaseType.getQualifiers();
-
- // GC attributes are never picked up by members.
- BaseQuals.removeObjCGCAttr();
-
- // CVR attributes from the base are picked up by members,
- // except that 'mutable' members don't pick up 'const'.
- if (Field->isMutable()) BaseQuals.removeConst();
-
- Qualifiers MemberQuals
- = S.Context.getCanonicalType(MemberType).getQualifiers();
-
- // TR 18037 does not allow fields to be declared with address spaces.
- assert(!MemberQuals.hasAddressSpace());
-
- Qualifiers Combined = BaseQuals + MemberQuals;
- if (Combined != MemberQuals)
- MemberType = S.Context.getQualifiedType(MemberType, Combined);
- }
-
- S.MarkDeclarationReferenced(MemberNameInfo.getLoc(), Field);
- ExprResult Base =
- S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
- FoundDecl, Field);
- if (Base.isInvalid())
- return ExprError();
- return S.Owned(BuildMemberExpr(S.Context, Base.take(), IsArrow, SS,
- Field, FoundDecl, MemberNameInfo,
- MemberType, VK, OK));
-}
-
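Illustration (not part of this patch) of the qualifier rule the removed BuildFieldReferenceExpr encodes: CVR qualifiers of the base are picked up by the member, except that 'mutable' members drop 'const'. The types are invented.

    struct Cache {
      mutable int hits;
      int         payload;
    };

    void touch(const Cache &c) {
      c.hits += 1;        // OK: a 'mutable' member does not pick up 'const' from the base
      // c.payload += 1;  // error: 'payload' is const-qualified through the const base object
    }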
-/// Builds an implicit member access expression. The current context
-/// is known to be an instance method, and the given unqualified lookup
-/// set is known to contain only instance members, at least one of which
-/// is from an appropriate type.
-ExprResult
-Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
- LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs,
- bool IsKnownInstance) {
- assert(!R.empty() && !R.isAmbiguous());
-
- SourceLocation loc = R.getNameLoc();
-
- // We may have found a field within an anonymous union or struct
- // (C++ [class.union]).
- // FIXME: template-ids inside anonymous structs?
- if (IndirectFieldDecl *FD = R.getAsSingle<IndirectFieldDecl>())
- return BuildAnonymousStructUnionMemberReference(SS, R.getNameLoc(), FD);
-
- // If this is known to be an instance access, go ahead and build an
- // implicit 'this' expression now.

- QualType ThisTy = getAndCaptureCurrentThisType();
- assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
-
- Expr *baseExpr = 0; // null signifies implicit access
- if (IsKnownInstance) {
- SourceLocation Loc = R.getNameLoc();
- if (SS.getRange().isValid())
- Loc = SS.getRange().getBegin();
- baseExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/true);
- }
-
- return BuildMemberReferenceExpr(baseExpr, ThisTy,
- /*OpLoc*/ SourceLocation(),
- /*IsArrow*/ true,
- SS,
- /*FirstQualifierInScope*/ 0,
- R, TemplateArgs);
-}
-
bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen) {
@@ -3160,6 +2707,20 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *Op,
Op->getSourceRange(), ExprKind))
return true;
+ if (ExprKind == UETT_SizeOf) {
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(Op->IgnoreParens())) {
+ if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
+ QualType OType = PVD->getOriginalType();
+ QualType Type = PVD->getType();
+ if (Type->isPointerType() && OType->isArrayType()) {
+ Diag(Op->getExprLoc(), diag::warn_sizeof_array_param)
+ << Type << OType;
+ Diag(PVD->getLocation(), diag::note_declared_at);
+ }
+ }
+ }
+ }
+
return false;
}
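Illustration (not part of this patch): the pattern the new warn_sizeof_array_param diagnostic is aimed at. The names are invented; the diagnostic text is approximate.

    #include <cstdio>

    void fill(int buf[16]) {               // parameter type adjusts to 'int *'; original type is 'int [16]'
      std::printf("%zu\n", sizeof(buf));   // sizeof the pointer (e.g. 8), not 16 * sizeof(int) -- now warned
    }

    int main() {
      int data[16];
      std::printf("%zu\n", sizeof(data));  // the full array size, as probably intended
      fill(data);
    }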
@@ -3274,6 +2835,12 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind) {
+ ExprResult PE = CheckPlaceholderExpr(E);
+ if (PE.isInvalid())
+ return ExprError();
+
+ E = PE.get();
+
// Verify that the operand is valid.
bool isInvalid = false;
if (E->isTypeDependent()) {
@@ -3285,10 +2852,6 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
} else if (E->getBitField()) { // C99 6.5.3.4p1.
Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield) << 0;
isInvalid = true;
- } else if (E->getType()->isPlaceholderType()) {
- ExprResult PE = CheckPlaceholderExpr(E);
- if (PE.isInvalid()) return ExprError();
- return CreateUnaryExprOrTypeTraitExpr(PE.take(), OpLoc, ExprKind);
} else {
isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
}
@@ -3372,19 +2935,6 @@ Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
return BuildUnaryOp(S, OpLoc, Opc, Input);
}
-/// Expressions of certain arbitrary types are forbidden by C from
-/// having l-value type. These are:
-/// - 'void', but not qualified void
-/// - function types
-///
-/// The exact rule here is C99 6.3.2.1:
-/// An lvalue is an expression with an object type or an incomplete
-/// type other than void.
-static bool IsCForbiddenLValueType(ASTContext &C, QualType T) {
- return ((T->isVoidType() && !T.hasQualifiers()) ||
- T->isFunctionType());
-}
-
ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc) {
@@ -3528,7 +3078,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (ResultType->isVoidType() && !getLangOptions().CPlusPlus) {
// GNU extension: subscripting on pointer to void
- Diag(LLoc, diag::ext_gnu_void_ptr)
+ Diag(LLoc, diag::ext_gnu_subscript_void_type)
<< BaseExpr->getSourceRange();
// C forbids expressions of unqualified void type from being l-values.
@@ -3548,1109 +3098,12 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
}
assert(VK == VK_RValue || LangOpts.CPlusPlus ||
- !IsCForbiddenLValueType(Context, ResultType));
+ !ResultType.isCForbiddenLValueType());
return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
ResultType, VK, OK, RLoc));
}
-/// Check an ext-vector component access expression.
-///
-/// VK should be set in advance to the value kind of the base
-/// expression.
-static QualType
-CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
- SourceLocation OpLoc, const IdentifierInfo *CompName,
- SourceLocation CompLoc) {
- // FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements,
- // see FIXME there.
- //
- // FIXME: This logic can be greatly simplified by splitting it along
- // halving/not halving and reworking the component checking.
- const ExtVectorType *vecType = baseType->getAs<ExtVectorType>();
-
- // The vector accessor can't exceed the number of elements.
- const char *compStr = CompName->getNameStart();
-
- // This flag determines whether or not the component is one of the four
- // special names that indicate a subset of exactly half the elements are
- // to be selected.
- bool HalvingSwizzle = false;
-
- // This flag determines whether or not CompName has an 's' char prefix,
- // indicating that it is a string of hex values to be used as vector indices.
- bool HexSwizzle = *compStr == 's' || *compStr == 'S';
-
- bool HasRepeated = false;
- bool HasIndex[16] = {};
-
- int Idx;
-
- // Check that we've found one of the special components, or that the component
- // names must come from the same set.
- if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
- !strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
- HalvingSwizzle = true;
- } else if (!HexSwizzle &&
- (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
- do {
- if (HasIndex[Idx]) HasRepeated = true;
- HasIndex[Idx] = true;
- compStr++;
- } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
- } else {
- if (HexSwizzle) compStr++;
- while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
- if (HasIndex[Idx]) HasRepeated = true;
- HasIndex[Idx] = true;
- compStr++;
- }
- }
-
- if (!HalvingSwizzle && *compStr) {
- // We didn't get to the end of the string. This means the component names
- // didn't come from the same set *or* we encountered an illegal name.
- S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
- << llvm::StringRef(compStr, 1) << SourceRange(CompLoc);
- return QualType();
- }
-
- // Ensure no component accessor exceeds the width of the vector type it
- // operates on.
- if (!HalvingSwizzle) {
- compStr = CompName->getNameStart();
-
- if (HexSwizzle)
- compStr++;
-
- while (*compStr) {
- if (!vecType->isAccessorWithinNumElements(*compStr++)) {
- S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
- << baseType << SourceRange(CompLoc);
- return QualType();
- }
- }
- }
-
- // The component accessor looks fine - now we need to compute the actual type.
- // The vector type is implied by the component accessor. For example,
- // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc.
- // vec4.s0 is a float, vec4.s23 is a vec3, etc.
- // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2.
- unsigned CompSize = HalvingSwizzle ? (vecType->getNumElements() + 1) / 2
- : CompName->getLength();
- if (HexSwizzle)
- CompSize--;
-
- if (CompSize == 1)
- return vecType->getElementType();
-
- if (HasRepeated) VK = VK_RValue;
-
- QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
- // Now look up the TypeDefDecl from the vector type. Without this,
-  // diagnostics look bad. We want extended vector types to appear built-in.
- for (unsigned i = 0, E = S.ExtVectorDecls.size(); i != E; ++i) {
- if (S.ExtVectorDecls[i]->getUnderlyingType() == VT)
- return S.Context.getTypedefType(S.ExtVectorDecls[i]);
- }
- return VT; // should never get here (a typedef type should always be found).
-}
-
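Illustration (not part of this patch) of the accessor forms the relocated CheckExtVectorComponent logic accepts, using clang's ext_vector_type extension; the typedef names are invented.

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef float float2 __attribute__((ext_vector_type(2)));

    float2 swizzle(float4 v) {
      float  x  = v.x;     // one component yields the element type
      float2 xy = v.xy;    // several components yield a shorter vector
      float2 hi = v.hi;    // 'hi'/'lo'/'even'/'odd' select exactly half the elements
      float  s2 = v.s2;    // 's' prefix + hex digits index elements numerically
      (void)x; (void)s2;
      return xy + hi;
    }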
-static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
- IdentifierInfo *Member,
- const Selector &Sel,
- ASTContext &Context) {
- if (Member)
- if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
- return PD;
- if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
- return OMD;
-
- for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); I != E; ++I) {
- if (Decl *D = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
- Context))
- return D;
- }
- return 0;
-}
-
-static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy,
- IdentifierInfo *Member,
- const Selector &Sel,
- ASTContext &Context) {
- // Check protocols on qualified interfaces.
- Decl *GDecl = 0;
- for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
- E = QIdTy->qual_end(); I != E; ++I) {
- if (Member)
- if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
- GDecl = PD;
- break;
- }
- // Also must look for a getter or setter name which uses property syntax.
- if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Sel)) {
- GDecl = OMD;
- break;
- }
- }
- if (!GDecl) {
- for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
- E = QIdTy->qual_end(); I != E; ++I) {
- // Search in the protocol-qualifier list of current protocol.
- GDecl = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
- Context);
- if (GDecl)
- return GDecl;
- }
- }
- return GDecl;
-}
-
-ExprResult
-Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
- bool IsArrow, SourceLocation OpLoc,
- const CXXScopeSpec &SS,
- NamedDecl *FirstQualifierInScope,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
- // Even in dependent contexts, try to diagnose base expressions with
- // obviously wrong types, e.g.:
- //
- // T* t;
- // t.f;
- //
- // In Obj-C++, however, the above expression is valid, since it could be
- // accessing the 'f' property if T is an Obj-C interface. The extra check
- // allows this, while still reporting an error if T is a struct pointer.
- if (!IsArrow) {
- const PointerType *PT = BaseType->getAs<PointerType>();
- if (PT && (!getLangOptions().ObjC1 ||
- PT->getPointeeType()->isRecordType())) {
- assert(BaseExpr && "cannot happen with implicit member accesses");
- Diag(NameInfo.getLoc(), diag::err_typecheck_member_reference_struct_union)
- << BaseType << BaseExpr->getSourceRange();
- return ExprError();
- }
- }
-
- assert(BaseType->isDependentType() ||
- NameInfo.getName().isDependentName() ||
- isDependentScopeSpecifier(SS));
-
- // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
- // must have pointer type, and the accessed type is the pointee.
- return Owned(CXXDependentScopeMemberExpr::Create(Context, BaseExpr, BaseType,
- IsArrow, OpLoc,
- SS.getWithLocInContext(Context),
- FirstQualifierInScope,
- NameInfo, TemplateArgs));
-}
-
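Illustration (not part of this patch) of the early diagnosis described in the comment above: even inside a template, a '.' access through an obviously-pointer base is rejected at definition time (outside Objective-C++). The names are invented and the first return is intentionally ill-formed.

    template <typename T>
    int probe(T *t) {
      return t.field;      // diagnosed immediately: the base has pointer type 'T *'
      // return t->field;  // this form is the genuinely dependent access that waits
                           // for instantiation (CXXDependentScopeMemberExpr)
    }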
-/// We know that the given qualified member reference points only to
-/// declarations which do not belong to the static type of the base
-/// expression. Diagnose the problem.
-static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
- Expr *BaseExpr,
- QualType BaseType,
- const CXXScopeSpec &SS,
- NamedDecl *rep,
- const DeclarationNameInfo &nameInfo) {
- // If this is an implicit member access, use a different set of
- // diagnostics.
- if (!BaseExpr)
- return DiagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
-
- SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
- << SS.getRange() << rep << BaseType;
-}
-
-// Check whether the declarations we found through a nested-name
-// specifier in a member expression are actually members of the base
-// type. The restriction here is:
-//
-// C++ [expr.ref]p2:
-// ... In these cases, the id-expression shall name a
-// member of the class or of one of its base classes.
-//
-// So it's perfectly legitimate for the nested-name specifier to name
-// an unrelated class, and for us to find an overload set including
-// decls from classes which are not superclasses, as long as the decl
-// we actually pick through overload resolution is from a superclass.
-bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
- QualType BaseType,
- const CXXScopeSpec &SS,
- const LookupResult &R) {
- const RecordType *BaseRT = BaseType->getAs<RecordType>();
- if (!BaseRT) {
- // We can't check this yet because the base type is still
- // dependent.
- assert(BaseType->isDependentType());
- return false;
- }
- CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
-
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
- // If this is an implicit member reference and we find a
- // non-instance member, it's not an error.
- if (!BaseExpr && !(*I)->isCXXInstanceMember())
- return false;
-
- // Note that we use the DC of the decl, not the underlying decl.
- DeclContext *DC = (*I)->getDeclContext();
- while (DC->isTransparentContext())
- DC = DC->getParent();
-
- if (!DC->isRecord())
- continue;
-
- llvm::SmallPtrSet<CXXRecordDecl*,4> MemberRecord;
- MemberRecord.insert(cast<CXXRecordDecl>(DC)->getCanonicalDecl());
-
- if (!IsProvablyNotDerivedFrom(*this, BaseRecord, MemberRecord))
- return false;
- }
-
- DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS,
- R.getRepresentativeDecl(),
- R.getLookupNameInfo());
- return true;
-}
-
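Illustration (not part of this patch) of the C++ [expr.ref]p2 restriction CheckQualifiedMemberReference enforces; the class names are invented and the second function is intentionally ill-formed.

    struct Base    { int n; };
    struct Other   { int n; };
    struct Derived : Base {};

    int ok(Derived &d)  { return d.Base::n;  }   // fine: 'Base' is a base class of the object type
    int bad(Derived &d) { return d.Other::n; }   // error: the qualifier names a class unrelated to 'Derived'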
-static bool
-LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
- SourceRange BaseRange, const RecordType *RTy,
- SourceLocation OpLoc, CXXScopeSpec &SS,
- bool HasTemplateArgs) {
- RecordDecl *RDecl = RTy->getDecl();
- if (SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
- SemaRef.PDiag(diag::err_typecheck_incomplete_tag)
- << BaseRange))
- return true;
-
- if (HasTemplateArgs) {
- // LookupTemplateName doesn't expect these both to exist simultaneously.
- QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
-
- bool MOUS;
- SemaRef.LookupTemplateName(R, 0, SS, ObjectType, false, MOUS);
- return false;
- }
-
- DeclContext *DC = RDecl;
- if (SS.isSet()) {
- // If the member name was a qualified-id, look into the
- // nested-name-specifier.
- DC = SemaRef.computeDeclContext(SS, false);
-
- if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
- SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
- << SS.getRange() << DC;
- return true;
- }
-
- assert(DC && "Cannot handle non-computable dependent contexts in lookup");
-
- if (!isa<TypeDecl>(DC)) {
- SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
- << DC << SS.getRange();
- return true;
- }
- }
-
- // The record definition is complete, now look up the member.
- SemaRef.LookupQualifiedName(R, DC);
-
- if (!R.empty())
- return false;
-
- // We didn't find anything with the given name, so try to correct
- // for typos.
- DeclarationName Name = R.getLookupName();
- if (SemaRef.CorrectTypo(R, 0, &SS, DC, false, Sema::CTC_MemberLookup) &&
- !R.empty() &&
- (isa<ValueDecl>(*R.begin()) || isa<FunctionTemplateDecl>(*R.begin()))) {
- SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest)
- << Name << DC << R.getLookupName() << SS.getRange()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- R.getLookupName().getAsString());
- if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
- SemaRef.Diag(ND->getLocation(), diag::note_previous_decl)
- << ND->getDeclName();
- return false;
- } else {
- R.clear();
- R.setLookupName(Name);
- }
-
- return false;
-}
-
-ExprResult
-Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
- SourceLocation OpLoc, bool IsArrow,
- CXXScopeSpec &SS,
- NamedDecl *FirstQualifierInScope,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
- if (BaseType->isDependentType() ||
- (SS.isSet() && isDependentScopeSpecifier(SS)))
- return ActOnDependentMemberExpr(Base, BaseType,
- IsArrow, OpLoc,
- SS, FirstQualifierInScope,
- NameInfo, TemplateArgs);
-
- LookupResult R(*this, NameInfo, LookupMemberName);
-
- // Implicit member accesses.
- if (!Base) {
- QualType RecordTy = BaseType;
- if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
- if (LookupMemberExprInRecord(*this, R, SourceRange(),
- RecordTy->getAs<RecordType>(),
- OpLoc, SS, TemplateArgs != 0))
- return ExprError();
-
- // Explicit member accesses.
- } else {
- ExprResult BaseResult = Owned(Base);
- ExprResult Result =
- LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
- SS, /*ObjCImpDecl*/ 0, TemplateArgs != 0);
-
- if (BaseResult.isInvalid())
- return ExprError();
- Base = BaseResult.take();
-
- if (Result.isInvalid()) {
- Owned(Base);
- return ExprError();
- }
-
- if (Result.get())
- return move(Result);
-
- // LookupMemberExpr can modify Base, and thus change BaseType
- BaseType = Base->getType();
- }
-
- return BuildMemberReferenceExpr(Base, BaseType,
- OpLoc, IsArrow, SS, FirstQualifierInScope,
- R, TemplateArgs);
-}
-
-ExprResult
-Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
- SourceLocation OpLoc, bool IsArrow,
- const CXXScopeSpec &SS,
- NamedDecl *FirstQualifierInScope,
- LookupResult &R,
- const TemplateArgumentListInfo *TemplateArgs,
- bool SuppressQualifierCheck) {
- QualType BaseType = BaseExprType;
- if (IsArrow) {
- assert(BaseType->isPointerType());
- BaseType = BaseType->getAs<PointerType>()->getPointeeType();
- }
- R.setBaseObjectType(BaseType);
-
- const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo();
- DeclarationName MemberName = MemberNameInfo.getName();
- SourceLocation MemberLoc = MemberNameInfo.getLoc();
-
- if (R.isAmbiguous())
- return ExprError();
-
- if (R.empty()) {
- // Rederive where we looked up.
- DeclContext *DC = (SS.isSet()
- ? computeDeclContext(SS, false)
- : BaseType->getAs<RecordType>()->getDecl());
-
- Diag(R.getNameLoc(), diag::err_no_member)
- << MemberName << DC
- << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
- return ExprError();
- }
-
- // Diagnose lookups that find only declarations from a non-base
- // type. This is possible for either qualified lookups (which may
- // have been qualified with an unrelated type) or implicit member
- // expressions (which were found with unqualified lookup and thus
- // may have come from an enclosing scope). Note that it's okay for
- // lookup to find declarations from a non-base type as long as those
- // aren't the ones picked by overload resolution.
- if ((SS.isSet() || !BaseExpr ||
- (isa<CXXThisExpr>(BaseExpr) &&
- cast<CXXThisExpr>(BaseExpr)->isImplicit())) &&
- !SuppressQualifierCheck &&
- CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R))
- return ExprError();
-
- // Construct an unresolved result if we in fact got an unresolved
- // result.
- if (R.isOverloadedResult() || R.isUnresolvableResult()) {
- // Suppress any lookup-related diagnostics; we'll do these when we
- // pick a member.
- R.suppressDiagnostics();
-
- UnresolvedMemberExpr *MemExpr
- = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(),
- BaseExpr, BaseExprType,
- IsArrow, OpLoc,
- SS.getWithLocInContext(Context),
- MemberNameInfo,
- TemplateArgs, R.begin(), R.end());
-
- return Owned(MemExpr);
- }
-
- assert(R.isSingleResult());
- DeclAccessPair FoundDecl = R.begin().getPair();
- NamedDecl *MemberDecl = R.getFoundDecl();
-
- // FIXME: diagnose the presence of template arguments now.
-
- // If the decl being referenced had an error, return an error for this
- // sub-expr without emitting another error, in order to avoid cascading
- // error cases.
- if (MemberDecl->isInvalidDecl())
- return ExprError();
-
- // Handle the implicit-member-access case.
- if (!BaseExpr) {
- // If this is not an instance member, convert to a non-member access.
- if (!MemberDecl->isCXXInstanceMember())
- return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl);
-
- SourceLocation Loc = R.getNameLoc();
- if (SS.getRange().isValid())
- Loc = SS.getRange().getBegin();
- BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
- }
-
- bool ShouldCheckUse = true;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MemberDecl)) {
- // Don't diagnose the use of a virtual member function unless it's
- // explicitly qualified.
- if (MD->isVirtual() && !SS.isSet())
- ShouldCheckUse = false;
- }
-
- // Check the use of this member.
- if (ShouldCheckUse && DiagnoseUseOfDecl(MemberDecl, MemberLoc)) {
- Owned(BaseExpr);
- return ExprError();
- }
-
- // Perform a property load on the base regardless of whether we
- // actually need it for the declaration.
- if (BaseExpr->getObjectKind() == OK_ObjCProperty) {
- ExprResult Result = ConvertPropertyForRValue(BaseExpr);
- if (Result.isInvalid())
- return ExprError();
- BaseExpr = Result.take();
- }
-
- if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
- return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow,
- SS, FD, FoundDecl, MemberNameInfo);
-
- if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl))
- // We may have found a field within an anonymous union or struct
- // (C++ [class.union]).
- return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD,
- BaseExpr, OpLoc);
-
- if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
- MarkDeclarationReferenced(MemberLoc, Var);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- Var, FoundDecl, MemberNameInfo,
- Var->getType().getNonReferenceType(),
- VK_LValue, OK_Ordinary));
- }
-
- if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
- ExprValueKind valueKind;
- QualType type;
- if (MemberFn->isInstance()) {
- valueKind = VK_RValue;
- type = Context.BoundMemberTy;
- } else {
- valueKind = VK_LValue;
- type = MemberFn->getType();
- }
-
- MarkDeclarationReferenced(MemberLoc, MemberDecl);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- MemberFn, FoundDecl, MemberNameInfo,
- type, valueKind, OK_Ordinary));
- }
- assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
-
- if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
- MarkDeclarationReferenced(MemberLoc, MemberDecl);
- return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
- Enum, FoundDecl, MemberNameInfo,
- Enum->getType(), VK_RValue, OK_Ordinary));
- }
-
- Owned(BaseExpr);
-
- // We found something that we didn't expect. Complain.
- if (isa<TypeDecl>(MemberDecl))
- Diag(MemberLoc, diag::err_typecheck_member_reference_type)
- << MemberName << BaseType << int(IsArrow);
- else
- Diag(MemberLoc, diag::err_typecheck_member_reference_unknown)
- << MemberName << BaseType << int(IsArrow);
-
- Diag(MemberDecl->getLocation(), diag::note_member_declared_here)
- << MemberName;
- R.suppressDiagnostics();
- return ExprError();
-}
-
-/// Given that normal member access failed on the given expression,
-/// and given that the expression's type involves builtin-id or
-/// builtin-Class, decide whether substituting in the redefinition
-/// types would be profitable. The redefinition type is whatever
-/// this translation unit tried to typedef to id/Class; we store
-/// it to the side and then re-use it in places like this.
-static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) {
- const ObjCObjectPointerType *opty
- = base.get()->getType()->getAs<ObjCObjectPointerType>();
- if (!opty) return false;
-
- const ObjCObjectType *ty = opty->getObjectType();
-
- QualType redef;
- if (ty->isObjCId()) {
- redef = S.Context.ObjCIdRedefinitionType;
- } else if (ty->isObjCClass()) {
- redef = S.Context.ObjCClassRedefinitionType;
- } else {
- return false;
- }
-
- // Do the substitution as long as the redefinition type isn't just a
- // possibly-qualified pointer to builtin-id or builtin-Class again.
- opty = redef->getAs<ObjCObjectPointerType>();
- if (opty && !opty->getObjectType()->getInterface() != 0)
- return false;
-
- base = S.ImpCastExprToType(base.take(), redef, CK_BitCast);
- return true;
-}
-
-/// Look up the given member of the given non-type-dependent
-/// expression. This can return in one of two ways:
-/// * If it returns a sentinel null-but-valid result, the caller will
-/// assume that lookup was performed and the results written into
-/// the provided structure. It will take over from there.
-/// * Otherwise, the returned expression will be produced in place of
-/// an ordinary member expression.
-///
-/// The ObjCImpDecl bit is a gross hack that will need to be properly
-/// fixed for ObjC++.
-ExprResult
-Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
- bool &IsArrow, SourceLocation OpLoc,
- CXXScopeSpec &SS,
- Decl *ObjCImpDecl, bool HasTemplateArgs) {
- assert(BaseExpr.get() && "no base expression");
-
- // Perform default conversions.
- BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
-
- if (IsArrow) {
- BaseExpr = DefaultLvalueConversion(BaseExpr.take());
- if (BaseExpr.isInvalid())
- return ExprError();
- }
-
- QualType BaseType = BaseExpr.get()->getType();
- assert(!BaseType->isDependentType());
-
- DeclarationName MemberName = R.getLookupName();
- SourceLocation MemberLoc = R.getNameLoc();
-
- // For later type-checking purposes, turn arrow accesses into dot
- // accesses. The only access type we support that doesn't follow
- // the C equivalence "a->b === (*a).b" is ObjC property accesses,
- // and those never use arrows, so this is unaffected.
- if (IsArrow) {
- if (const PointerType *Ptr = BaseType->getAs<PointerType>())
- BaseType = Ptr->getPointeeType();
- else if (const ObjCObjectPointerType *Ptr
- = BaseType->getAs<ObjCObjectPointerType>())
- BaseType = Ptr->getPointeeType();
- else if (BaseType->isRecordType()) {
- // Recover from arrow accesses to records, e.g.:
- // struct MyRecord foo;
- // foo->bar
- // This is actually well-formed in C++ if MyRecord has an
- // overloaded operator->, but that should have been dealt with
- // by now.
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
- << FixItHint::CreateReplacement(OpLoc, ".");
- IsArrow = false;
- } else if (BaseType == Context.BoundMemberTy) {
- goto fail;
- } else {
- Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
- << BaseType << BaseExpr.get()->getSourceRange();
- return ExprError();
- }
- }
-
- // Handle field access to simple records.
- if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
- if (LookupMemberExprInRecord(*this, R, BaseExpr.get()->getSourceRange(),
- RTy, OpLoc, SS, HasTemplateArgs))
- return ExprError();
-
- // Returning valid-but-null is how we indicate to the caller that
- // the lookup result was filled in.
- return Owned((Expr*) 0);
- }
-
- // Handle ivar access to Objective-C objects.
- if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) {
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
-
- // There are three cases for the base type:
- // - builtin id (qualified or unqualified)
- // - builtin Class (qualified or unqualified)
- // - an interface
- ObjCInterfaceDecl *IDecl = OTy->getInterface();
- if (!IDecl) {
- // There's an implicit 'isa' ivar on all objects.
- // But we only actually find it this way on objects of type 'id',
- // apparently.
- if (OTy->isObjCId() && Member->isStr("isa"))
- return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc,
- Context.getObjCClassType()));
-
- if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
- goto fail;
- }
-
- ObjCInterfaceDecl *ClassDeclared;
- ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
-
- if (!IV) {
- // Attempt to correct for typos in ivar names.
- LookupResult Res(*this, R.getLookupName(), R.getNameLoc(),
- LookupMemberName);
- if (CorrectTypo(Res, 0, 0, IDecl, false,
- IsArrow ? CTC_ObjCIvarLookup
- : CTC_ObjCPropertyLookup) &&
- (IV = Res.getAsSingle<ObjCIvarDecl>())) {
- Diag(R.getNameLoc(),
- diag::err_typecheck_member_reference_ivar_suggest)
- << IDecl->getDeclName() << MemberName << IV->getDeclName()
- << FixItHint::CreateReplacement(R.getNameLoc(),
- IV->getNameAsString());
- Diag(IV->getLocation(), diag::note_previous_decl)
- << IV->getDeclName();
- } else {
- Res.clear();
- Res.setLookupName(Member);
-
- Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
- << IDecl->getDeclName() << MemberName
- << BaseExpr.get()->getSourceRange();
- return ExprError();
- }
- }
-
- // If the decl being referenced had an error, return an error for this
- // sub-expr without emitting another error, in order to avoid cascading
- // error cases.
- if (IV->isInvalidDecl())
- return ExprError();
-
- // Check whether we can reference this field.
- if (DiagnoseUseOfDecl(IV, MemberLoc))
- return ExprError();
- if (IV->getAccessControl() != ObjCIvarDecl::Public &&
- IV->getAccessControl() != ObjCIvarDecl::Package) {
- ObjCInterfaceDecl *ClassOfMethodDecl = 0;
- if (ObjCMethodDecl *MD = getCurMethodDecl())
- ClassOfMethodDecl = MD->getClassInterface();
- else if (ObjCImpDecl && getCurFunctionDecl()) {
- // Case of a c-function declared inside an objc implementation.
- // FIXME: For a c-style function nested inside an objc implementation
- // class, there is no implementation context available, so we pass
- // down the context as argument to this routine. Ideally, this context
- // need be passed down in the AST node and somehow calculated from the
- // AST for a function decl.
- if (ObjCImplementationDecl *IMPD =
- dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
- ClassOfMethodDecl = IMPD->getClassInterface();
- else if (ObjCCategoryImplDecl* CatImplClass =
- dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
- ClassOfMethodDecl = CatImplClass->getClassInterface();
- }
-
- if (IV->getAccessControl() == ObjCIvarDecl::Private) {
- if (ClassDeclared != IDecl ||
- ClassOfMethodDecl != ClassDeclared)
- Diag(MemberLoc, diag::error_private_ivar_access)
- << IV->getDeclName();
- } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
- // @protected
- Diag(MemberLoc, diag::error_protected_ivar_access)
- << IV->getDeclName();
- }
-
- return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
- MemberLoc, BaseExpr.take(),
- IsArrow));
- }
-
- // Objective-C property access.
- const ObjCObjectPointerType *OPT;
- if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) {
- // This actually uses the base as an r-value.
- BaseExpr = DefaultLvalueConversion(BaseExpr.take());
- if (BaseExpr.isInvalid())
- return ExprError();
-
- assert(Context.hasSameUnqualifiedType(BaseType, BaseExpr.get()->getType()));
-
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
-
- const ObjCObjectType *OT = OPT->getObjectType();
-
- // id, with and without qualifiers.
- if (OT->isObjCId()) {
- // Check protocols on qualified interfaces.
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
- if (Decl *PMDecl = FindGetterSetterNameDecl(OPT, Member, Sel, Context)) {
- if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
- // Check the use of this declaration
- if (DiagnoseUseOfDecl(PD, MemberLoc))
- return ExprError();
-
- QualType T = PD->getType();
- if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
- T = getMessageSendResultType(BaseType, Getter, false, false);
-
- return Owned(new (Context) ObjCPropertyRefExpr(PD, T,
- VK_LValue,
- OK_ObjCProperty,
- MemberLoc,
- BaseExpr.take()));
- }
-
- if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
- // Check the use of this method.
- if (DiagnoseUseOfDecl(OMD, MemberLoc))
- return ExprError();
- Selector SetterSel =
- SelectorTable::constructSetterName(PP.getIdentifierTable(),
- PP.getSelectorTable(), Member);
- ObjCMethodDecl *SMD = 0;
- if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/0,
- SetterSel, Context))
- SMD = dyn_cast<ObjCMethodDecl>(SDecl);
- QualType PType = getMessageSendResultType(BaseType, OMD, false,
- false);
-
- ExprValueKind VK = VK_LValue;
- if (!getLangOptions().CPlusPlus &&
- IsCForbiddenLValueType(Context, PType))
- VK = VK_RValue;
- ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
-
- return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD, PType,
- VK, OK,
- MemberLoc, BaseExpr.take()));
- }
- }
- // Use of id.member can only be for a property reference. Do not
- // use the 'id' redefinition in this case.
- if (IsArrow && ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
-
- return ExprError(Diag(MemberLoc, diag::err_property_not_found)
- << MemberName << BaseType);
- }
-
- // 'Class', unqualified only.
- if (OT->isObjCClass()) {
- // Only works in a method declaration (??!).
- ObjCMethodDecl *MD = getCurMethodDecl();
- if (!MD) {
- if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
-
- goto fail;
- }
-
- // Also must look for a getter name which uses property syntax.
- Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
- ObjCInterfaceDecl *IFace = MD->getClassInterface();
- ObjCMethodDecl *Getter;
- if ((Getter = IFace->lookupClassMethod(Sel))) {
- // Check the use of this method.
- if (DiagnoseUseOfDecl(Getter, MemberLoc))
- return ExprError();
- } else
- Getter = IFace->lookupPrivateMethod(Sel, false);
- // If we found a getter then this may be a valid dot-reference, we
- // will look for the matching setter, in case it is needed.
- Selector SetterSel =
- SelectorTable::constructSetterName(PP.getIdentifierTable(),
- PP.getSelectorTable(), Member);
- ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
- if (!Setter) {
- // If this reference is in an @implementation, also check for 'private'
- // methods.
- Setter = IFace->lookupPrivateMethod(SetterSel, false);
- }
- // Look through local category implementations associated with the class.
- if (!Setter)
- Setter = IFace->getCategoryClassMethod(SetterSel);
-
- if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
- return ExprError();
-
- if (Getter || Setter) {
- QualType PType;
-
- ExprValueKind VK = VK_LValue;
- if (Getter) {
- PType = getMessageSendResultType(QualType(OT, 0), Getter, true,
- false);
- if (!getLangOptions().CPlusPlus &&
- IsCForbiddenLValueType(Context, PType))
- VK = VK_RValue;
- } else {
- // Get the expression type from Setter's incoming parameter.
- PType = (*(Setter->param_end() -1))->getType();
- }
- ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
-
- // FIXME: we must check that the setter has property type.
- return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
- PType, VK, OK,
- MemberLoc, BaseExpr.take()));
- }
-
- if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
-
- return ExprError(Diag(MemberLoc, diag::err_property_not_found)
- << MemberName << BaseType);
- }
-
- // Normal property access.
- return HandleExprPropertyRefExpr(OPT, BaseExpr.get(), MemberName, MemberLoc,
- SourceLocation(), QualType(), false);
- }
-
- // Handle 'field access' to vectors, such as 'V.xx'.
- if (BaseType->isExtVectorType()) {
- // FIXME: this expr should store IsArrow.
- IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
- ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr.get()->getValueKind());
- QualType ret = CheckExtVectorComponent(*this, BaseType, VK, OpLoc,
- Member, MemberLoc);
- if (ret.isNull())
- return ExprError();
-
- return Owned(new (Context) ExtVectorElementExpr(ret, VK, BaseExpr.take(),
- *Member, MemberLoc));
- }
-
- // Adjust builtin-sel to the appropriate redefinition type if that's
- // not just a pointer to builtin-sel again.
- if (IsArrow &&
- BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) &&
- !Context.ObjCSelRedefinitionType->isObjCSelType()) {
- BaseExpr = ImpCastExprToType(BaseExpr.take(), Context.ObjCSelRedefinitionType,
- CK_BitCast);
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
- }
-
- // Failure cases.
- fail:
-
- // Recover from dot accesses to pointers, e.g.:
- // type *foo;
- // foo.bar
- // This is actually well-formed in two cases:
- // - 'type' is an Objective C type
- // - 'bar' is a pseudo-destructor name which happens to refer to
- // the appropriate pointer type
- if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
- if (!IsArrow && Ptr->getPointeeType()->isRecordType() &&
- MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
- Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
- << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
- << FixItHint::CreateReplacement(OpLoc, "->");
-
- // Recurse as an -> access.
- IsArrow = true;
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
- }
- }
-
- // If the user is trying to apply -> or . to a function name, it's probably
- // because they forgot parentheses to call that function.
- QualType ZeroArgCallTy;
- UnresolvedSet<4> Overloads;
- if (isExprCallable(*BaseExpr.get(), ZeroArgCallTy, Overloads)) {
- if (ZeroArgCallTy.isNull()) {
- Diag(BaseExpr.get()->getExprLoc(), diag::err_member_reference_needs_call)
- << (Overloads.size() > 1) << 0 << BaseExpr.get()->getSourceRange();
- UnresolvedSet<2> PlausibleOverloads;
- for (OverloadExpr::decls_iterator It = Overloads.begin(),
- DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
- const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
- QualType OverloadResultTy = OverloadDecl->getResultType();
- if ((!IsArrow && OverloadResultTy->isRecordType()) ||
- (IsArrow && OverloadResultTy->isPointerType() &&
- OverloadResultTy->getPointeeType()->isRecordType()))
- PlausibleOverloads.addDecl(It.getDecl());
- }
- NoteOverloads(PlausibleOverloads, BaseExpr.get()->getExprLoc());
- return ExprError();
- }
- if ((!IsArrow && ZeroArgCallTy->isRecordType()) ||
- (IsArrow && ZeroArgCallTy->isPointerType() &&
- ZeroArgCallTy->getPointeeType()->isRecordType())) {
- // At this point, we know BaseExpr looks like it's potentially callable
- // with 0 arguments, and that it returns something of a reasonable type,
- // so we can emit a fixit and carry on pretending that BaseExpr was
- // actually a CallExpr.
- SourceLocation ParenInsertionLoc =
- PP.getLocForEndOfToken(BaseExpr.get()->getLocEnd());
- Diag(BaseExpr.get()->getExprLoc(), diag::err_member_reference_needs_call)
- << (Overloads.size() > 1) << 1 << BaseExpr.get()->getSourceRange()
- << FixItHint::CreateInsertion(ParenInsertionLoc, "()");
- // FIXME: Try this before emitting the fixit, and suppress diagnostics
- // while doing so.
- ExprResult NewBase =
- ActOnCallExpr(0, BaseExpr.take(), ParenInsertionLoc,
- MultiExprArg(*this, 0, 0),
- ParenInsertionLoc.getFileLocWithOffset(1));
- if (NewBase.isInvalid())
- return ExprError();
- BaseExpr = NewBase;
- BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
- return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
- ObjCImpDecl, HasTemplateArgs);
- }
- }
-
- Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
- << BaseType << BaseExpr.get()->getSourceRange();
-
- return ExprError();
-}
-
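Illustration (not part of this patch) of the two recovery paths at the end of LookupMemberExpr; the declarations are invented and both member accesses are intentionally ill-formed so the fix-its fire.

    struct Widget { int id; };
    Widget *current();

    int ex1(Widget *w) { return w.id;        }  // fix-it: base is a pointer; use '->' instead of '.'
    int ex2()          { return current->id; }  // fix-it: 'current' is a function taking no
                                                //         arguments; insert '()' and retry as a call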
-/// The main callback when the parser finds something like
-/// expression . [nested-name-specifier] identifier
-/// expression -> [nested-name-specifier] identifier
-/// where 'identifier' encompasses a fairly broad spectrum of
-/// possibilities, including destructor and operator references.
-///
-/// \param OpKind either tok::arrow or tok::period
-/// \param HasTrailingLParen whether the next token is '(', which
-/// is used to diagnose mis-uses of special members that can
-/// only be called
-/// \param ObjCImpDecl the current ObjC @implementation decl;
-/// this is an ugly hack around the fact that ObjC @implementations
-/// aren't properly put in the context chain
-ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
- SourceLocation OpLoc,
- tok::TokenKind OpKind,
- CXXScopeSpec &SS,
- UnqualifiedId &Id,
- Decl *ObjCImpDecl,
- bool HasTrailingLParen) {
- if (SS.isSet() && SS.isInvalid())
- return ExprError();
-
- // Warn about the explicit constructor calls Microsoft extension.
- if (getLangOptions().Microsoft &&
- Id.getKind() == UnqualifiedId::IK_ConstructorName)
- Diag(Id.getSourceRange().getBegin(),
- diag::ext_ms_explicit_constructor_call);
-
- TemplateArgumentListInfo TemplateArgsBuffer;
-
- // Decompose the name into its component parts.
- DeclarationNameInfo NameInfo;
- const TemplateArgumentListInfo *TemplateArgs;
- DecomposeUnqualifiedId(*this, Id, TemplateArgsBuffer,
- NameInfo, TemplateArgs);
-
- DeclarationName Name = NameInfo.getName();
- bool IsArrow = (OpKind == tok::arrow);
-
- NamedDecl *FirstQualifierInScope
- = (!SS.isSet() ? 0 : FindFirstQualifierInScope(S,
- static_cast<NestedNameSpecifier*>(SS.getScopeRep())));
-
- // This is a postfix expression, so get rid of ParenListExprs.
- ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
- if (Result.isInvalid()) return ExprError();
- Base = Result.take();
-
- if (Base->getType()->isDependentType() || Name.isDependentName() ||
- isDependentScopeSpecifier(SS)) {
- Result = ActOnDependentMemberExpr(Base, Base->getType(),
- IsArrow, OpLoc,
- SS, FirstQualifierInScope,
- NameInfo, TemplateArgs);
- } else {
- LookupResult R(*this, NameInfo, LookupMemberName);
- ExprResult BaseResult = Owned(Base);
- Result = LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
- SS, ObjCImpDecl, TemplateArgs != 0);
- if (BaseResult.isInvalid())
- return ExprError();
- Base = BaseResult.take();
-
- if (Result.isInvalid()) {
- Owned(Base);
- return ExprError();
- }
-
- if (Result.get()) {
- // The only way a reference to a destructor can be used is to
- // immediately call it, which falls into this case. If the
- // next token is not a '(', produce a diagnostic and build the
- // call now.
- if (!HasTrailingLParen &&
- Id.getKind() == UnqualifiedId::IK_DestructorName)
- return DiagnoseDtorReference(NameInfo.getLoc(), Result.get());
-
- return move(Result);
- }
-
- Result = BuildMemberReferenceExpr(Base, Base->getType(),
- OpLoc, IsArrow, SS, FirstQualifierInScope,
- R, TemplateArgs);
- }
-
- return move(Result);
-}
-
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param) {
@@ -4716,6 +3169,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
MarkDeclarationReferenced(Param->getDefaultArg()->getLocStart(),
const_cast<CXXDestructorDecl*>(Temporary->getDestructor()));
ExprTemporaries.push_back(Temporary);
+ ExprNeedsCleanups = true;
}
// We already type-checked the argument, so we know it works.
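Illustration (not part of this patch) of why ExprNeedsCleanups is now set here: a default argument can materialize a temporary whose destructor must run at the call site. The types are invented.

    struct Resource {
      Resource();
      ~Resource();
    };

    void use(Resource r = Resource());   // the default argument builds a Resource temporary

    void caller() {
      use();   // the temporary is created (and must be destroyed) at this call,
               // so the calling expression needs cleanups
    }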
@@ -4834,7 +3288,8 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
InitializedEntity Entity =
Param? InitializedEntity::InitializeParameter(Context, Param)
- : InitializedEntity::InitializeParameter(Context, ProtoArgType);
+ : InitializedEntity::InitializeParameter(Context, ProtoArgType,
+ Proto->isArgConsumed(i));
ExprResult ArgE = PerformCopyInitialization(Entity,
SourceLocation(),
Owned(Arg));
@@ -5157,7 +3612,8 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (Proto && i < Proto->getNumArgs()) {
InitializedEntity Entity
= InitializedEntity::InitializeParameter(Context,
- Proto->getArgType(i));
+ Proto->getArgType(i),
+ Proto->isArgConsumed(i));
ExprResult ArgE = PerformCopyInitialization(Entity,
SourceLocation(),
Owned(Arg));
@@ -5248,8 +3704,8 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
InitializedEntity Entity
= InitializedEntity::InitializeTemporary(literalType);
InitializationKind Kind
- = InitializationKind::CreateCast(SourceRange(LParenLoc, RParenLoc),
- /*IsCStyleCast=*/true);
+ = InitializationKind::CreateCStyleCast(LParenLoc,
+ SourceRange(LParenLoc, RParenLoc));
InitializationSequence InitSeq(*this, Entity, Kind, &literalExpr, 1);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
MultiExprArg(*this, &literalExpr, 1),
@@ -5267,8 +3723,9 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
// In C, compound literals are l-values for some reason.
ExprValueKind VK = getLangOptions().CPlusPlus ? VK_RValue : VK_LValue;
- return Owned(new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
- VK, literalExpr, isFileScope));
+ return MaybeBindToTemporary(
+ new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, literalExpr, isFileScope));
}
ExprResult
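Illustration (not part of this patch): a compound literal is an l-value in C but a temporary in C++ (clang accepts the syntax in C++ only as an extension), which is why the result is now routed through MaybeBindToTemporary. The type is invented.

    struct Point { int x, y; };

    int firstX() {
      return ((struct Point){1, 2}).x;   // C: the literal is an l-value with storage;
                                         // C++ (clang extension): a temporary that may
                                         // need to be bound and later destroyed
    }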
@@ -5426,14 +3883,15 @@ static CastKind PrepareScalarCast(Sema &S, ExprResult &Src, QualType DestTy) {
}
/// CheckCastTypes - Check type constraints for casting between types.
-ExprResult Sema::CheckCastTypes(SourceRange TyR, QualType castType,
- Expr *castExpr, CastKind& Kind, ExprValueKind &VK,
+ExprResult Sema::CheckCastTypes(SourceLocation CastStartLoc, SourceRange TyR,
+ QualType castType, Expr *castExpr,
+ CastKind& Kind, ExprValueKind &VK,
CXXCastPath &BasePath, bool FunctionalStyle) {
if (castExpr->getType() == Context.UnknownAnyTy)
return checkUnknownAnyCast(TyR, castType, castExpr, Kind, VK, BasePath);
if (getLangOptions().CPlusPlus)
- return CXXCheckCStyleCast(SourceRange(TyR.getBegin(),
+ return CXXCheckCStyleCast(SourceRange(CastStartLoc,
castExpr->getLocEnd()),
castType, VK, castExpr, Kind, BasePath,
FunctionalStyle);
@@ -5551,8 +4009,8 @@ ExprResult Sema::CheckCastTypes(SourceRange TyR, QualType castType,
// If either type is a pointer, the other type has to be either an
// integer or a pointer.
+ QualType castExprType = castExpr->getType();
if (!castType->isArithmeticType()) {
- QualType castExprType = castExpr->getType();
if (!castExprType->isIntegralType(Context) &&
castExprType->isArithmeticType()) {
Diag(castExpr->getLocStart(),
@@ -5568,6 +4026,36 @@ ExprResult Sema::CheckCastTypes(SourceRange TyR, QualType castType,
}
}
+ if (getLangOptions().ObjCAutoRefCount) {
+ // Diagnose problems with Objective-C casts involving lifetime qualifiers.
+ CheckObjCARCConversion(SourceRange(CastStartLoc, castExpr->getLocEnd()),
+ castType, castExpr, CCK_CStyleCast);
+
+ if (const PointerType *CastPtr = castType->getAs<PointerType>()) {
+ if (const PointerType *ExprPtr = castExprType->getAs<PointerType>()) {
+ Qualifiers CastQuals = CastPtr->getPointeeType().getQualifiers();
+ Qualifiers ExprQuals = ExprPtr->getPointeeType().getQualifiers();
+ if (CastPtr->getPointeeType()->isObjCLifetimeType() &&
+ ExprPtr->getPointeeType()->isObjCLifetimeType() &&
+ !CastQuals.compatiblyIncludesObjCLifetime(ExprQuals)) {
+ Diag(castExpr->getLocStart(),
+ diag::err_typecheck_incompatible_ownership)
+ << castExprType << castType << AA_Casting
+ << castExpr->getSourceRange();
+
+ return ExprError();
+ }
+ }
+ }
+ else if (!CheckObjCARCUnavailableWeakConversion(castType, castExprType)) {
+ Diag(castExpr->getLocStart(),
+ diag::err_arc_convesion_of_weak_unavailable) << 1
+ << castExprType << castType
+ << castExpr->getSourceRange();
+ return ExprError();
+ }
+ }
+
castExprRes = Owned(castExpr);
Kind = PrepareScalarCast(*this, castExprRes, castType);
if (castExprRes.isInvalid())
@@ -5638,20 +4126,57 @@ ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
}
ExprResult
-Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc, ParsedType Ty,
+Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
+ Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *castExpr) {
- assert((Ty != 0) && (castExpr != 0) &&
+ assert(!D.isInvalidType() && (castExpr != 0) &&
"ActOnCastExpr(): missing type or expr");
- TypeSourceInfo *castTInfo;
- QualType castType = GetTypeFromParser(Ty, &castTInfo);
- if (!castTInfo)
- castTInfo = Context.getTrivialTypeSourceInfo(castType);
+ TypeSourceInfo *castTInfo = GetTypeForDeclaratorCast(D, castExpr->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOptions().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ QualType castType = castTInfo->getType();
+ Ty = CreateParsedType(castType, castTInfo);
+
+ bool isVectorLiteral = false;
+
+ // Check for an altivec or OpenCL literal,
+ // i.e. all the elements are integer constants.
+ ParenExpr *PE = dyn_cast<ParenExpr>(castExpr);
+ ParenListExpr *PLE = dyn_cast<ParenListExpr>(castExpr);
+ if (getLangOptions().AltiVec && castType->isVectorType() && (PE || PLE)) {
+ if (PLE && PLE->getNumExprs() == 0) {
+ Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer);
+ return ExprError();
+ }
+ if (PE || PLE->getNumExprs() == 1) {
+ Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0));
+ if (!E->getType()->isVectorType())
+ isVectorLiteral = true;
+ }
+ else
+ isVectorLiteral = true;
+ }
+
+ // If this is a vector initializer, '(' type ')' '(' init, ..., init ')'
+ // then handle it as such.
+ if (isVectorLiteral)
+ return BuildVectorLiteral(LParenLoc, RParenLoc, castExpr, castTInfo);
// If the Expr being casted is a ParenListExpr, handle it specially.
- if (isa<ParenListExpr>(castExpr))
- return ActOnCastOfParenListExpr(S, LParenLoc, RParenLoc, castExpr,
- castTInfo);
+ // This is not an AltiVec-style cast, so turn the ParenListExpr into a
+ // sequence of BinOp comma operators.
+ if (isa<ParenListExpr>(castExpr)) {
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, castExpr);
+ if (Result.isInvalid()) return ExprError();
+ castExpr = Result.take();
+ }
return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, castExpr);
}
@@ -5663,8 +4188,8 @@ Sema::BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
ExprResult CastResult =
- CheckCastTypes(SourceRange(LParenLoc, RParenLoc), Ty->getType(), castExpr,
- Kind, VK, BasePath);
+ CheckCastTypes(LParenLoc, SourceRange(LParenLoc, RParenLoc), Ty->getType(),
+ castExpr, Kind, VK, BasePath);
if (CastResult.isInvalid())
return ExprError();
castExpr = CastResult.take();
@@ -5675,6 +4200,80 @@ Sema::BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
LParenLoc, RParenLoc));
}
+ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
+ SourceLocation RParenLoc, Expr *E,
+ TypeSourceInfo *TInfo) {
+ assert((isa<ParenListExpr>(E) || isa<ParenExpr>(E)) &&
+ "Expected paren or paren list expression");
+
+ Expr **exprs;
+ unsigned numExprs;
+ Expr *subExpr;
+ if (ParenListExpr *PE = dyn_cast<ParenListExpr>(E)) {
+ exprs = PE->getExprs();
+ numExprs = PE->getNumExprs();
+ } else {
+ subExpr = cast<ParenExpr>(E)->getSubExpr();
+ exprs = &subExpr;
+ numExprs = 1;
+ }
+
+ QualType Ty = TInfo->getType();
+ assert(Ty->isVectorType() && "Expected vector type");
+
+ llvm::SmallVector<Expr *, 8> initExprs;
+ const VectorType *VTy = Ty->getAs<VectorType>();
+ unsigned numElems = Ty->getAs<VectorType>()->getNumElements();
+
+ // '(...)' form of vector initialization in AltiVec: the number of
+ // initializers must be one or must match the size of the vector.
+ // If a single value is specified in the initializer then it will be
+ // replicated to all the components of the vector
+ if (VTy->getVectorKind() == VectorType::AltiVecVector) {
+ // The number of initializers must be one or must match the size of the
+ // vector. If a single value is specified in the initializer then it will
+ // be replicated to all the components of the vector
+ if (numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = Owned(exprs[0]);
+ Literal = ImpCastExprToType(Literal.take(), ElemTy,
+ PrepareScalarCast(*this, Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
+ }
+ else if (numExprs < numElems) {
+ Diag(E->getExprLoc(),
+ diag::err_incorrect_number_of_vector_initializers);
+ return ExprError();
+ }
+ else
+ for (unsigned i = 0, e = numExprs; i != e; ++i)
+ initExprs.push_back(exprs[i]);
+ }
+ else {
+ // For OpenCL, when the number of initializers is a single value,
+ // it will be replicated to all components of the vector.
+ if (getLangOptions().OpenCL &&
+ VTy->getVectorKind() == VectorType::GenericVector &&
+ numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = Owned(exprs[0]);
+ Literal = ImpCastExprToType(Literal.take(), ElemTy,
+ PrepareScalarCast(*this, Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
+ }
+
+ for (unsigned i = 0, e = numExprs; i != e; ++i)
+ initExprs.push_back(exprs[i]);
+ }
+ // FIXME: This means that pretty-printing the final AST will produce curly
+ // braces instead of the original commas.
+ InitListExpr *initE = new (Context) InitListExpr(Context, LParenLoc,
+ &initExprs[0],
+ initExprs.size(), RParenLoc);
+ initE->setType(Ty);
+ return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
+}
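As a minimal illustration (not part of this patch; assumes clang++ with -maltivec so that __vector types get the AltiVecVector kind), the '(type)(init, ..., init)' literal form that ActOnCastExpr now routes to BuildVectorLiteral looks like this:

    // altivec_literal.cpp -- illustrative only; build (assumption): clang++ -maltivec -c altivec_literal.cpp
    typedef __vector int v4i;

    v4i splat() {
      // A single initializer is replicated to every element of the vector.
      return (v4i)(7);
    }

    v4i full() {
      // One initializer per element; fewer initializers than elements (other
      // than exactly one) is rejected with
      // err_incorrect_number_of_vector_initializers.
      return (v4i)(1, 2, 3, 4);
    }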
+
/// This is not an AltiVec-style cast, so turn the ParenListExpr into a sequence
/// of comma binary operators.
ExprResult
@@ -5694,91 +4293,18 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *expr) {
return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get());
}
-ExprResult
-Sema::ActOnCastOfParenListExpr(Scope *S, SourceLocation LParenLoc,
- SourceLocation RParenLoc, Expr *Op,
- TypeSourceInfo *TInfo) {
- ParenListExpr *PE = cast<ParenListExpr>(Op);
- QualType Ty = TInfo->getType();
- bool isVectorLiteral = false;
-
- // Check for an altivec or OpenCL literal,
- // i.e. all the elements are integer constants.
- if (getLangOptions().AltiVec && Ty->isVectorType()) {
- if (PE->getNumExprs() == 0) {
- Diag(PE->getExprLoc(), diag::err_altivec_empty_initializer);
- return ExprError();
- }
- if (PE->getNumExprs() == 1) {
- if (!PE->getExpr(0)->getType()->isVectorType())
- isVectorLiteral = true;
- }
- else
- isVectorLiteral = true;
- }
-
- // If this is a vector initializer, '(' type ')' '(' init, ..., init ')'
- // then handle it as such.
- if (isVectorLiteral) {
- llvm::SmallVector<Expr *, 8> initExprs;
- // '(...)' form of vector initialization in AltiVec: the number of
- // initializers must be one or must match the size of the vector.
- // If a single value is specified in the initializer then it will be
- // replicated to all the components of the vector
- if (Ty->getAs<VectorType>()->getVectorKind() ==
- VectorType::AltiVecVector) {
- unsigned numElems = Ty->getAs<VectorType>()->getNumElements();
- // The number of initializers must be one or must match the size of the
- // vector. If a single value is specified in the initializer then it will
- // be replicated to all the components of the vector
- if (PE->getNumExprs() == 1) {
- QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
- ExprResult Literal = Owned(PE->getExpr(0));
- Literal = ImpCastExprToType(Literal.take(), ElemTy,
- PrepareScalarCast(*this, Literal, ElemTy));
- return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
- }
- else if (PE->getNumExprs() < numElems) {
- Diag(PE->getExprLoc(),
- diag::err_incorrect_number_of_vector_initializers);
- return ExprError();
- }
- else
- for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i)
- initExprs.push_back(PE->getExpr(i));
- }
- else
- for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i)
- initExprs.push_back(PE->getExpr(i));
-
- // FIXME: This means that pretty-printing the final AST will produce curly
- // braces instead of the original commas.
- InitListExpr *E = new (Context) InitListExpr(Context, LParenLoc,
- &initExprs[0],
- initExprs.size(), RParenLoc);
- E->setType(Ty);
- return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, E);
- } else {
- // This is not an AltiVec-style cast, so turn the ParenListExpr into a
- // sequence of BinOp comma operators.
- ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Op);
- if (Result.isInvalid()) return ExprError();
- return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Result.take());
- }
-}
-
ExprResult Sema::ActOnParenOrParenListExpr(SourceLocation L,
SourceLocation R,
- MultiExprArg Val,
- ParsedType TypeOfCast) {
+ MultiExprArg Val) {
unsigned nexprs = Val.size();
Expr **exprs = reinterpret_cast<Expr**>(Val.release());
assert((exprs != 0) && "ActOnParenOrParenListExpr() missing expr list");
Expr *expr;
- if (nexprs == 1 && TypeOfCast && !TypeIsVectorType(TypeOfCast))
+ if (nexprs == 1)
expr = new (Context) ParenExpr(L, R, exprs[0]);
else
- expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R);
+ expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R,
+ exprs[nexprs-1]->getType());
return Owned(expr);
}
@@ -5876,7 +4402,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprR
// Now check the two expressions.
if (LHSTy->isVectorType() || RHSTy->isVectorType())
- return CheckVectorOperands(QuestionLoc, LHS, RHS);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
// OpenCL: If the condition is a vector, and both operands are scalar,
// attempt to implicitly convert them to the vector type to act like the
@@ -6203,43 +4729,21 @@ QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
-/// SuggestParentheses - Emit a diagnostic together with a fixit hint that wraps
+/// SuggestParentheses - Emit a note with a fixit hint that wraps
/// ParenRange in parentheses.
static void SuggestParentheses(Sema &Self, SourceLocation Loc,
- const PartialDiagnostic &PD,
- const PartialDiagnostic &FirstNote,
- SourceRange FirstParenRange,
- const PartialDiagnostic &SecondNote,
- SourceRange SecondParenRange) {
- Self.Diag(Loc, PD);
-
- if (!FirstNote.getDiagID())
- return;
-
- SourceLocation EndLoc = Self.PP.getLocForEndOfToken(FirstParenRange.getEnd());
- if (!FirstParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
- // We can't display the parentheses, so just return.
- return;
- }
-
- Self.Diag(Loc, FirstNote)
- << FixItHint::CreateInsertion(FirstParenRange.getBegin(), "(")
- << FixItHint::CreateInsertion(EndLoc, ")");
-
- if (!SecondNote.getDiagID())
- return;
-
- EndLoc = Self.PP.getLocForEndOfToken(SecondParenRange.getEnd());
- if (!SecondParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
- // We can't display the parentheses, so just dig the
- // warning/error and return.
- Self.Diag(Loc, SecondNote);
- return;
+ const PartialDiagnostic &Note,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = Self.PP.getLocForEndOfToken(ParenRange.getEnd());
+ if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() &&
+ EndLoc.isValid()) {
+ Self.Diag(Loc, Note)
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+ } else {
+ // We can't display the parentheses, so just show the bare note.
+ Self.Diag(Loc, Note) << ParenRange;
}
-
- Self.Diag(Loc, SecondNote)
- << FixItHint::CreateInsertion(SecondParenRange.getBegin(), "(")
- << FixItHint::CreateInsertion(EndLoc, ")");
}
static bool IsArithmeticOp(BinaryOperatorKind Opc) {
@@ -6312,13 +4816,13 @@ static bool ExprLooksBoolean(Expr *E) {
/// "int x = a + someBinaryCondition ? 1 : 2".
static void DiagnoseConditionalPrecedence(Sema &Self,
SourceLocation OpLoc,
- Expr *cond,
- Expr *lhs,
- Expr *rhs) {
+ Expr *Condition,
+ Expr *LHS,
+ Expr *RHS) {
BinaryOperatorKind CondOpcode;
Expr *CondRHS;
- if (!IsArithmeticBinaryExpr(cond, &CondOpcode, &CondRHS))
+ if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS))
return;
if (!ExprLooksBoolean(CondRHS))
return;
@@ -6326,25 +4830,18 @@ static void DiagnoseConditionalPrecedence(Sema &Self,
// The condition is an arithmetic binary expression, with a right-
// hand side that looks boolean, so warn.
- PartialDiagnostic Warn = Self.PDiag(diag::warn_precedence_conditional)
- << cond->getSourceRange()
- << BinaryOperator::getOpcodeStr(CondOpcode);
-
- PartialDiagnostic FirstNote =
- Self.PDiag(diag::note_precedence_conditional_silence)
+ Self.Diag(OpLoc, diag::warn_precedence_conditional)
+ << Condition->getSourceRange()
<< BinaryOperator::getOpcodeStr(CondOpcode);
- SourceRange FirstParenRange(cond->getLocStart(),
- cond->getLocEnd());
-
- PartialDiagnostic SecondNote =
- Self.PDiag(diag::note_precedence_conditional_first);
-
- SourceRange SecondParenRange(CondRHS->getLocStart(),
- rhs->getLocEnd());
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_conditional_silence)
+ << BinaryOperator::getOpcodeStr(CondOpcode),
+ SourceRange(Condition->getLocStart(), Condition->getLocEnd()));
- SuggestParentheses(Self, OpLoc, Warn, FirstNote, FirstParenRange,
- SecondNote, SecondParenRange);
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_conditional_first),
+ SourceRange(CondRHS->getLocStart(), RHS->getLocEnd()));
}
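For reference, a hedged example (illustrative, not from the patch) of the pattern DiagnoseConditionalPrecedence warns about, with the two parenthesizations that the split SuggestParentheses notes now offer as fix-its:

    // conditional_precedence.cpp -- '?:' binds more loosely than '+'.
    int choose(int x, bool flag) {
      int warned  = x + flag ? 1 : 2;    // warn_precedence_conditional: parsed as (x + flag) ? 1 : 2
      int silence = x + (flag ? 1 : 2);  // note_precedence_conditional_silence fix-it
      int first   = (x + flag) ? 1 : 2;  // note_precedence_conditional_first fix-it
      return warned + silence + first;
    }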
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
@@ -6427,17 +4924,31 @@ checkPointerTypesForAssignment(Sema &S, QualType lhsType, QualType rhsType) {
// qualifiers of the type *pointed to* by the right;
Qualifiers lq;
+ // As a special case, 'non-__weak A *' -> 'non-__weak const *' is okay.
+ if (lhq.getObjCLifetime() != rhq.getObjCLifetime() &&
+ lhq.compatiblyIncludesObjCLifetime(rhq)) {
+ // Ignore lifetime for further calculation.
+ lhq.removeObjCLifetime();
+ rhq.removeObjCLifetime();
+ }
+
if (!lhq.compatiblyIncludes(rhq)) {
// Treat address-space mismatches as fatal. TODO: address subspaces
if (lhq.getAddressSpace() != rhq.getAddressSpace())
ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
- // It's okay to add or remove GC qualifiers when converting to
+ // It's okay to add or remove GC or lifetime qualifiers when converting to
// and from void*.
- else if (lhq.withoutObjCGCAttr().compatiblyIncludes(rhq.withoutObjCGCAttr())
+ else if (lhq.withoutObjCGCAttr().withoutObjCLifetime()
+ .compatiblyIncludes(
+ rhq.withoutObjCGCAttr().withoutObjCLifetime())
&& (lhptee->isVoidType() || rhptee->isVoidType()))
; // keep old
+ // Treat lifetime mismatches as fatal.
+ else if (lhq.getObjCLifetime() != rhq.getObjCLifetime())
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
// For GCC compatibility, other qualifier mismatches are treated
// as still compatible in C.
else ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
@@ -6614,6 +5125,7 @@ Sema::AssignConvertType
Sema::CheckAssignmentConstraints(QualType lhsType, ExprResult &rhs,
CastKind &Kind) {
QualType rhsType = rhs.get()->getType();
+ QualType origLhsType = lhsType;
// Get canonical types. We're not formatting these types, just comparing
// them.
@@ -6768,7 +5280,13 @@ Sema::CheckAssignmentConstraints(QualType lhsType, ExprResult &rhs,
// A* -> B*
if (rhsType->isObjCObjectPointerType()) {
Kind = CK_BitCast;
- return checkObjCPointerTypesForAssignment(*this, lhsType, rhsType);
+ Sema::AssignConvertType result =
+ checkObjCPointerTypesForAssignment(*this, lhsType, rhsType);
+ if (getLangOptions().ObjCAutoRefCount &&
+ result == Compatible &&
+ !CheckObjCARCUnavailableWeakConversion(origLhsType, rhsType))
+ result = IncompatibleObjCWeakRef;
+ return result;
}
// int or null -> A*
@@ -6936,8 +5454,12 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, ExprResult &rExpr) {
AA_Assigning);
if (Res.isInvalid())
return Incompatible;
+ Sema::AssignConvertType result = Compatible;
+ if (getLangOptions().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(lhsType, rExpr.get()->getType()))
+ result = IncompatibleObjCWeakRef;
rExpr = move(Res);
- return Compatible;
+ return result;
}
// FIXME: Currently, we fall through and treat C++ classes like C
@@ -6989,7 +5511,8 @@ QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &lex, ExprResult &
return QualType();
}
-QualType Sema::CheckVectorOperands(SourceLocation Loc, ExprResult &lex, ExprResult &rex) {
+QualType Sema::CheckVectorOperands(ExprResult &lex, ExprResult &rex,
+ SourceLocation Loc, bool isCompAssign) {
// For conversion purposes, we ignore any qualifiers.
// For example, "const float" and "float" are equivalent.
QualType lhsType =
@@ -7001,42 +5524,33 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, ExprResult &lex, ExprResu
if (lhsType == rhsType)
return lhsType;
- // Handle the case of a vector & extvector type of the same size and element
- // type. It would be nice if we only had one vector type someday.
- if (getLangOptions().LaxVectorConversions) {
- if (const VectorType *LV = lhsType->getAs<VectorType>()) {
- if (const VectorType *RV = rhsType->getAs<VectorType>()) {
- if (LV->getElementType() == RV->getElementType() &&
- LV->getNumElements() == RV->getNumElements()) {
- if (lhsType->isExtVectorType()) {
- rex = ImpCastExprToType(rex.take(), lhsType, CK_BitCast);
- return lhsType;
- }
-
- lex = ImpCastExprToType(lex.take(), rhsType, CK_BitCast);
- return rhsType;
- } else if (Context.getTypeSize(lhsType) ==Context.getTypeSize(rhsType)){
- // If we are allowing lax vector conversions, and LHS and RHS are both
- // vectors, the total size only needs to be the same. This is a
- // bitcast; no bits are changed but the result type is different.
- rex = ImpCastExprToType(rex.take(), lhsType, CK_BitCast);
- return lhsType;
- }
- }
- }
- }
-
// Handle the case of equivalent AltiVec and GCC vector types
if (lhsType->isVectorType() && rhsType->isVectorType() &&
Context.areCompatibleVectorTypes(lhsType, rhsType)) {
- lex = ImpCastExprToType(lex.take(), rhsType, CK_BitCast);
+ if (lhsType->isExtVectorType()) {
+ rex = ImpCastExprToType(rex.take(), lhsType, CK_BitCast);
+ return lhsType;
+ }
+
+ if (!isCompAssign)
+ lex = ImpCastExprToType(lex.take(), rhsType, CK_BitCast);
return rhsType;
}
+ if (getLangOptions().LaxVectorConversions &&
+ Context.getTypeSize(lhsType) == Context.getTypeSize(rhsType)) {
+ // If we are allowing lax vector conversions, and LHS and RHS are both
+ // vectors, the total size only needs to be the same. This is a
+ // bitcast; no bits are changed but the result type is different.
+ // FIXME: Should we really be allowing this?
+ rex = ImpCastExprToType(rex.take(), lhsType, CK_BitCast);
+ return lhsType;
+ }
+
// Canonicalize the ExtVector to the LHS, remember if we swapped so we can
// swap back (so that we don't reverse the inputs to a subtract, for instance).
bool swapped = false;
- if (rhsType->isExtVectorType()) {
+ if (rhsType->isExtVectorType() && !isCompAssign) {
swapped = true;
std::swap(rex, lex);
std::swap(rhsType, lhsType);
@@ -7069,6 +5583,7 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, ExprResult &lex, ExprResu
}
// Vectors of different size or scalar and non-ext-vector are errors.
+ if (swapped) std::swap(rex, lex);
Diag(Loc, diag::err_typecheck_vector_not_convertable)
<< lex.get()->getType() << rex.get()->getType()
<< lex.get()->getSourceRange() << rex.get()->getSourceRange();
@@ -7078,7 +5593,7 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, ExprResult &lex, ExprResu
QualType Sema::CheckMultiplyDivideOperands(
ExprResult &lex, ExprResult &rex, SourceLocation Loc, bool isCompAssign, bool isDiv) {
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType())
- return CheckVectorOperands(Loc, lex, rex);
+ return CheckVectorOperands(lex, rex, Loc, isCompAssign);
QualType compType = UsualArithmeticConversions(lex, rex, isCompAssign);
if (lex.isInvalid() || rex.isInvalid())
@@ -7102,7 +5617,7 @@ QualType Sema::CheckRemainderOperands(
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType()) {
if (lex.get()->getType()->hasIntegerRepresentation() &&
rex.get()->getType()->hasIntegerRepresentation())
- return CheckVectorOperands(Loc, lex, rex);
+ return CheckVectorOperands(lex, rex, Loc, isCompAssign);
return InvalidOperands(Loc, lex, rex);
}
@@ -7121,10 +5636,149 @@ QualType Sema::CheckRemainderOperands(
return compType;
}
+/// \brief Diagnose invalid arithmetic on two void pointers.
+static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ S.Diag(Loc, S.getLangOptions().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 1 /* two pointers */ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a void pointer.
+static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ S.Diag(Loc, S.getLangOptions().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 0 /* one pointer */ << Pointer->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on two function pointers.
+static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ assert(LHS->getType()->isAnyPointerType());
+ assert(RHS->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOptions().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 1 /* two pointers */ << LHS->getType()->getPointeeType()
+ // We only show the second type if it differs from the first.
+ << (unsigned)!S.Context.hasSameUnqualifiedType(LHS->getType(),
+ RHS->getType())
+ << RHS->getType()->getPointeeType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a function pointer.
+static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ assert(Pointer->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOptions().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 0 /* one pointer */ << Pointer->getType()->getPointeeType()
+ << 0 /* one pointer, so only one type */
+ << Pointer->getSourceRange();
+}
+
+/// \brief Check the validity of an arithmetic pointer operand.
+///
+/// If the operand has pointer type, this code will check for pointer types
+/// which are invalid in arithmetic operations. These will be diagnosed
+/// appropriately, including whether or not the use is supported as an
+/// extension.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
+static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
+ Expr *Operand) {
+ if (!Operand->getType()->isAnyPointerType()) return true;
+
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ if (PointeeTy->isVoidType()) {
+ diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
+ return !S.getLangOptions().CPlusPlus;
+ }
+ if (PointeeTy->isFunctionType()) {
+ diagnoseArithmeticOnFunctionPointer(S, Loc, Operand);
+ return !S.getLangOptions().CPlusPlus;
+ }
+
+ if ((Operand->getType()->isPointerType() &&
+ !Operand->getType()->isDependentType()) ||
+ Operand->getType()->isObjCObjectPointerType()) {
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ if (S.RequireCompleteType(
+ Loc, PointeeTy,
+ S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
+ << PointeeTy << Operand->getSourceRange()))
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Check the validity of a binary arithmetic operation w.r.t. pointer
+/// operands.
+///
+/// This routine will diagnose any invalid arithmetic on pointer operands much
+/// like \see checkArithmeticOpPointerOperand. However, it has special logic
+/// for emitting a single diagnostic even for operations where both LHS and RHS
+/// are (potentially problematic) pointers.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
+static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ bool isLHSPointer = LHS->getType()->isAnyPointerType();
+ bool isRHSPointer = RHS->getType()->isAnyPointerType();
+ if (!isLHSPointer && !isRHSPointer) return true;
+
+ QualType LHSPointeeTy, RHSPointeeTy;
+ if (isLHSPointer) LHSPointeeTy = LHS->getType()->getPointeeType();
+ if (isRHSPointer) RHSPointeeTy = RHS->getType()->getPointeeType();
+
+ // Check for arithmetic on pointers to incomplete types.
+ bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType();
+ bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType();
+ if (isLHSVoidPtr || isRHSVoidPtr) {
+ if (!isRHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, LHS);
+ else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHS);
+ else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHS, RHS);
+
+ return !S.getLangOptions().CPlusPlus;
+ }
+
+ bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType();
+ bool isRHSFuncPtr = isRHSPointer && RHSPointeeTy->isFunctionType();
+ if (isLHSFuncPtr || isRHSFuncPtr) {
+ if (!isRHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, LHS);
+ else if (!isLHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, RHS);
+ else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHS, RHS);
+
+ return !S.getLangOptions().CPlusPlus;
+ }
+
+ Expr *Operands[] = { LHS, RHS };
+ for (unsigned i = 0; i < 2; ++i) {
+ Expr *Operand = Operands[i];
+ if ((Operand->getType()->isPointerType() &&
+ !Operand->getType()->isDependentType()) ||
+ Operand->getType()->isObjCObjectPointerType()) {
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ if (S.RequireCompleteType(
+ Loc, PointeeTy,
+ S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
+ << PointeeTy << Operand->getSourceRange()))
+ return false;
+ }
+ }
+ return true;
+}
+
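A hedged sketch (not from the patch) of the operand shapes the new checkArithmeticOpPointerOperand / checkArithmeticBinOpPointerOperands helpers diagnose; in GNU C these are pointer-arithmetic extension warnings, while in C++ they are hard errors, so the offending lines are left commented out:

    // pointer_arith.cpp -- pointer arithmetic routed through the new helpers.
    void pointer_arith(void *p, void *q, void (*f)()) {
      // Each line below is an error in C++ (extension warning in GNU C):
      // char *a = (char *)(p + 1);   // arithmetic on a void pointer
      // long  d = q - p;             // arithmetic on two void pointers
      // void (*g)() = f + 1;         // arithmetic on a function pointer
      (void)p; (void)q; (void)f;
    }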
QualType Sema::CheckAdditionOperands( // C99 6.5.6
ExprResult &lex, ExprResult &rex, SourceLocation Loc, QualType* CompLHSTy) {
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(Loc, lex, rex);
+ QualType compType = CheckVectorOperands(lex, rex, Loc, CompLHSTy);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
@@ -7146,42 +5800,12 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
std::swap(PExp, IExp);
if (PExp->getType()->isAnyPointerType()) {
-
if (IExp->getType()->isIntegerType()) {
- QualType PointeeTy = PExp->getType()->getPointeeType();
-
- // Check for arithmetic on pointers to incomplete types.
- if (PointeeTy->isVoidType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- return QualType();
- }
+ if (!checkArithmeticOpPointerOperand(*this, Loc, PExp))
+ return QualType();
- // GNU extension: arithmetic on pointer to void
- Diag(Loc, diag::ext_gnu_void_ptr)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- } else if (PointeeTy->isFunctionType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
- << lex.get()->getType() << lex.get()->getSourceRange();
- return QualType();
- }
+ QualType PointeeTy = PExp->getType()->getPointeeType();
- // GNU extension: arithmetic on pointer to function
- Diag(Loc, diag::ext_gnu_ptr_func_arith)
- << lex.get()->getType() << lex.get()->getSourceRange();
- } else {
- // Check if we require a complete type.
- if (((PExp->getType()->isPointerType() &&
- !PExp->getType()->isDependentType()) ||
- PExp->getType()->isObjCObjectPointerType()) &&
- RequireCompleteType(Loc, PointeeTy,
- PDiag(diag::err_typecheck_arithmetic_incomplete_type)
- << PExp->getSourceRange()
- << PExp->getType()))
- return QualType();
- }
// Diagnose bad cases where we step over interface counts.
if (PointeeTy->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
Diag(Loc, diag::err_arithmetic_nonfragile_interface)
@@ -7209,7 +5833,7 @@ QualType Sema::CheckAdditionOperands( // C99 6.5.6
QualType Sema::CheckSubtractionOperands(ExprResult &lex, ExprResult &rex,
SourceLocation Loc, QualType* CompLHSTy) {
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(Loc, lex, rex);
+ QualType compType = CheckVectorOperands(lex, rex, Loc, CompLHSTy);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
@@ -7231,35 +5855,6 @@ QualType Sema::CheckSubtractionOperands(ExprResult &lex, ExprResult &rex,
if (lex.get()->getType()->isAnyPointerType()) {
QualType lpointee = lex.get()->getType()->getPointeeType();
- // The LHS must be an completely-defined object type.
-
- bool ComplainAboutVoid = false;
- Expr *ComplainAboutFunc = 0;
- if (lpointee->isVoidType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- return QualType();
- }
-
- // GNU C extension: arithmetic on pointer to void
- ComplainAboutVoid = true;
- } else if (lpointee->isFunctionType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
- << lex.get()->getType() << lex.get()->getSourceRange();
- return QualType();
- }
-
- // GNU C extension: arithmetic on pointer to function
- ComplainAboutFunc = lex.get();
- } else if (!lpointee->isDependentType() &&
- RequireCompleteType(Loc, lpointee,
- PDiag(diag::err_typecheck_sub_ptr_object)
- << lex.get()->getSourceRange()
- << lex.get()->getType()))
- return QualType();
-
// Diagnose bad cases where we step over interface counts.
if (lpointee->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
Diag(Loc, diag::err_arithmetic_nonfragile_interface)
@@ -7269,13 +5864,8 @@ QualType Sema::CheckSubtractionOperands(ExprResult &lex, ExprResult &rex,
// The result type of a pointer-int computation is the pointer type.
if (rex.get()->getType()->isIntegerType()) {
- if (ComplainAboutVoid)
- Diag(Loc, diag::ext_gnu_void_ptr)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- if (ComplainAboutFunc)
- Diag(Loc, diag::ext_gnu_ptr_func_arith)
- << ComplainAboutFunc->getType()
- << ComplainAboutFunc->getSourceRange();
+ if (!checkArithmeticOpPointerOperand(*this, Loc, lex.get()))
+ return QualType();
if (CompLHSTy) *CompLHSTy = lex.get()->getType();
return lex.get()->getType();
@@ -7285,33 +5875,6 @@ QualType Sema::CheckSubtractionOperands(ExprResult &lex, ExprResult &rex,
if (const PointerType *RHSPTy = rex.get()->getType()->getAs<PointerType>()) {
QualType rpointee = RHSPTy->getPointeeType();
- // RHS must be a completely-type object type.
- // Handle the GNU void* extension.
- if (rpointee->isVoidType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- return QualType();
- }
-
- ComplainAboutVoid = true;
- } else if (rpointee->isFunctionType()) {
- if (getLangOptions().CPlusPlus) {
- Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
- << rex.get()->getType() << rex.get()->getSourceRange();
- return QualType();
- }
-
- // GNU extension: arithmetic on pointer to function
- if (!ComplainAboutFunc)
- ComplainAboutFunc = rex.get();
- } else if (!rpointee->isDependentType() &&
- RequireCompleteType(Loc, rpointee,
- PDiag(diag::err_typecheck_sub_ptr_object)
- << rex.get()->getSourceRange()
- << rex.get()->getType()))
- return QualType();
-
if (getLangOptions().CPlusPlus) {
// Pointee types must be the same: C++ [expr.add]
if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) {
@@ -7332,13 +5895,9 @@ QualType Sema::CheckSubtractionOperands(ExprResult &lex, ExprResult &rex,
}
}
- if (ComplainAboutVoid)
- Diag(Loc, diag::ext_gnu_void_ptr)
- << lex.get()->getSourceRange() << rex.get()->getSourceRange();
- if (ComplainAboutFunc)
- Diag(Loc, diag::ext_gnu_ptr_func_arith)
- << ComplainAboutFunc->getType()
- << ComplainAboutFunc->getSourceRange();
+ if (!checkArithmeticBinOpPointerOperands(*this, Loc,
+ lex.get(), rex.get()))
+ return QualType();
if (CompLHSTy) *CompLHSTy = lex.get()->getType();
return Context.getPointerDiffType();
@@ -7394,19 +5953,24 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &lex, ExprResult &rex,
llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue());
Result = Result.shl(Right);
+ // Print the bit representation of the signed integer as an unsigned
+ // hexadecimal number.
+ llvm::SmallString<40> HexResult;
+ Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true);
+
// If we are only missing a sign bit, this is less likely to result in actual
// bugs -- if the result is cast back to an unsigned type, it will have the
// expected value. Thus we place this behind a different warning that can be
// turned off separately if needed.
if (LeftBits == ResultBits - 1) {
- S.Diag(Loc, diag::warn_shift_result_overrides_sign_bit)
- << Result.toString(10) << LHSTy
+ S.Diag(Loc, diag::warn_shift_result_sets_sign_bit)
+ << HexResult.str() << LHSTy
<< lex.get()->getSourceRange() << rex.get()->getSourceRange();
return;
}
S.Diag(Loc, diag::warn_shift_result_gt_typewidth)
- << Result.toString(10) << Result.getMinSignedBits() << LHSTy
+ << HexResult.str() << Result.getMinSignedBits() << LHSTy
<< Left.getBitWidth() << lex.get()->getSourceRange() << rex.get()->getSourceRange();
}
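A hedged example (assuming a 32-bit int) of the two constant-shift diagnostics whose message now shows the value in hexadecimal via HexResult:

    // shift_overflow.cpp -- constant shifts flagged by DiagnoseBadShiftValues.
    int shifts() {
      int sign_bit = 1 << 31;  // warn_shift_result_sets_sign_bit: result is 0x80000000
      int too_wide = 2 << 31;  // warn_shift_result_gt_typewidth: result is 0x100000000,
                               // which no longer fits in the 32-bit type
      return sign_bit + too_wide;
    }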
@@ -7427,7 +5991,7 @@ QualType Sema::CheckShiftOperands(ExprResult &lex, ExprResult &rex, SourceLocati
// Vector shifts promote their scalar inputs to vector type.
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType())
- return CheckVectorOperands(Loc, lex, rex);
+ return CheckVectorOperands(lex, rex, Loc, isCompAssign);
// Shifts don't perform usual arithmetic conversions, they just do integer
// promotions on each operand. C99 6.5.7p3
@@ -7720,7 +6284,8 @@ QualType Sema::CheckCompareOperands(ExprResult &lex, ExprResult &rex, SourceLoca
// comparisons of member pointers to null pointer constants.
if (RHSIsNull &&
((lType->isAnyPointerType() || lType->isNullPtrType()) ||
- (!isRelational && lType->isMemberPointerType()))) {
+ (!isRelational &&
+ (lType->isMemberPointerType() || lType->isBlockPointerType())))) {
rex = ImpCastExprToType(rex.take(), lType,
lType->isMemberPointerType()
? CK_NullToMemberPointer
@@ -7729,7 +6294,8 @@ QualType Sema::CheckCompareOperands(ExprResult &lex, ExprResult &rex, SourceLoca
}
if (LHSIsNull &&
((rType->isAnyPointerType() || rType->isNullPtrType()) ||
- (!isRelational && rType->isMemberPointerType()))) {
+ (!isRelational &&
+ (rType->isMemberPointerType() || rType->isBlockPointerType())))) {
lex = ImpCastExprToType(lex.take(), rType,
rType->isMemberPointerType()
? CK_NullToMemberPointer
@@ -7894,7 +6460,7 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &lex, ExprResult &rex,
bool isRelational) {
// Check to make sure we're operating on vectors of the same type and width,
// Allowing one side to be a scalar of element type.
- QualType vType = CheckVectorOperands(Loc, lex, rex);
+ QualType vType = CheckVectorOperands(lex, rex, Loc, /*isCompAssign*/false);
if (vType.isNull())
return vType;
@@ -7949,7 +6515,7 @@ inline QualType Sema::CheckBitwiseOperands(
if (lex.get()->getType()->isVectorType() || rex.get()->getType()->isVectorType()) {
if (lex.get()->getType()->hasIntegerRepresentation() &&
rex.get()->getType()->hasIntegerRepresentation())
- return CheckVectorOperands(Loc, lex, rex);
+ return CheckVectorOperands(lex, rex, Loc, isCompAssign);
return InvalidOperands(Loc, lex, rex);
}
@@ -7975,8 +6541,8 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
// is a constant.
if (lex.get()->getType()->isIntegerType() && !lex.get()->getType()->isBooleanType() &&
rex.get()->getType()->isIntegerType() && !rex.get()->isValueDependent() &&
- // Don't warn in macros.
- !Loc.isMacroID()) {
+ // Don't warn in macros or template instantiations.
+ !Loc.isMacroID() && ActiveTemplateInstantiations.empty()) {
// If the RHS can be constant folded, and if it constant folds to something
// that isn't 0 or 1 (which indicate a potential logical operation that
// happened to fold to true/false) then warn.
@@ -8099,7 +6665,43 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
unsigned Diag = 0;
bool NeedType = false;
switch (IsLV) { // C99 6.5.16p2
- case Expr::MLV_ConstQualified: Diag = diag::err_typecheck_assign_const; break;
+ case Expr::MLV_ConstQualified:
+ Diag = diag::err_typecheck_assign_const;
+
+ // In ARC, use some specialized diagnostics for occasions where we
+ // infer 'const'. These are always pseudo-strong variables.
+ if (S.getLangOptions().ObjCAutoRefCount) {
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+
+ // Use the normal diagnostic if it's pseudo-__strong but the
+ // user actually wrote 'const'.
+ if (var->isARCPseudoStrong() &&
+ (!var->getTypeSourceInfo() ||
+ !var->getTypeSourceInfo()->getType().isConstQualified())) {
+ // There are two pseudo-strong cases:
+ // - self
+ ObjCMethodDecl *method = S.getCurMethodDecl();
+ if (method && var == method->getSelfDecl())
+ Diag = diag::err_typecheck_arr_assign_self;
+
+ // - fast enumeration variables
+ else
+ Diag = diag::err_typecheck_arr_assign_enumeration;
+
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ // We need to preserve the AST regardless, so migration tool
+ // can do its job.
+ return false;
+ }
+ }
+ }
+
+ break;
case Expr::MLV_ArrayType:
Diag = diag::err_typecheck_array_not_modifiable_lvalue;
NeedType = true;
@@ -8214,6 +6816,13 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, ExprResult &RHS,
<< SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc());
}
}
+
+ if (ConvTy == Compatible) {
+ if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong)
+ checkRetainCycles(LHS, RHS.get());
+ else if (getLangOptions().ObjCAutoRefCount)
+ checkUnsafeExprAssigns(Loc, LHS, RHS.get());
+ }
} else {
// Compound assignment "x += y"
ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType);
@@ -8295,29 +6904,9 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
QualType PointeeTy = ResType->getPointeeType();
// C99 6.5.2.4p2, 6.5.6p2
- if (PointeeTy->isVoidType()) {
- if (S.getLangOptions().CPlusPlus) {
- S.Diag(OpLoc, diag::err_typecheck_pointer_arith_void_type)
- << Op->getSourceRange();
- return QualType();
- }
-
- // Pointer to void is a GNU extension in C.
- S.Diag(OpLoc, diag::ext_gnu_void_ptr) << Op->getSourceRange();
- } else if (PointeeTy->isFunctionType()) {
- if (S.getLangOptions().CPlusPlus) {
- S.Diag(OpLoc, diag::err_typecheck_pointer_arith_function_type)
- << Op->getType() << Op->getSourceRange();
- return QualType();
- }
-
- S.Diag(OpLoc, diag::ext_gnu_ptr_func_arith)
- << ResType << Op->getSourceRange();
- } else if (S.RequireCompleteType(OpLoc, PointeeTy,
- S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
- << Op->getSourceRange()
- << ResType))
+ if (!checkArithmeticOpPointerOperand(S, OpLoc, Op))
return QualType();
+
// Diagnose bad cases where we step over interface counts.
else if (PointeeTy->isObjCObjectType() && S.LangOpts.ObjCNonFragileABI) {
S.Diag(OpLoc, diag::err_arithmetic_nonfragile_interface)
@@ -8400,6 +6989,8 @@ void Sema::ConvertPropertyForLValue(ExprResult &LHS, ExprResult &RHS, QualType &
LHS.get()->getObjectKind() == OK_ObjCProperty);
const ObjCPropertyRefExpr *PropRef = LHS.get()->getObjCProperty();
+ bool Consumed = false;
+
if (PropRef->isImplicitProperty()) {
// If using property-dot syntax notation for assignment, and there is a
// setter, RHS expression is being passed to the setter argument. So,
@@ -8407,6 +6998,8 @@ void Sema::ConvertPropertyForLValue(ExprResult &LHS, ExprResult &RHS, QualType &
if (const ObjCMethodDecl *SetterMD = PropRef->getImplicitPropertySetter()) {
ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
LHSTy = (*P)->getType();
+ Consumed = (getLangOptions().ObjCAutoRefCount &&
+ (*P)->hasAttr<NSConsumedAttr>());
// Otherwise, if the getter returns an l-value, just call that.
} else {
@@ -8418,14 +7011,26 @@ void Sema::ConvertPropertyForLValue(ExprResult &LHS, ExprResult &RHS, QualType &
return;
}
}
+ } else if (getLangOptions().ObjCAutoRefCount) {
+ const ObjCMethodDecl *setter
+ = PropRef->getExplicitProperty()->getSetterMethodDecl();
+ if (setter) {
+ ObjCMethodDecl::param_iterator P = setter->param_begin();
+ LHSTy = (*P)->getType();
+ Consumed = (*P)->hasAttr<NSConsumedAttr>();
+ }
}
- if (getLangOptions().CPlusPlus && LHSTy->isRecordType()) {
+ if ((getLangOptions().CPlusPlus && LHSTy->isRecordType()) ||
+ getLangOptions().ObjCAutoRefCount) {
InitializedEntity Entity =
- InitializedEntity::InitializeParameter(Context, LHSTy);
+ InitializedEntity::InitializeParameter(Context, LHSTy, Consumed);
ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), RHS);
- if (!ArgE.isInvalid())
+ if (!ArgE.isInvalid()) {
RHS = ArgE;
+ if (getLangOptions().ObjCAutoRefCount && !PropRef->isSuperReceiver())
+ checkRetainCycles(const_cast<Expr*>(PropRef->getBase()), RHS.get());
+ }
}
}
@@ -8682,8 +7287,7 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
VK = VK_LValue;
// ...except that certain expressions are never l-values in C.
- if (!S.getLangOptions().CPlusPlus &&
- IsCForbiddenLValueType(S.Context, Result))
+ if (!S.getLangOptions().CPlusPlus && Result.isCForbiddenLValueType())
VK = VK_RValue;
return Result;
@@ -8815,6 +7419,53 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
rhs = move(resolvedRHS);
}
+ // The canonical way to check for a GNU null is with isNullPointerConstant,
+ // but we use a bit of a hack here for speed; this is a relatively
+ // hot path, and isNullPointerConstant is slow.
+ bool LeftNull = isa<GNUNullExpr>(lhs.get()->IgnoreParenImpCasts());
+ bool RightNull = isa<GNUNullExpr>(rhs.get()->IgnoreParenImpCasts());
+
+ // Detect when a NULL constant is used improperly in an expression. These
+ // are mainly cases where the null pointer is used as an integer instead
+ // of a pointer.
+ if (LeftNull || RightNull) {
+ // Avoid analyzing cases where the result will either be invalid (and
+ // diagnosed as such) or entirely valid and not something to warn about.
+ QualType LeftType = lhs.get()->getType();
+ QualType RightType = rhs.get()->getType();
+ if (!LeftType->isBlockPointerType() && !LeftType->isMemberPointerType() &&
+ !LeftType->isFunctionType() &&
+ !RightType->isBlockPointerType() &&
+ !RightType->isMemberPointerType() &&
+ !RightType->isFunctionType()) {
+ if (Opc == BO_Mul || Opc == BO_Div || Opc == BO_Rem || Opc == BO_Add ||
+ Opc == BO_Sub || Opc == BO_Shl || Opc == BO_Shr || Opc == BO_And ||
+ Opc == BO_Xor || Opc == BO_Or || Opc == BO_MulAssign ||
+ Opc == BO_DivAssign || Opc == BO_AddAssign || Opc == BO_SubAssign ||
+ Opc == BO_RemAssign || Opc == BO_ShlAssign || Opc == BO_ShrAssign ||
+ Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign) {
+ // These are the operations that would not make sense with a null pointer
+ // no matter what the other expression is.
+ Diag(OpLoc, diag::warn_null_in_arithmetic_operation)
+ << (LeftNull ? lhs.get()->getSourceRange() : SourceRange())
+ << (RightNull ? rhs.get()->getSourceRange() : SourceRange());
+ } else if (Opc == BO_LE || Opc == BO_LT || Opc == BO_GE || Opc == BO_GT ||
+ Opc == BO_EQ || Opc == BO_NE) {
+ // These are the operations that would not make sense with a null pointer
+ // if the other expression is not a pointer.
+ if (LeftNull != RightNull &&
+ !LeftType->isAnyPointerType() &&
+ !LeftType->canDecayToPointerType() &&
+ !RightType->isAnyPointerType() &&
+ !RightType->canDecayToPointerType()) {
+ Diag(OpLoc, diag::warn_null_in_arithmetic_operation)
+ << (LeftNull ? lhs.get()->getSourceRange()
+ : rhs.get()->getSourceRange());
+ }
+ }
+ }
+ }
+
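A hedged sketch of the NULL-in-arithmetic check added above (assuming a C++ translation unit where NULL expands to the GNU __null extension, as it does with clang's stddef.h):

    // null_arith.cpp -- cases covered by warn_null_in_arithmetic_operation.
    #include <cstddef>

    int null_arith(int x, int *p) {
      int  a = x + NULL;   // NULL used as an integer operand: warns
      bool b = x == NULL;  // neither side is (or decays to) a pointer: warns
      bool c = p == NULL;  // ordinary null-pointer comparison: no warning
      return a + b + c;
    }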
switch (Opc) {
case BO_Assign:
ResultTy = CheckAssignmentOperands(lhs.get(), rhs, OpLoc, QualType());
@@ -8953,28 +7604,46 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
(BinOp::isComparisonOp(rhsopc) || BinOp::isBitwiseOp(rhsopc)))
return;
- if (BinOp::isComparisonOp(lhsopc))
+ if (BinOp::isComparisonOp(lhsopc)) {
+ Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
+ << SourceRange(lhs->getLocStart(), OpLoc)
+ << BinOp::getOpcodeStr(Opc) << BinOp::getOpcodeStr(lhsopc);
SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::warn_precedence_bitwise_rel)
- << SourceRange(lhs->getLocStart(), OpLoc)
- << BinOp::getOpcodeStr(Opc) << BinOp::getOpcodeStr(lhsopc),
Self.PDiag(diag::note_precedence_bitwise_silence)
<< BinOp::getOpcodeStr(lhsopc),
- lhs->getSourceRange(),
+ lhs->getSourceRange());
+ SuggestParentheses(Self, OpLoc,
Self.PDiag(diag::note_precedence_bitwise_first)
<< BinOp::getOpcodeStr(Opc),
SourceRange(cast<BinOp>(lhs)->getRHS()->getLocStart(), rhs->getLocEnd()));
- else if (BinOp::isComparisonOp(rhsopc))
+ } else if (BinOp::isComparisonOp(rhsopc)) {
+ Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
+ << SourceRange(OpLoc, rhs->getLocEnd())
+ << BinOp::getOpcodeStr(Opc) << BinOp::getOpcodeStr(rhsopc);
SuggestParentheses(Self, OpLoc,
- Self.PDiag(diag::warn_precedence_bitwise_rel)
- << SourceRange(OpLoc, rhs->getLocEnd())
- << BinOp::getOpcodeStr(Opc) << BinOp::getOpcodeStr(rhsopc),
Self.PDiag(diag::note_precedence_bitwise_silence)
<< BinOp::getOpcodeStr(rhsopc),
- rhs->getSourceRange(),
+ rhs->getSourceRange());
+ SuggestParentheses(Self, OpLoc,
Self.PDiag(diag::note_precedence_bitwise_first)
<< BinOp::getOpcodeStr(Opc),
- SourceRange(lhs->getLocEnd(), cast<BinOp>(rhs)->getLHS()->getLocStart()));
+ SourceRange(lhs->getLocStart(),
+ cast<BinOp>(rhs)->getLHS()->getLocStart()));
+ }
+}
+
+/// \brief It accepts a '&' expr that is inside a '|' one.
+/// Emit a diagnostic together with a fixit hint that wraps the '&' expression
+/// in parentheses.
+static void
+EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc,
+ BinaryOperator *Bop) {
+ assert(Bop->getOpcode() == BO_And);
+ Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(Self, Bop->getOperatorLoc(),
+ Self.PDiag(diag::note_bitwise_and_in_bitwise_or_silence),
+ Bop->getSourceRange());
}
/// \brief It accepts a '&&' expr that is inside a '||' one.
@@ -8984,12 +7653,11 @@ static void
EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
BinaryOperator *Bop) {
assert(Bop->getOpcode() == BO_LAnd);
+ Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or)
+ << Bop->getSourceRange() << OpLoc;
SuggestParentheses(Self, Bop->getOperatorLoc(),
- Self.PDiag(diag::warn_logical_and_in_logical_or)
- << Bop->getSourceRange() << OpLoc,
Self.PDiag(diag::note_logical_and_in_logical_or_silence),
- Bop->getSourceRange(),
- Self.PDiag(0), SourceRange());
+ Bop->getSourceRange());
}
/// \brief Returns true if the given expression can be evaluated as a constant
@@ -9043,13 +7711,28 @@ static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
}
}
+/// \brief Look for '&' in the left or right hand of a '|' expr.
+static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc,
+ Expr *OrArg) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrArg)) {
+ if (Bop->getOpcode() == BO_And)
+ return EmitDiagnosticForBitwiseAndInBitwiseOr(S, OpLoc, Bop);
+ }
+}
+
/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
/// precedence.
static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
SourceLocation OpLoc, Expr *lhs, Expr *rhs){
// Diagnose "arg1 'bitwise' arg2 'eq' arg3".
if (BinaryOperator::isBitwiseOp(Opc))
- return DiagnoseBitwisePrecedence(Self, Opc, OpLoc, lhs, rhs);
+ DiagnoseBitwisePrecedence(Self, Opc, OpLoc, lhs, rhs);
+
+ // Diagnose "arg1 & arg2 | arg3"
+ if (Opc == BO_Or && !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, lhs);
+ DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, rhs);
+ }
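A hedged illustration of the two precedence patterns DiagnoseBinOpPrecedence now covers, the new '&'-inside-'|' case and the existing '&&'-inside-'||' case, both of which route through the simplified SuggestParentheses:

    // binop_precedence.cpp -- operands are variables, so constant folding does not silence the warnings.
    int precedence(int a, int b, int c) {
      int  m = a & b | c;    // warn_bitwise_and_in_bitwise_or; fix-it suggests (a & b) | c
      bool l = a || b && c;  // warn_logical_and_in_logical_or; fix-it suggests a || (b && c)
      return m + l;
    }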
// Warn about arg1 || arg2 && arg3, as GCC 4.3+ does.
// We don't warn for 'assert(a || b && "bad")' since this is safe.
@@ -9274,6 +7957,29 @@ ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
Context.getPointerType(Context.VoidTy)));
}
+/// Given the last statement in a statement-expression, check whether
+/// the result is a producing expression (like a call to an
+/// ns_returns_retained function) and, if so, rebuild it to hoist the
+/// release out of the full-expression. Otherwise, return null.
+/// Cannot fail.
+static Expr *maybeRebuildARCConsumingStmt(Stmt *s) {
+ // Should always be wrapped with one of these.
+ ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(s);
+ if (!cleanups) return 0;
+
+ ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(cleanups->getSubExpr());
+ if (!cast || cast->getCastKind() != CK_ObjCConsumeObject)
+ return 0;
+
+ // Splice out the cast. This shouldn't modify any interesting
+ // features of the statement.
+ Expr *producer = cast->getSubExpr();
+ assert(producer->getType() == cast->getType());
+ assert(producer->getValueKind() == cast->getValueKind());
+ cleanups->setSubExpr(producer);
+ return cleanups;
+}
+
ExprResult
Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc) { // "({..})"
@@ -9301,6 +8007,7 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
LastLabelStmt = Label;
LastStmt = Label->getSubStmt();
}
+
if (Expr *LastE = dyn_cast<Expr>(LastStmt)) {
// Do function/array conversion on the last expression, but not
// lvalue-to-rvalue. However, initialize an unqualified type.
@@ -9310,12 +8017,24 @@ Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
Ty = LastExpr.get()->getType().getUnqualifiedType();
if (!Ty->isDependentType() && !LastExpr.get()->isTypeDependent()) {
- LastExpr = PerformCopyInitialization(
+ // In ARC, if the final expression ends in a consume, splice
+ // the consume out and bind it later. In the alternate case
+ // (when dealing with a retainable type), the result
+ // initialization will create a produce. In both cases the
+ // result will be +1, and we'll need to balance that out with
+ // a bind.
+ if (Expr *rebuiltLastStmt
+ = maybeRebuildARCConsumingStmt(LastExpr.get())) {
+ LastExpr = rebuiltLastStmt;
+ } else {
+ LastExpr = PerformCopyInitialization(
InitializedEntity::InitializeResult(LPLoc,
Ty,
false),
SourceLocation(),
- LastExpr);
+ LastExpr);
+ }
+
if (LastExpr.isInvalid())
return ExprError();
if (LastExpr.get() != 0) {
@@ -9757,7 +8476,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
// If we don't have a function type, just build one from nothing.
} else {
FunctionProtoType::ExtProtoInfo EPI;
- EPI.ExtInfo = FunctionType::ExtInfo(NoReturn, false, 0, CC_Default);
+ EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn);
BlockTy = Context.getFunctionType(RetTy, 0, 0, EPI);
}
@@ -9766,15 +8485,25 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BlockTy = Context.getBlockPointerType(BlockTy);
// If needed, diagnose invalid gotos and switches in the block.
- if (getCurFunction()->NeedsScopeChecking() && !hasAnyErrorsInThisFunction())
+ if (getCurFunction()->NeedsScopeChecking() &&
+ !hasAnyUnrecoverableErrorsInThisFunction())
DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
- BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
+ for (BlockDecl::capture_const_iterator ci = BSI->TheDecl->capture_begin(),
+ ce = BSI->TheDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType T = variable->getType();
+ QualType::DestructionKind destructKind = T.isDestructedType();
+ if (destructKind != QualType::DK_none)
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+ BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
const AnalysisBasedWarnings::Policy &WP = AnalysisWarnings.getDefaultPolicy();
PopFunctionOrBlockScope(&WP, Result->getBlockDecl(), Result);
+
return Owned(Result);
}
@@ -9818,8 +8547,41 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
<< OrigExpr->getType() << E->getSourceRange());
}
- // FIXME: Check that type is complete/non-abstract
- // FIXME: Warn if a non-POD type is passed in.
+ if (!TInfo->getType()->isDependentType()) {
+ if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
+ PDiag(diag::err_second_parameter_to_va_arg_incomplete)
+ << TInfo->getTypeLoc().getSourceRange()))
+ return ExprError();
+
+ if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getType(),
+ PDiag(diag::err_second_parameter_to_va_arg_abstract)
+ << TInfo->getTypeLoc().getSourceRange()))
+ return ExprError();
+
+ if (!TInfo->getType().isPODType(Context))
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_second_parameter_to_va_arg_not_pod)
+ << TInfo->getType()
+ << TInfo->getTypeLoc().getSourceRange();
+
+ // Check for va_arg where arguments of the given type will be promoted
+ // (i.e. this va_arg is guaranteed to have undefined behavior).
+ QualType PromoteType;
+ if (TInfo->getType()->isPromotableIntegerType()) {
+ PromoteType = Context.getPromotedIntegerType(TInfo->getType());
+ if (Context.typesAreCompatible(PromoteType, TInfo->getType()))
+ PromoteType = QualType();
+ }
+ if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float))
+ PromoteType = Context.DoubleTy;
+ if (!PromoteType.isNull())
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_second_parameter_to_va_arg_never_compatible)
+ << TInfo->getType()
+ << PromoteType
+ << TInfo->getTypeLoc().getSourceRange();
+ }
QualType T = TInfo->getType().getNonLValueExprType(Context);
return Owned(new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T));
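A hedged sketch of the second-argument checks BuildVAArgExpr now performs (incomplete and abstract types become errors; types that undergo default argument promotion get the 'never compatible' warning). The commented lines would only warn, but are left inert because reading them back would be undefined at run time:

    // vararg_checks.cpp -- illustrative use of the new va_arg checks.
    #include <cstdarg>

    void consume(int count, ...) {
      va_list ap;
      va_start(ap, count);
      double d = va_arg(ap, double);   // fine
      // float f = va_arg(ap, float);  // warn_second_parameter_to_va_arg_never_compatible:
      //                               // 'float' is promoted to 'double' through '...'
      // char  c = va_arg(ap, char);   // same warning: 'char' is promoted to 'int'
      va_end(ap);
      (void)d;
    }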
@@ -9913,6 +8675,11 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
DiagKind = diag::err_typecheck_incompatible_address_space;
break;
+
+
+ } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
+ DiagKind = diag::err_typecheck_incompatible_ownership;
+ break;
}
llvm_unreachable("unknown error case for discarding qualifiers!");
@@ -9950,6 +8717,9 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
case IncompatibleVectors:
DiagKind = diag::warn_incompatible_vectors;
break;
+ case IncompatibleObjCWeakRef:
+ DiagKind = diag::err_arc_weak_unavailable_assign;
+ break;
case Incompatible:
DiagKind = diag::err_typecheck_convert_incompatible;
isInvalid = true;
@@ -10027,7 +8797,10 @@ bool Sema::VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result){
void
Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext) {
ExprEvalContexts.push_back(
- ExpressionEvaluationContextRecord(NewContext, ExprTemporaries.size()));
+ ExpressionEvaluationContextRecord(NewContext,
+ ExprTemporaries.size(),
+ ExprNeedsCleanups));
+ ExprNeedsCleanups = false;
}
void
@@ -10062,15 +8835,27 @@ Sema::PopExpressionEvaluationContext() {
// temporaries that we may have created as part of the evaluation of
// the expression in that context: they aren't relevant because they
// will never be constructed.
- if (Rec.Context == Unevaluated &&
- ExprTemporaries.size() > Rec.NumTemporaries)
+ if (Rec.Context == Unevaluated) {
ExprTemporaries.erase(ExprTemporaries.begin() + Rec.NumTemporaries,
ExprTemporaries.end());
+ ExprNeedsCleanups = Rec.ParentNeedsCleanups;
+
+ // Otherwise, merge the contexts together.
+ } else {
+ ExprNeedsCleanups |= Rec.ParentNeedsCleanups;
+ }
// Destroy the popped expression evaluation record.
Rec.Destroy();
}
+void Sema::DiscardCleanupsInEvaluationContext() {
+ ExprTemporaries.erase(
+ ExprTemporaries.begin() + ExprEvalContexts.back().NumTemporaries,
+ ExprTemporaries.end());
+ ExprNeedsCleanups = false;
+}
+
/// \brief Note that the given declaration was referenced in the source code.
///
/// This routine should be invoked whenever a given declaration is referenced
@@ -10364,7 +9149,7 @@ void Sema::MarkDeclarationsReferencedInExpr(Expr *E) {
/// during overload resolution or within sizeof/alignof/typeof/typeid.
bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *stmt,
const PartialDiagnostic &PD) {
- switch (ExprEvalContexts.back().Context ) {
+ switch (ExprEvalContexts.back().Context) {
case Unevaluated:
// The argument will never be evaluated, so don't complain.
break;
@@ -10780,9 +9565,6 @@ ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *call) {
}
ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *msg) {
- ObjCMethodDecl *method = msg->getMethodDecl();
- assert(method && "__unknown_anytype message without result type?");
-
// Verify that this is a legal result type of a call.
if (DestType->isArrayType() || DestType->isFunctionType()) {
S.Diag(msg->getExprLoc(), diag::err_func_returning_array_function)
@@ -10790,8 +9572,11 @@ ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *msg) {
return ExprError();
}
- assert(method->getResultType() == S.Context.UnknownAnyTy);
- method->setResultType(DestType);
+ // Rewrite the method result type if available.
+ if (ObjCMethodDecl *method = msg->getMethodDecl()) {
+ assert(method->getResultType() == S.Context.UnknownAnyTy);
+ method->setResultType(DestType);
+ }
// Change the type of the message.
msg->setType(DestType.getNonReferenceType());
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 2f5a890e51de..e804fcd60c74 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -480,23 +480,63 @@ Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
/// ActOnCXXThrow - Parse throw expressions.
ExprResult
-Sema::ActOnCXXThrow(SourceLocation OpLoc, Expr *Ex) {
+Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
+ bool IsThrownVarInScope = false;
+ if (Ex) {
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-
+ // clause parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) {
+ for( ; S; S = S->getParent()) {
+ if (S->isDeclScope(Var)) {
+ IsThrownVarInScope = true;
+ break;
+ }
+
+ if (S->getFlags() &
+ (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
+ Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
+ Scope::TryScope))
+ break;
+ }
+ }
+ }
+ }
+
+ return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
+}
+
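As a hedged C++ illustration of the scope test ActOnCXXThrow now performs: the first throw operand below is a non-volatile automatic object whose scope ends with the function, so it satisfies the quoted [class.copymove]p31 criteria and IsThrownVarInScope is true; a catch-clause parameter is explicitly excluded by that same wording:

    // throw_elision.cpp -- copy/move elision into the exception object.
    #include <string>

    void raise() {
      std::string msg = "boom";
      throw msg;          // eligible: the copy/move into the exception object may be elided
    }

    void rethrow_copy() {
      try {
        raise();
      } catch (std::string &e) {
        throw e;          // catch-clause parameter: excluded by the rule, so it is copied
      }
    }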
+ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ bool IsThrownVarInScope) {
// Don't report an error if 'throw' is used in system headers.
if (!getLangOptions().CXXExceptions &&
!getSourceManager().isInSystemHeader(OpLoc))
Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
-
+
if (Ex && !Ex->isTypeDependent()) {
- ExprResult ExRes = CheckCXXThrowOperand(OpLoc, Ex);
+ ExprResult ExRes = CheckCXXThrowOperand(OpLoc, Ex, IsThrownVarInScope);
if (ExRes.isInvalid())
return ExprError();
Ex = ExRes.take();
}
- return Owned(new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc));
+
+ return Owned(new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc,
+ IsThrownVarInScope));
}
/// CheckCXXThrowOperand - Validate the operand of a throw.
-ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E) {
+ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
+ bool IsThrownVarInScope) {
// C++ [except.throw]p3:
// A throw-expression initializes a temporary object, called the exception
// object, the type of which is determined by removing any top-level
@@ -535,14 +575,28 @@ ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E) {
// Initialize the exception result. This implicitly weeds out
// abstract types or types with inaccessible copy constructors.
- const VarDecl *NRVOVariable = getCopyElisionCandidate(QualType(), E, false);
-
- // FIXME: Determine whether we can elide this copy per C++0x [class.copy]p32.
+
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-clause
+ // parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
+ const VarDecl *NRVOVariable = 0;
+ if (IsThrownVarInScope)
+ NRVOVariable = getCopyElisionCandidate(QualType(), E, false);
+
InitializedEntity Entity =
InitializedEntity::InitializeException(ThrowLoc, E->getType(),
- /*NRVO=*/false);
+ /*NRVO=*/NRVOVariable != 0);
Res = PerformMoveOrCopyInitialization(Entity, NRVOVariable,
- QualType(), E);
+ QualType(), E,
+ IsThrownVarInScope);
if (Res.isInvalid())
return ExprError();
E = Res.take();
@@ -691,7 +745,8 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
ExprValueKind VK = VK_RValue;
CXXCastPath BasePath;
ExprResult CastExpr =
- CheckCastTypes(TInfo->getTypeLoc().getSourceRange(), Ty, Exprs[0],
+ CheckCastTypes(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getTypeLoc().getSourceRange(), Ty, Exprs[0],
Kind, VK, BasePath,
/*FunctionalStyle=*/true);
if (CastExpr.isInvalid())
@@ -827,8 +882,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0, /*OwnedDecl=*/0,
- /*AllowAuto=*/true);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0);
QualType AllocType = TInfo->getType();
if (D.isInvalidType())
return ExprError();
@@ -902,8 +956,16 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
return ExprError();
- QualType ResultType = Context.getPointerType(AllocType);
+  // In ARC, infer 'retaining' ownership for the allocated object.
+ if (getLangOptions().ObjCAutoRefCount &&
+ AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ AllocType->isObjCLifetimeType()) {
+ AllocType = Context.getLifetimeQualifiedType(AllocType,
+ AllocType->getObjCARCImplicitLifetime());
+ }
+ QualType ResultType = Context.getPointerType(AllocType);
+
// C++ 5.3.4p6: "The expression in a direct-new-declarator shall have integral
// or enumeration type with a non-negative value."
if (ArraySize && !ArraySize->isTypeDependent()) {
@@ -964,6 +1026,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
+ // ARC: warn about ABI issues.
+ if (getLangOptions().ObjCAutoRefCount) {
+ QualType BaseAllocType = Context.getBaseElementType(AllocType);
+ if (BaseAllocType.hasStrongOrWeakObjCLifetime())
+ Diag(StartLoc, diag::warn_err_new_delete_object_array)
+ << 0 << BaseAllocType;
+ }
+
// Note that we do *not* convert the argument in any way. It can
// be signed, larger than size_t, whatever.
}
@@ -1078,7 +1148,17 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
if (OperatorDelete)
MarkDeclarationReferenced(StartLoc, OperatorDelete);
- // FIXME: Also check that the destructor is accessible. (C++ 5.3.4p16)
+ // C++0x [expr.new]p17:
+ // If the new expression creates an array of objects of class type,
+ // access and ambiguity control are done for the destructor.
+ if (ArraySize && Constructor) {
+ if (CXXDestructorDecl *dtor = LookupDestructor(Constructor->getParent())) {
+ MarkDeclarationReferenced(StartLoc, dtor);
+ CheckDestructorAccess(StartLoc, dtor,
+ PDiag(diag::err_access_dtor)
+ << Context.getBaseElementType(AllocType));
+ }
+ }
PlacementArgs.release();
ConstructorArgs.release();
@@ -1122,6 +1202,15 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
else if (unsigned AddressSpace = AllocType.getAddressSpace())
return Diag(Loc, diag::err_address_space_qualified_new)
<< AllocType.getUnqualifiedType() << AddressSpace;
+ else if (getLangOptions().ObjCAutoRefCount) {
+ if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
+ QualType BaseAllocType = Context.getBaseElementType(AT);
+ if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ BaseAllocType->isObjCLifetimeType())
+ return Diag(Loc, diag::err_arc_new_array_without_ownership)
+ << BaseAllocType;
+ }
+ }
return false;
}
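
A rough Objective-C++ sketch (assuming -fobjc-arc; behavior read off the two ARC hunks above) of what the allocated-type checks accept and reject:

    void demo() {
      new id[4];                       // error: array of retainable type with no
                                       // explicit ownership qualifier
      new __unsafe_unretained id[4];   // OK: ownership stated explicitly
      new __strong id[2];              // accepted, but array new/delete of
                                       // __strong/__weak objects is diagnosed
                                       // (warn_err_new_delete_object_array)
    }
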
@@ -1774,8 +1863,9 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// delete-expression; it is not necessary to cast away the constness
// (5.2.11) of the pointer expression before it is used as the operand
// of the delete-expression. ]
- Ex = ImpCastExprToType(Ex.take(), Context.getPointerType(Context.VoidTy),
- CK_NoOp);
+ if (!Context.hasSameType(Ex.get()->getType(), Context.VoidPtrTy))
+ Ex = Owned(ImplicitCastExpr::Create(Context, Context.VoidPtrTy, CK_NoOp,
+ Ex.take(), 0, VK_RValue));
if (Pointee->isArrayType() && !ArrayForm) {
Diag(StartLoc, diag::warn_delete_array_type)
@@ -1830,6 +1920,14 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
if (!dtor || !dtor->isVirtual())
Diag(StartLoc, diag::warn_delete_non_virtual_dtor) << PointeeElem;
}
+
+ } else if (getLangOptions().ObjCAutoRefCount &&
+ PointeeElem->isObjCLifetimeType() &&
+ (PointeeElem.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ PointeeElem.getObjCLifetime() == Qualifiers::OCL_Weak) &&
+ ArrayForm) {
+ Diag(StartLoc, diag::warn_err_new_delete_object_array)
+ << 1 << PointeeElem;
}
if (!OperatorDelete) {
@@ -1988,11 +2086,12 @@ static ExprResult BuildCXXCastArgument(Sema &S,
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence &ICS,
- AssignmentAction Action, bool CStyle) {
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion: {
ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
- Action, CStyle);
+ Action, CCK);
if (Res.isInvalid())
return ExprError();
From = Res.take();
@@ -2027,7 +2126,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ExprResult Res =
PerformImplicitConversion(From, BeforeToType,
ICS.UserDefined.Before, AA_Converting,
- CStyle);
+ CCK);
if (Res.isInvalid())
return ExprError();
From = Res.take();
@@ -2047,7 +2146,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From = CastArg.take();
return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
- AA_Converting, CStyle);
+ AA_Converting, CCK);
}
case ImplicitConversionSequence::AmbiguousConversion:
@@ -2076,13 +2175,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
- AssignmentAction Action, bool CStyle) {
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
+ bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast);
+
// Overall FIXME: we are recomputing too many types here and doing far too
// much extra work. What this means is that we need to keep track of more
// information that is computed when we try the implicit conversion initially,
// so that we don't need to recompute anything here.
QualType FromType = From->getType();
-
+
if (SCS.CopyConstructor) {
// FIXME: When can ToType be a reference type?
assert(!ToType->isReferenceType());
@@ -2149,12 +2251,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Array_To_Pointer:
FromType = Context.getArrayDecayedType(FromType);
- From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay).take();
+ From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Function_To_Pointer:
FromType = Context.getPointerType(FromType);
- From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay).take();
+ From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
default:
@@ -2178,17 +2282,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (CheckExceptionSpecCompatibility(From, ToType))
return ExprError();
- From = ImpCastExprToType(From, ToType, CK_NoOp).take();
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Integral_Promotion:
case ICK_Integral_Conversion:
- From = ImpCastExprToType(From, ToType, CK_IntegralCast).take();
+ From = ImpCastExprToType(From, ToType, CK_IntegralCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Floating_Promotion:
case ICK_Floating_Conversion:
- From = ImpCastExprToType(From, ToType, CK_FloatingCast).take();
+ From = ImpCastExprToType(From, ToType, CK_FloatingCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Complex_Promotion:
@@ -2206,21 +2313,26 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
} else {
CK = CK_IntegralComplexCast;
}
- From = ImpCastExprToType(From, ToType, CK).take();
+ From = ImpCastExprToType(From, ToType, CK,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
}
case ICK_Floating_Integral:
if (ToType->isRealFloatingType())
- From = ImpCastExprToType(From, ToType, CK_IntegralToFloating).take();
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/0, CCK).take();
else
- From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral).take();
+ From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Compatible_Conversion:
- From = ImpCastExprToType(From, ToType, CK_NoOp).take();
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
+ case ICK_Writeback_Conversion:
case ICK_Pointer_Conversion: {
if (SCS.IncompatibleObjC && Action != AA_Casting) {
// Diagnose incompatible Objective-C conversions
@@ -2234,17 +2346,30 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
diag::ext_typecheck_convert_incompatible_pointer)
<< From->getType() << ToType << Action
<< From->getSourceRange();
-
+
if (From->getType()->isObjCObjectPointerType() &&
ToType->isObjCObjectPointerType())
EmitRelatedResultTypeNote(From);
- }
-
+ }
+ else if (getLangOptions().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(ToType,
+ From->getType())) {
+ if (Action == AA_Initializing)
+ Diag(From->getSourceRange().getBegin(),
+ diag::err_arc_weak_unavailable_assign);
+ else
+ Diag(From->getSourceRange().getBegin(),
+ diag::err_arc_convesion_of_weak_unavailable)
+ << (Action == AA_Casting) << From->getType() << ToType
+ << From->getSourceRange();
+ }
+
CastKind Kind = CK_Invalid;
CXXCastPath BasePath;
if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle))
return ExprError();
- From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath).take();
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .take();
break;
}
@@ -2255,13 +2380,15 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return ExprError();
if (CheckExceptionSpecCompatibility(From, ToType))
return ExprError();
- From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath).take();
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .take();
break;
}
case ICK_Boolean_Conversion:
From = ImpCastExprToType(From, Context.BoolTy,
- ScalarTypeToBooleanCastKind(FromType)).take();
+ ScalarTypeToBooleanCastKind(FromType),
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Derived_To_Base: {
@@ -2276,16 +2403,18 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From = ImpCastExprToType(From, ToType.getNonReferenceType(),
CK_DerivedToBase, CastCategory(From),
- &BasePath).take();
+ &BasePath, CCK).take();
break;
}
case ICK_Vector_Conversion:
- From = ImpCastExprToType(From, ToType, CK_BitCast).take();
+ From = ImpCastExprToType(From, ToType, CK_BitCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Vector_Splat:
- From = ImpCastExprToType(From, ToType, CK_VectorSplat).take();
+ From = ImpCastExprToType(From, ToType, CK_VectorSplat,
+ VK_RValue, /*BasePath=*/0, CCK).take();
break;
case ICK_Complex_Real:
@@ -2321,27 +2450,30 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// _Complex x -> x
From = ImpCastExprToType(From, ElType,
isFloatingComplex ? CK_FloatingComplexToReal
- : CK_IntegralComplexToReal).take();
+ : CK_IntegralComplexToReal,
+ VK_RValue, /*BasePath=*/0, CCK).take();
// x -> y
if (Context.hasSameUnqualifiedType(ElType, ToType)) {
// do nothing
} else if (ToType->isRealFloatingType()) {
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating).take();
+ isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/0, CCK).take();
} else {
assert(ToType->isIntegerType());
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast).take();
+ isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
}
}
break;
case ICK_Block_Pointer_Conversion: {
- From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
- VK_RValue).take();
- break;
- }
+ From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+ }
case ICK_TransparentUnionConversion: {
ExprResult FromRes = Owned(From);
@@ -2376,7 +2508,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ExprValueKind VK = ToType->isReferenceType() ?
CastCategory(From) : VK_RValue;
From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
- CK_NoOp, VK).take();
+ CK_NoOp, VK, /*BasePath=*/0, CCK).take();
if (SCS.DeprecatedStringLiteralToCharPtr &&
!getLangOptions().WritableStrings)
@@ -2553,6 +2685,23 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
case UTT_IsObject:
return T->isObjectType();
case UTT_IsScalar:
+ // Note: semantic analysis depends on Objective-C lifetime types to be
+ // considered scalar types. However, such types do not actually behave
+ // like scalar types at run time (since they may require retain/release
+ // operations), so we report them as non-scalar.
+ if (T->isObjCLifetimeType()) {
+ switch (T.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+ }
+ }
+
return T->isScalarType();
case UTT_IsCompound:
return T->isCompoundType();
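
A hedged Objective-C++ illustration of the UTT_IsScalar note above (assuming -fobjc-arc and C++0x static_assert):

    // Lifetime-qualified object pointers are treated as scalars inside Sema,
    // but they need retain/release work at run time, so the trait says "no";
    // __unsafe_unretained (OCL_ExplicitNone) needs no such work and stays scalar.
    static_assert(__is_scalar(__unsafe_unretained id), "no lifetime operations");
    static_assert(!__is_scalar(__strong id), "requires retain/release");
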
@@ -2566,13 +2715,13 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
case UTT_IsVolatile:
return T.isVolatileQualified();
case UTT_IsTrivial:
- return T->isTrivialType();
+ return T.isTrivialType(Self.Context);
case UTT_IsTriviallyCopyable:
- return T->isTriviallyCopyableType();
+ return T.isTriviallyCopyableType(Self.Context);
case UTT_IsStandardLayout:
return T->isStandardLayoutType();
case UTT_IsPOD:
- return T->isPODType();
+ return T.isPODType(Self.Context);
case UTT_IsLiteral:
return T->isLiteralType();
case UTT_IsEmpty:
@@ -2605,7 +2754,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
// If __is_pod (type) is true then the trait is true, else if type is
// a cv class or union type (or array thereof) with a trivial default
// constructor ([class.ctor]) then the trait is true, else it is false.
- if (T->isPODType())
+ if (T.isPODType(Self.Context))
return true;
if (const RecordType *RT =
C.getBaseElementType(T)->getAs<RecordType>())
@@ -2617,7 +2766,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
// the trait is true, else if type is a cv class or union type
// with a trivial copy constructor ([class.copy]) then the trait
// is true, else it is false.
- if (T->isPODType() || T->isReferenceType())
+ if (T.isPODType(Self.Context) || T->isReferenceType())
return true;
if (const RecordType *RT = T->getAs<RecordType>())
return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyConstructor();
@@ -2637,7 +2786,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
if (C.getBaseElementType(T).isConstQualified())
return false;
- if (T->isPODType())
+ if (T.isPODType(Self.Context))
return true;
if (const RecordType *RT = T->getAs<RecordType>())
return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyAssignment();
@@ -2649,8 +2798,14 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
// type (or array thereof) with a trivial destructor
// ([class.dtor]) then the trait is true, else it is
// false.
- if (T->isPODType() || T->isReferenceType())
+ if (T.isPODType(Self.Context) || T->isReferenceType())
+ return true;
+
+ // Objective-C++ ARC: autorelease types don't require destruction.
+ if (T->isObjCLifetimeType() &&
+ T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
return true;
+
if (const RecordType *RT =
C.getBaseElementType(T)->getAs<RecordType>())
return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor();
@@ -2668,8 +2823,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
return false;
if (T->isReferenceType())
return false;
- if (T->isPODType())
- return true;
+ if (T.isPODType(Self.Context) || T->isObjCLifetimeType())
+ return true;
if (const RecordType *RT = T->getAs<RecordType>()) {
CXXRecordDecl* RD = cast<CXXRecordDecl>(RT->getDecl());
if (RD->hasTrivialCopyAssignment())
@@ -2704,7 +2859,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
// if type is a cv class or union type with copy constructors that are
// known not to throw an exception then the trait is true, else it is
// false.
- if (T->isPODType() || T->isReferenceType())
+ if (T.isPODType(C) || T->isReferenceType() || T->isObjCLifetimeType())
return true;
if (const RecordType *RT = T->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -2744,7 +2899,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
// true, else if type is a cv class or union type (or array
// thereof) with a default constructor that is known not to
// throw an exception then the trait is true, else it is false.
- if (T->isPODType())
+ if (T.isPODType(C) || T->isObjCLifetimeType())
return true;
if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -3089,6 +3244,20 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &lex, ExprResult &rex,
ExprValueKind &VK,
SourceLocation Loc,
bool isIndirect) {
+ assert(!lex.get()->getType()->isPlaceholderType() &&
+ !rex.get()->getType()->isPlaceholderType() &&
+ "placeholders should have been weeded out by now");
+
+ // The LHS undergoes lvalue conversions if this is ->*.
+ if (isIndirect) {
+ lex = DefaultLvalueConversion(lex.take());
+ if (lex.isInvalid()) return QualType();
+ }
+
+ // The RHS always undergoes lvalue conversions.
+ rex = DefaultLvalueConversion(rex.take());
+ if (rex.isInvalid()) return QualType();
+
const char *OpSpelling = isIndirect ? "->*" : ".*";
// C++ 5.5p2
// The binary operator .* [p3: ->*] binds its second operand, which shall
@@ -3556,7 +3725,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, Ex
// Extension: conditional operator involving vector types.
if (LTy->isVectorType() || RTy->isVectorType())
- return CheckVectorOperands(QuestionLoc, LHS, RHS);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
// -- The second and third operands have arithmetic or enumeration type;
// the usual arithmetic conversions are performed to bring them to a
@@ -3828,17 +3997,83 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (!E)
return ExprError();
- if (!Context.getLangOptions().CPlusPlus)
+ assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");
+
+ // If the result is a glvalue, we shouldn't bind it.
+ if (!E->isRValue())
return Owned(E);
- assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");
+ // In ARC, calls that return a retainable type can return retained,
+ // in which case we have to insert a consuming cast.
+ if (getLangOptions().ObjCAutoRefCount &&
+ E->getType()->isObjCRetainableType()) {
+
+ bool ReturnsRetained;
+
+ // For actual calls, we compute this by examining the type of the
+ // called value.
+ if (CallExpr *Call = dyn_cast<CallExpr>(E)) {
+ Expr *Callee = Call->getCallee()->IgnoreParens();
+ QualType T = Callee->getType();
+
+ if (T == Context.BoundMemberTy) {
+ // Handle pointer-to-members.
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Callee))
+ T = BinOp->getRHS()->getType();
+ else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Callee))
+ T = Mem->getMemberDecl()->getType();
+ }
+
+ if (const PointerType *Ptr = T->getAs<PointerType>())
+ T = Ptr->getPointeeType();
+ else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
+ T = Ptr->getPointeeType();
+ else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
+ T = MemPtr->getPointeeType();
+
+ const FunctionType *FTy = T->getAs<FunctionType>();
+ assert(FTy && "call to value not of function type?");
+ ReturnsRetained = FTy->getExtInfo().getProducesResult();
+
+ // ActOnStmtExpr arranges things so that StmtExprs of retainable
+ // type always produce a +1 object.
+ } else if (isa<StmtExpr>(E)) {
+ ReturnsRetained = true;
+
+ // For message sends and property references, we try to find an
+ // actual method. FIXME: we should infer retention by selector in
+ // cases where we don't have an actual method.
+ } else {
+ Decl *D = 0;
+ if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) {
+ D = Send->getMethodDecl();
+ } else {
+ CastExpr *CE = cast<CastExpr>(E);
+ // FIXME. What other cast kinds to check for?
+ if (CE->getCastKind() == CK_ObjCProduceObject ||
+ CE->getCastKind() == CK_LValueToRValue)
+ return MaybeBindToTemporary(CE->getSubExpr());
+ assert(CE->getCastKind() == CK_GetObjCProperty);
+ const ObjCPropertyRefExpr *PRE = CE->getSubExpr()->getObjCProperty();
+ D = (PRE->isImplicitProperty() ? PRE->getImplicitPropertyGetter() : 0);
+ }
- const RecordType *RT = E->getType()->getAs<RecordType>();
- if (!RT)
+ ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());
+ }
+
+ ExprNeedsCleanups = true;
+
+ CastKind ck = (ReturnsRetained ? CK_ObjCConsumeObject
+ : CK_ObjCReclaimReturnedObject);
+ return Owned(ImplicitCastExpr::Create(Context, E->getType(), ck, E, 0,
+ VK_RValue));
+ }
+
+ if (!getLangOptions().CPlusPlus)
return Owned(E);
- // If the result is a glvalue, we shouldn't bind it.
- if (E->Classify(Context).isGLValue())
+ const RecordType *RT = E->getType()->getAs<RecordType>();
+ if (!RT)
return Owned(E);
// That should be enough to guarantee that this type is complete.
@@ -3847,15 +4082,18 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (RD->isInvalidDecl() || RD->hasTrivialDestructor())
return Owned(E);
- CXXTemporary *Temp = CXXTemporary::Create(Context, LookupDestructor(RD));
- ExprTemporaries.push_back(Temp);
- if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
+
+ CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor);
+ if (Destructor) {
MarkDeclarationReferenced(E->getExprLoc(), Destructor);
CheckDestructorAccess(E->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_temp)
<< E->getType());
+
+ ExprTemporaries.push_back(Temp);
+ ExprNeedsCleanups = true;
}
- // FIXME: Add the temporary to the temporaries vector.
return Owned(CXXBindTemporaryExpr::Create(Context, Temp, E));
}
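
A small Objective-C++ sketch of the "+1 versus +0 return" distinction computed above; the function names are illustrative only:

    __attribute__((ns_returns_retained)) id makeRetained(void);   // returns at +1
    id makeUnretained(void);                                       // returns at +0

    void consume() {
      id a = makeRetained();    // ARC consumes the existing retain
                                // (CK_ObjCConsumeObject above)
      id b = makeUnretained();  // ARC reclaims/retains the returned value
                                // (CK_ObjCReclaimReturnedObject above)
      (void)a; (void)b;         // both released when they leave scope
    }
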
@@ -3864,14 +4102,16 @@ Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
unsigned FirstTemporary = ExprEvalContexts.back().NumTemporaries;
assert(ExprTemporaries.size() >= FirstTemporary);
- if (ExprTemporaries.size() == FirstTemporary)
+ assert(ExprNeedsCleanups || ExprTemporaries.size() == FirstTemporary);
+ if (!ExprNeedsCleanups)
return SubExpr;
Expr *E = ExprWithCleanups::Create(Context, SubExpr,
- &ExprTemporaries[FirstTemporary],
+ ExprTemporaries.begin() + FirstTemporary,
ExprTemporaries.size() - FirstTemporary);
ExprTemporaries.erase(ExprTemporaries.begin() + FirstTemporary,
ExprTemporaries.end());
+ ExprNeedsCleanups = false;
return E;
}
@@ -3887,9 +4127,7 @@ Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
assert(SubStmt && "sub statement can't be null!");
- unsigned FirstTemporary = ExprEvalContexts.back().NumTemporaries;
- assert(ExprTemporaries.size() >= FirstTemporary);
- if (ExprTemporaries.size() == FirstTemporary)
+ if (!ExprNeedsCleanups)
return SubStmt;
// FIXME: In order to attach the temporaries, wrap the statement into
@@ -4047,17 +4285,35 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
QualType DestructedType = DestructedTypeInfo->getType();
SourceLocation DestructedTypeStart
= DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin();
- if (!DestructedType->isDependentType() && !ObjectType->isDependentType() &&
- !Context.hasSameUnqualifiedType(DestructedType, ObjectType)) {
- Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
- << ObjectType << DestructedType << Base->getSourceRange()
- << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
-
- // Recover by setting the destructed type to the object type.
- DestructedType = ObjectType;
- DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
+ if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) {
+ Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ DestructedTypeStart);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ } else if (DestructedType.getObjCLifetime() !=
+ ObjectType.getObjCLifetime()) {
+
+ if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
+ // Okay: just pretend that the user provided the correctly-qualified
+ // type.
+ } else {
+ Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+ }
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
DestructedTypeStart);
- Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ }
}
}
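
For orientation, a tiny C++ sketch of the pseudo-destructor recovery handled above; the ARC branch additionally tolerates a missing lifetime qualifier on the named type:

    typedef float F;

    void destroy(float *p, double *q) {
      p->~F();   // OK: the named type matches the object type
      q->~F();   // error: type mismatch; Sema recovers by pretending the
                 // user named the object type (double)
    }
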
@@ -4293,7 +4549,16 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// [Except in specific positions,] an lvalue that does not have
// array type is converted to the value stored in the
// designated object (and is no longer an lvalue).
- if (E->isRValue()) return Owned(E);
+ if (E->isRValue()) {
+ // In C, function designators (i.e. expressions of function type)
+ // are r-values, but we still want to do function-to-pointer decay
+ // on them. This is both technically correct and convenient for
+ // some clients.
+ if (!getLangOptions().CPlusPlus && E->getType()->isFunctionType())
+ return DefaultFunctionArrayConversion(E);
+
+ return Owned(E);
+ }
// We always want to do this on ObjC property references.
if (E->getObjectKind() == OK_ObjCProperty) {
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
new file mode 100644
index 000000000000..2488dc8849dd
--- /dev/null
+++ b/lib/Sema/SemaExprMember.cpp
@@ -0,0 +1,1594 @@
+//===--- SemaExprMember.cpp - Semantic Analysis for Expressions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for member access expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+using namespace sema;
+
+/// Determines if the given class is provably not derived from all of
+/// the prospective base classes.
+static bool IsProvablyNotDerivedFrom(Sema &SemaRef,
+ CXXRecordDecl *Record,
+ const llvm::SmallPtrSet<CXXRecordDecl*, 4> &Bases) {
+ if (Bases.count(Record->getCanonicalDecl()))
+ return false;
+
+ RecordDecl *RD = Record->getDefinition();
+ if (!RD) return false;
+ Record = cast<CXXRecordDecl>(RD);
+
+ for (CXXRecordDecl::base_class_iterator I = Record->bases_begin(),
+ E = Record->bases_end(); I != E; ++I) {
+ CanQualType BaseT = SemaRef.Context.getCanonicalType((*I).getType());
+ CanQual<RecordType> BaseRT = BaseT->getAs<RecordType>();
+ if (!BaseRT) return false;
+
+ CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
+ if (!IsProvablyNotDerivedFrom(SemaRef, BaseRecord, Bases))
+ return false;
+ }
+
+ return true;
+}
+
+enum IMAKind {
+ /// The reference is definitely not an instance member access.
+ IMA_Static,
+
+ /// The reference may be an implicit instance member access.
+ IMA_Mixed,
+
+ /// The reference may be to an instance member, but it is invalid if
+ /// so, because the context is not an instance method.
+ IMA_Mixed_StaticContext,
+
+ /// The reference may be to an instance member, but it is invalid if
+ /// so, because the context is from an unrelated class.
+ IMA_Mixed_Unrelated,
+
+ /// The reference is definitely an implicit instance member access.
+ IMA_Instance,
+
+ /// The reference may be to an unresolved using declaration.
+ IMA_Unresolved,
+
+ /// The reference may be to an unresolved using declaration and the
+ /// context is not an instance method.
+ IMA_Unresolved_StaticContext,
+
+  /// All possible referents are instance members and the current
+ /// context is not an instance method.
+ IMA_Error_StaticContext,
+
+  /// All possible referents are instance members of an unrelated
+ /// class.
+ IMA_Error_Unrelated
+};
+
+/// The given lookup names class member(s) and is not being used for
+/// an address-of-member expression. Classify the type of access
+/// according to whether it's possible that this reference names an
+/// instance member. This is best-effort; it is okay to
+/// conservatively answer "yes", in which case some errors will simply
+/// not be caught until template-instantiation.
+static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
+ Scope *CurScope,
+ const LookupResult &R) {
+ assert(!R.empty() && (*R.begin())->isCXXClassMember());
+
+ DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
+
+ bool isStaticContext =
+ (!isa<CXXMethodDecl>(DC) ||
+ cast<CXXMethodDecl>(DC)->isStatic());
+
+ // C++0x [expr.prim]p4:
+ // Otherwise, if a member-declarator declares a non-static data member
+ // of a class X, the expression this is a prvalue of type "pointer to X"
+ // within the optional brace-or-equal-initializer.
+ if (CurScope->getFlags() & Scope::ThisScope)
+ isStaticContext = false;
+
+ if (R.isUnresolvableResult())
+ return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved;
+
+ // Collect all the declaring classes of instance members we find.
+ bool hasNonInstance = false;
+ bool hasField = false;
+ llvm::SmallPtrSet<CXXRecordDecl*, 4> Classes;
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ if (D->isCXXInstanceMember()) {
+ if (dyn_cast<FieldDecl>(D))
+ hasField = true;
+
+ CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
+ Classes.insert(R->getCanonicalDecl());
+ }
+ else
+ hasNonInstance = true;
+ }
+
+ // If we didn't find any instance members, it can't be an implicit
+ // member reference.
+ if (Classes.empty())
+ return IMA_Static;
+
+ // If the current context is not an instance method, it can't be
+ // an implicit member reference.
+ if (isStaticContext) {
+ if (hasNonInstance)
+ return IMA_Mixed_StaticContext;
+
+ if (SemaRef.getLangOptions().CPlusPlus0x && hasField) {
+ // C++0x [expr.prim.general]p10:
+ // An id-expression that denotes a non-static data member or non-static
+ // member function of a class can only be used:
+ // (...)
+ // - if that id-expression denotes a non-static data member and it
+ // appears in an unevaluated operand.
+ const Sema::ExpressionEvaluationContextRecord& record
+ = SemaRef.ExprEvalContexts.back();
+ bool isUnevaluatedExpression = (record.Context == Sema::Unevaluated);
+ if (isUnevaluatedExpression)
+ return IMA_Mixed_StaticContext;
+ }
+
+ return IMA_Error_StaticContext;
+ }
+
+ CXXRecordDecl *contextClass;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ contextClass = MD->getParent()->getCanonicalDecl();
+ else
+ contextClass = cast<CXXRecordDecl>(DC);
+
+ // [class.mfct.non-static]p3:
+ // ...is used in the body of a non-static member function of class X,
+ // if name lookup (3.4.1) resolves the name in the id-expression to a
+ // non-static non-type member of some class C [...]
+ // ...if C is not X or a base class of X, the class member access expression
+ // is ill-formed.
+ if (R.getNamingClass() &&
+ contextClass != R.getNamingClass()->getCanonicalDecl() &&
+ contextClass->isProvablyNotDerivedFrom(R.getNamingClass()))
+ return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
+
+ // If we can prove that the current context is unrelated to all the
+ // declaring classes, it can't be an implicit member reference (in
+ // which case it's an error if any of those members are selected).
+ if (IsProvablyNotDerivedFrom(SemaRef, contextClass, Classes))
+ return (hasNonInstance ? IMA_Mixed_Unrelated : IMA_Error_Unrelated);
+
+ return (hasNonInstance ? IMA_Mixed : IMA_Instance);
+}
+
+/// Diagnose a reference to a field with no object available.
+static void DiagnoseInstanceReference(Sema &SemaRef,
+ const CXXScopeSpec &SS,
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
+ SourceLocation Loc = nameInfo.getLoc();
+ SourceRange Range(Loc);
+ if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
+
+ if (isa<FieldDecl>(rep) || isa<IndirectFieldDecl>(rep)) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(SemaRef.CurContext)) {
+ if (MD->isStatic()) {
+ // "invalid use of member 'x' in static member function"
+ SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
+ << Range << nameInfo.getName();
+ return;
+ }
+ }
+
+ SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
+ << nameInfo.getName() << Range;
+ return;
+ }
+
+ SemaRef.Diag(Loc, diag::err_member_call_without_object) << Range;
+}
+
+/// Builds an expression which might be an implicit member expression.
+ExprResult
+Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ switch (ClassifyImplicitMemberAccess(*this, CurScope, R)) {
+ case IMA_Instance:
+ return BuildImplicitMemberExpr(SS, R, TemplateArgs, true);
+
+ case IMA_Mixed:
+ case IMA_Mixed_Unrelated:
+ case IMA_Unresolved:
+ return BuildImplicitMemberExpr(SS, R, TemplateArgs, false);
+
+ case IMA_Static:
+ case IMA_Mixed_StaticContext:
+ case IMA_Unresolved_StaticContext:
+ if (TemplateArgs)
+ return BuildTemplateIdExpr(SS, R, false, *TemplateArgs);
+ return BuildDeclarationNameExpr(SS, R, false);
+
+ case IMA_Error_StaticContext:
+ case IMA_Error_Unrelated:
+ DiagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return ExprError();
+ }
+
+ llvm_unreachable("unexpected instance member access kind");
+ return ExprError();
+}
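
A short C++ sketch of the classifications handled above: an implicit member access becomes '(*this).member', and is rejected in a static context:

    struct X {
      int n;
      static int s;

      void set()         { n = 1; }   // IMA_Instance: rewritten as (*this).n = 1
      static void bump() { s = 2; }   // IMA_Static: ordinary declaration reference
      static void bad()  { n = 3; }   // IMA_Error_StaticContext: invalid use of
                                      // member 'n' in a static member function
    };
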
+
+/// Check an ext-vector component access expression.
+///
+/// VK should be set in advance to the value kind of the base
+/// expression.
+static QualType
+CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
+ SourceLocation OpLoc, const IdentifierInfo *CompName,
+ SourceLocation CompLoc) {
+ // FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements,
+ // see FIXME there.
+ //
+ // FIXME: This logic can be greatly simplified by splitting it along
+ // halving/not halving and reworking the component checking.
+ const ExtVectorType *vecType = baseType->getAs<ExtVectorType>();
+
+ // The vector accessor can't exceed the number of elements.
+ const char *compStr = CompName->getNameStart();
+
+ // This flag determines whether or not the component is one of the four
+ // special names that indicate a subset of exactly half the elements are
+ // to be selected.
+ bool HalvingSwizzle = false;
+
+ // This flag determines whether or not CompName has an 's' char prefix,
+ // indicating that it is a string of hex values to be used as vector indices.
+ bool HexSwizzle = *compStr == 's' || *compStr == 'S';
+
+ bool HasRepeated = false;
+ bool HasIndex[16] = {};
+
+ int Idx;
+
+ // Check that we've found one of the special components, or that the component
+  // names come from the same set.
+ if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
+ !strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
+ HalvingSwizzle = true;
+ } else if (!HexSwizzle &&
+ (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
+ do {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
+ } else {
+ if (HexSwizzle) compStr++;
+ while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ }
+ }
+
+ if (!HalvingSwizzle && *compStr) {
+ // We didn't get to the end of the string. This means the component names
+ // didn't come from the same set *or* we encountered an illegal name.
+ S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
+ << llvm::StringRef(compStr, 1) << SourceRange(CompLoc);
+ return QualType();
+ }
+
+ // Ensure no component accessor exceeds the width of the vector type it
+ // operates on.
+ if (!HalvingSwizzle) {
+ compStr = CompName->getNameStart();
+
+ if (HexSwizzle)
+ compStr++;
+
+ while (*compStr) {
+ if (!vecType->isAccessorWithinNumElements(*compStr++)) {
+ S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
+ << baseType << SourceRange(CompLoc);
+ return QualType();
+ }
+ }
+ }
+
+ // The component accessor looks fine - now we need to compute the actual type.
+ // The vector type is implied by the component accessor. For example,
+ // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc.
+ // vec4.s0 is a float, vec4.s23 is a vec3, etc.
+ // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2.
+ unsigned CompSize = HalvingSwizzle ? (vecType->getNumElements() + 1) / 2
+ : CompName->getLength();
+ if (HexSwizzle)
+ CompSize--;
+
+ if (CompSize == 1)
+ return vecType->getElementType();
+
+ if (HasRepeated) VK = VK_RValue;
+
+ QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
+  // Now look up the TypedefDecl from the vector type. Without this,
+  // diagnostics look bad. We want extended vector types to appear built-in.
+ for (unsigned i = 0, E = S.ExtVectorDecls.size(); i != E; ++i) {
+ if (S.ExtVectorDecls[i]->getUnderlyingType() == VT)
+ return S.Context.getTypedefType(S.ExtVectorDecls[i]);
+ }
+ return VT; // should never get here (a typedef type should always be found).
+}
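
A brief sketch of the accessor forms this function validates, using Clang's ext_vector_type extension:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef float float2 __attribute__((ext_vector_type(2)));

    float2 swizzles(float4 v) {
      float  x = v.x;    // single point accessor: yields the element type
      float2 h = v.hi;   // halving swizzle: the upper half of the vector
      float2 s = v.s01;  // 's' prefix + hex indices: elements 0 and 1
      (void)x;
      return h + s + v.xy;
    }
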
+
+static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
+ return PD;
+ if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
+ return OMD;
+
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I) {
+ if (Decl *D = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context))
+ return D;
+ }
+ return 0;
+}
+
+static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ // Check protocols on qualified interfaces.
+ Decl *GDecl = 0;
+ for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+ GDecl = PD;
+ break;
+ }
+ // Also must look for a getter or setter name which uses property syntax.
+ if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Sel)) {
+ GDecl = OMD;
+ break;
+ }
+ }
+ if (!GDecl) {
+ for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ // Search in the protocol-qualifier list of current protocol.
+ GDecl = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context);
+ if (GDecl)
+ return GDecl;
+ }
+ }
+ return GDecl;
+}
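
Roughly the Objective-C pattern these two lookup helpers serve: resolving a property name against the protocols qualifying an 'id' type:

    @protocol Named
    @property (nonatomic, retain) id name;
    @end

    id nameOf(id<Named> obj) {
      return obj.name;   // resolved by searching the protocol qualifiers of 'id'
    }
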
+
+ExprResult
+Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
+ bool IsArrow, SourceLocation OpLoc,
+ const CXXScopeSpec &SS,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // Even in dependent contexts, try to diagnose base expressions with
+ // obviously wrong types, e.g.:
+ //
+ // T* t;
+ // t.f;
+ //
+ // In Obj-C++, however, the above expression is valid, since it could be
+ // accessing the 'f' property if T is an Obj-C interface. The extra check
+ // allows this, while still reporting an error if T is a struct pointer.
+ if (!IsArrow) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ if (PT && (!getLangOptions().ObjC1 ||
+ PT->getPointeeType()->isRecordType())) {
+ assert(BaseExpr && "cannot happen with implicit member accesses");
+ Diag(NameInfo.getLoc(), diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ assert(BaseType->isDependentType() ||
+ NameInfo.getName().isDependentName() ||
+ isDependentScopeSpecifier(SS));
+
+ // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
+ // must have pointer type, and the accessed type is the pointee.
+ return Owned(CXXDependentScopeMemberExpr::Create(Context, BaseExpr, BaseType,
+ IsArrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ FirstQualifierInScope,
+ NameInfo, TemplateArgs));
+}
+
+/// We know that the given qualified member reference points only to
+/// declarations which do not belong to the static type of the base
+/// expression. Diagnose the problem.
+static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
+ Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
+ // If this is an implicit member access, use a different set of
+ // diagnostics.
+ if (!BaseExpr)
+ return DiagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
+
+ SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
+ << SS.getRange() << rep << BaseType;
+}
+
+// Check whether the declarations we found through a nested-name
+// specifier in a member expression are actually members of the base
+// type. The restriction here is:
+//
+// C++ [expr.ref]p2:
+// ... In these cases, the id-expression shall name a
+// member of the class or of one of its base classes.
+//
+// So it's perfectly legitimate for the nested-name specifier to name
+// an unrelated class, and for us to find an overload set including
+// decls from classes which are not superclasses, as long as the decl
+// we actually pick through overload resolution is from a superclass.
+bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ const LookupResult &R) {
+ const RecordType *BaseRT = BaseType->getAs<RecordType>();
+ if (!BaseRT) {
+ // We can't check this yet because the base type is still
+ // dependent.
+ assert(BaseType->isDependentType());
+ return false;
+ }
+ CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ // If this is an implicit member reference and we find a
+ // non-instance member, it's not an error.
+ if (!BaseExpr && !(*I)->isCXXInstanceMember())
+ return false;
+
+ // Note that we use the DC of the decl, not the underlying decl.
+ DeclContext *DC = (*I)->getDeclContext();
+ while (DC->isTransparentContext())
+ DC = DC->getParent();
+
+ if (!DC->isRecord())
+ continue;
+
+ llvm::SmallPtrSet<CXXRecordDecl*,4> MemberRecord;
+ MemberRecord.insert(cast<CXXRecordDecl>(DC)->getCanonicalDecl());
+
+ if (!IsProvablyNotDerivedFrom(*this, BaseRecord, MemberRecord))
+ return false;
+ }
+
+ DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS,
+ R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return true;
+}
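
A small C++ sketch of the rule quoted above: the nested-name-specifier may name another class, but the member actually selected must belong to the base type or one of its base classes:

    struct A { void f(); };
    struct B : A { };
    struct C { void f(); };

    void call(B *b) {
      b->A::f();   // OK: A is a base class of B
      b->C::f();   // ill-formed: C::f is not a member of B or of a base of B
    }
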
+
+static bool
+LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
+ SourceRange BaseRange, const RecordType *RTy,
+ SourceLocation OpLoc, CXXScopeSpec &SS,
+ bool HasTemplateArgs) {
+ RecordDecl *RDecl = RTy->getDecl();
+ if (SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
+ SemaRef.PDiag(diag::err_typecheck_incomplete_tag)
+ << BaseRange))
+ return true;
+
+ if (HasTemplateArgs) {
+ // LookupTemplateName doesn't expect these both to exist simultaneously.
+ QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
+
+ bool MOUS;
+ SemaRef.LookupTemplateName(R, 0, SS, ObjectType, false, MOUS);
+ return false;
+ }
+
+ DeclContext *DC = RDecl;
+ if (SS.isSet()) {
+ // If the member name was a qualified-id, look into the
+ // nested-name-specifier.
+ DC = SemaRef.computeDeclContext(SS, false);
+
+ if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
+ SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
+ << SS.getRange() << DC;
+ return true;
+ }
+
+ assert(DC && "Cannot handle non-computable dependent contexts in lookup");
+
+ if (!isa<TypeDecl>(DC)) {
+ SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
+ << DC << SS.getRange();
+ return true;
+ }
+ }
+
+ // The record definition is complete, now look up the member.
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (!R.empty())
+ return false;
+
+ // We didn't find anything with the given name, so try to correct
+ // for typos.
+ DeclarationName Name = R.getLookupName();
+ TypoCorrection Corrected = SemaRef.CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), NULL,
+ &SS, DC, false,
+ Sema::CTC_MemberLookup);
+ NamedDecl *ND = Corrected.getCorrectionDecl();
+ R.clear();
+ if (ND && (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND))) {
+ std::string CorrectedStr(
+ Corrected.getAsString(SemaRef.getLangOptions()));
+ std::string CorrectedQuotedStr(
+ Corrected.getQuoted(SemaRef.getLangOptions()));
+ R.setLookupName(Corrected.getCorrection());
+ R.addDecl(ND);
+ SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest)
+ << Name << DC << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ SemaRef.Diag(ND->getLocation(), diag::note_previous_decl)
+ << ND->getDeclName();
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ if (BaseType->isDependentType() ||
+ (SS.isSet() && isDependentScopeSpecifier(SS)))
+ return ActOnDependentMemberExpr(Base, BaseType,
+ IsArrow, OpLoc,
+ SS, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+
+ LookupResult R(*this, NameInfo, LookupMemberName);
+
+ // Implicit member accesses.
+ if (!Base) {
+ QualType RecordTy = BaseType;
+ if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
+ if (LookupMemberExprInRecord(*this, R, SourceRange(),
+ RecordTy->getAs<RecordType>(),
+ OpLoc, SS, TemplateArgs != 0))
+ return ExprError();
+
+ // Explicit member accesses.
+ } else {
+ ExprResult BaseResult = Owned(Base);
+ ExprResult Result =
+ LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
+ SS, /*ObjCImpDecl*/ 0, TemplateArgs != 0);
+
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+
+ if (Result.isInvalid()) {
+ Owned(Base);
+ return ExprError();
+ }
+
+ if (Result.get())
+ return move(Result);
+
+ // LookupMemberExpr can modify Base, and thus change BaseType
+ BaseType = Base->getType();
+ }
+
+ return BuildMemberReferenceExpr(Base, BaseType,
+ OpLoc, IsArrow, SS, FirstQualifierInScope,
+ R, TemplateArgs);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo);
+
+ExprResult
+Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation loc,
+ IndirectFieldDecl *indirectField,
+ Expr *baseObjectExpr,
+ SourceLocation opLoc) {
+ // First, build the expression that refers to the base object.
+
+ bool baseObjectIsPointer = false;
+ Qualifiers baseQuals;
+
+ // Case 1: the base of the indirect field is not a field.
+ VarDecl *baseVariable = indirectField->getVarDecl();
+ CXXScopeSpec EmptySS;
+ if (baseVariable) {
+ assert(baseVariable->getType()->isRecordType());
+
+ // In principle we could have a member access expression that
+ // accesses an anonymous struct/union that's a static member of
+ // the base object's class. However, under the current standard,
+ // static data members cannot be anonymous structs or unions.
+ // Supporting this is as easy as building a MemberExpr here.
+ assert(!baseObjectExpr && "anonymous struct/union is static data member?");
+
+ DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
+
+ ExprResult result
+ = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
+ if (result.isInvalid()) return ExprError();
+
+ baseObjectExpr = result.take();
+ baseObjectIsPointer = false;
+ baseQuals = baseObjectExpr->getType().getQualifiers();
+
+ // Case 2: the base of the indirect field is a field and the user
+ // wrote a member expression.
+ } else if (baseObjectExpr) {
+ // The caller provided the base object expression. Determine
+    // whether it's a pointer and whether it adds any qualifiers to the
+ // anonymous struct/union fields we're looking into.
+ QualType objectType = baseObjectExpr->getType();
+
+ if (const PointerType *ptr = objectType->getAs<PointerType>()) {
+ baseObjectIsPointer = true;
+ objectType = ptr->getPointeeType();
+ } else {
+ baseObjectIsPointer = false;
+ }
+ baseQuals = objectType.getQualifiers();
+
+ // Case 3: the base of the indirect field is a field and we should
+ // build an implicit member access.
+ } else {
+ // We've found a member of an anonymous struct/union that is
+ // inside a non-anonymous struct/union, so in a well-formed
+ // program our base object expression is "this".
+ QualType ThisTy = getAndCaptureCurrentThisType();
+ if (ThisTy.isNull()) {
+ Diag(loc, diag::err_invalid_member_use_in_static_method)
+ << indirectField->getDeclName();
+ return ExprError();
+ }
+
+ // Our base object expression is "this".
+ baseObjectExpr
+ = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
+ baseObjectIsPointer = true;
+ baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers();
+ }
+
+ // Build the implicit member references to the field of the
+ // anonymous struct/union.
+ Expr *result = baseObjectExpr;
+ IndirectFieldDecl::chain_iterator
+ FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
+
+ // Build the first member access in the chain with full information.
+ if (!baseVariable) {
+ FieldDecl *field = cast<FieldDecl>(*FI);
+
+ // FIXME: use the real found-decl info!
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
+
+ // Make a nameInfo that properly uses the anonymous name.
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+
+ result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
+ EmptySS, field, foundDecl,
+ memberNameInfo).take();
+ baseObjectIsPointer = false;
+
+ // FIXME: check qualified member access
+ }
+
+ // In all cases, we should now skip the first declaration in the chain.
+ ++FI;
+
+ while (FI != FEnd) {
+ FieldDecl *field = cast<FieldDecl>(*FI++);
+
+ // FIXME: these are somewhat meaningless
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
+
+ result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
+ (FI == FEnd? SS : EmptySS), field,
+ foundDecl, memberNameInfo).take();
+ }
+
+ return Owned(result);
+}
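
For orientation, the C++ construct this function lowers (the IndirectFieldDecl names 'i' through the unnamed union member):

    struct S {
      union {      // anonymous union: 'i' and 'f' are injected into S's scope
        int i;
        float f;
      };
      void zero() { i = 0; }           // implicit chain: (*this).<unnamed>.i
    };

    void reset(S *s) { s->f = 0.0f; }  // explicit chain through the base object
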
+
+/// \brief Build a MemberExpr AST node.
+static MemberExpr *BuildMemberExpr(ASTContext &C, Expr *Base, bool isArrow,
+ const CXXScopeSpec &SS, ValueDecl *Member,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo,
+ QualType Ty,
+ ExprValueKind VK, ExprObjectKind OK,
+ const TemplateArgumentListInfo *TemplateArgs = 0) {
+ return MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C),
+ Member, FoundDecl, MemberNameInfo,
+ TemplateArgs, Ty, VK, OK);
+}
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
+ SourceLocation OpLoc, bool IsArrow,
+ const CXXScopeSpec &SS,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool SuppressQualifierCheck) {
+ QualType BaseType = BaseExprType;
+ if (IsArrow) {
+ assert(BaseType->isPointerType());
+ BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+ }
+ R.setBaseObjectType(BaseType);
+
+ const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo();
+ DeclarationName MemberName = MemberNameInfo.getName();
+ SourceLocation MemberLoc = MemberNameInfo.getLoc();
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.empty()) {
+ // Rederive where we looked up.
+ DeclContext *DC = (SS.isSet()
+ ? computeDeclContext(SS, false)
+ : BaseType->getAs<RecordType>()->getDecl());
+
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << MemberName << DC
+ << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
+ return ExprError();
+ }
+
+ // Diagnose lookups that find only declarations from a non-base
+ // type. This is possible for either qualified lookups (which may
+ // have been qualified with an unrelated type) or implicit member
+ // expressions (which were found with unqualified lookup and thus
+ // may have come from an enclosing scope). Note that it's okay for
+ // lookup to find declarations from a non-base type as long as those
+ // aren't the ones picked by overload resolution.
+ if ((SS.isSet() || !BaseExpr ||
+ (isa<CXXThisExpr>(BaseExpr) &&
+ cast<CXXThisExpr>(BaseExpr)->isImplicit())) &&
+ !SuppressQualifierCheck &&
+ CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R))
+ return ExprError();
+
+ // Construct an unresolved result if we in fact got an unresolved
+ // result.
+ if (R.isOverloadedResult() || R.isUnresolvableResult()) {
+ // Suppress any lookup-related diagnostics; we'll do these when we
+ // pick a member.
+ R.suppressDiagnostics();
+
+ UnresolvedMemberExpr *MemExpr
+ = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(),
+ BaseExpr, BaseExprType,
+ IsArrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ MemberNameInfo,
+ TemplateArgs, R.begin(), R.end());
+
+ return Owned(MemExpr);
+ }
+
+ assert(R.isSingleResult());
+ DeclAccessPair FoundDecl = R.begin().getPair();
+ NamedDecl *MemberDecl = R.getFoundDecl();
+
+ // FIXME: diagnose the presence of template arguments now.
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (MemberDecl->isInvalidDecl())
+ return ExprError();
+
+ // Handle the implicit-member-access case.
+ if (!BaseExpr) {
+ // If this is not an instance member, convert to a non-member access.
+ if (!MemberDecl->isCXXInstanceMember())
+ return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl);
+
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
+ }
+
+ bool ShouldCheckUse = true;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MemberDecl)) {
+ // Don't diagnose the use of a virtual member function unless it's
+ // explicitly qualified.
+ if (MD->isVirtual() && !SS.isSet())
+ ShouldCheckUse = false;
+ }
+
+ // Check the use of this member.
+ if (ShouldCheckUse && DiagnoseUseOfDecl(MemberDecl, MemberLoc)) {
+ Owned(BaseExpr);
+ return ExprError();
+ }
+
+ // Perform a property load on the base regardless of whether we
+ // actually need it for the declaration.
+ if (BaseExpr->getObjectKind() == OK_ObjCProperty) {
+ ExprResult Result = ConvertPropertyForRValue(BaseExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ BaseExpr = Result.take();
+ }
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
+ return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow,
+ SS, FD, FoundDecl, MemberNameInfo);
+
+ if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl))
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD,
+ BaseExpr, OpLoc);
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
+ MarkDeclarationReferenced(MemberLoc, Var);
+ return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
+ Var, FoundDecl, MemberNameInfo,
+ Var->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary));
+ }
+
+ if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
+ ExprValueKind valueKind;
+ QualType type;
+ if (MemberFn->isInstance()) {
+ valueKind = VK_RValue;
+ type = Context.BoundMemberTy;
+ } else {
+ valueKind = VK_LValue;
+ type = MemberFn->getType();
+ }
+
+ MarkDeclarationReferenced(MemberLoc, MemberDecl);
+ return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
+ MemberFn, FoundDecl, MemberNameInfo,
+ type, valueKind, OK_Ordinary));
+ }
+ assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
+
+ if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
+ MarkDeclarationReferenced(MemberLoc, MemberDecl);
+ return Owned(BuildMemberExpr(Context, BaseExpr, IsArrow, SS,
+ Enum, FoundDecl, MemberNameInfo,
+ Enum->getType(), VK_RValue, OK_Ordinary));
+ }
+
+ Owned(BaseExpr);
+
+ // We found something that we didn't expect. Complain.
+ if (isa<TypeDecl>(MemberDecl))
+ Diag(MemberLoc, diag::err_typecheck_member_reference_type)
+ << MemberName << BaseType << int(IsArrow);
+ else
+ Diag(MemberLoc, diag::err_typecheck_member_reference_unknown)
+ << MemberName << BaseType << int(IsArrow);
+
+ Diag(MemberDecl->getLocation(), diag::note_member_declared_here)
+ << MemberName;
+ R.suppressDiagnostics();
+ return ExprError();
+}
+
+/// Given that normal member access failed on the given expression,
+/// and given that the expression's type involves builtin-id or
+/// builtin-Class, decide whether substituting in the redefinition
+/// types would be profitable. The redefinition type is whatever
+/// this translation unit tried to typedef to id/Class; we store
+/// it to the side and then re-use it in places like this.
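+///
+/// (Illustrative sketch: a translation unit typically contains something like
+///      typedef struct objc_object *id;
+///  and ObjCIdRedefinitionType then records that pointer type, so a failed
+///  member access on a builtin 'id' base can be retried against it.)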
+static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) {
+ const ObjCObjectPointerType *opty
+ = base.get()->getType()->getAs<ObjCObjectPointerType>();
+ if (!opty) return false;
+
+ const ObjCObjectType *ty = opty->getObjectType();
+
+ QualType redef;
+ if (ty->isObjCId()) {
+ redef = S.Context.ObjCIdRedefinitionType;
+ } else if (ty->isObjCClass()) {
+ redef = S.Context.ObjCClassRedefinitionType;
+ } else {
+ return false;
+ }
+
+ // Do the substitution as long as the redefinition type isn't just a
+ // possibly-qualified pointer to builtin-id or builtin-Class again.
+ opty = redef->getAs<ObjCObjectPointerType>();
+  if (opty && !opty->getObjectType()->getInterface())
+ return false;
+
+ base = S.ImpCastExprToType(base.take(), redef, CK_BitCast);
+ return true;
+}
+
+/// Look up the given member of the given non-type-dependent
+/// expression. This can return in one of two ways:
+/// * If it returns a sentinel null-but-valid result, the caller will
+/// assume that lookup was performed and the results written into
+/// the provided structure. It will take over from there.
+/// * Otherwise, the returned expression will be produced in place of
+/// an ordinary member expression.
+///
+/// The ObjCImpDecl bit is a gross hack that will need to be properly
+/// fixed for ObjC++.
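+///
+/// (As used from ActOnMemberAccessExpr below: a null-but-valid result means
+///  "consult the LookupResult R", while any other valid result is returned
+///  as the member expression itself.)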
+ExprResult
+Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
+ bool &IsArrow, SourceLocation OpLoc,
+ CXXScopeSpec &SS,
+ Decl *ObjCImpDecl, bool HasTemplateArgs) {
+ assert(BaseExpr.get() && "no base expression");
+
+ // Perform default conversions.
+ BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
+
+ if (IsArrow) {
+ BaseExpr = DefaultLvalueConversion(BaseExpr.take());
+ if (BaseExpr.isInvalid())
+ return ExprError();
+ }
+
+ QualType BaseType = BaseExpr.get()->getType();
+ assert(!BaseType->isDependentType());
+
+ DeclarationName MemberName = R.getLookupName();
+ SourceLocation MemberLoc = R.getNameLoc();
+
+ // For later type-checking purposes, turn arrow accesses into dot
+ // accesses. The only access type we support that doesn't follow
+ // the C equivalence "a->b === (*a).b" is ObjC property accesses,
+ // and those never use arrows, so this is unaffected.
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (const ObjCObjectPointerType *Ptr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isRecordType()) {
+ // Recover from arrow accesses to records, e.g.:
+ // struct MyRecord foo;
+ // foo->bar
+ // This is actually well-formed in C++ if MyRecord has an
+ // overloaded operator->, but that should have been dealt with
+ // by now.
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ IsArrow = false;
+ } else if (BaseType == Context.BoundMemberTy) {
+ goto fail;
+ } else {
+ Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // Handle field access to simple records.
+ if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
+ if (LookupMemberExprInRecord(*this, R, BaseExpr.get()->getSourceRange(),
+ RTy, OpLoc, SS, HasTemplateArgs))
+ return ExprError();
+
+ // Returning valid-but-null is how we indicate to the caller that
+ // the lookup result was filled in.
+ return Owned((Expr*) 0);
+ }
+
+ // Handle ivar access to Objective-C objects.
+ if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) {
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ // There are three cases for the base type:
+ // - builtin id (qualified or unqualified)
+ // - builtin Class (qualified or unqualified)
+ // - an interface
+ ObjCInterfaceDecl *IDecl = OTy->getInterface();
+ if (!IDecl) {
+ if (getLangOptions().ObjCAutoRefCount &&
+ (OTy->isObjCId() || OTy->isObjCClass()))
+ goto fail;
+ // There's an implicit 'isa' ivar on all objects.
+ // But we only actually find it this way on objects of type 'id',
+ // apparently.
+ if (OTy->isObjCId() && Member->isStr("isa"))
+ return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc,
+ Context.getObjCClassType()));
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ goto fail;
+ }
+
+ ObjCInterfaceDecl *ClassDeclared;
+ ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
+
+ if (!IV) {
+ // Attempt to correct for typos in ivar names.
+ LookupResult Res(*this, R.getLookupName(), R.getNameLoc(),
+ LookupMemberName);
+ TypoCorrection Corrected = CorrectTypo(
+ R.getLookupNameInfo(), LookupMemberName, NULL, NULL, IDecl, false,
+ IsArrow ? CTC_ObjCIvarLookup : CTC_ObjCPropertyLookup);
+ if ((IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>())) {
+ Diag(R.getNameLoc(),
+ diag::err_typecheck_member_reference_ivar_suggest)
+ << IDecl->getDeclName() << MemberName << IV->getDeclName()
+ << FixItHint::CreateReplacement(R.getNameLoc(),
+ IV->getNameAsString());
+ Diag(IV->getLocation(), diag::note_previous_decl)
+ << IV->getDeclName();
+ } else {
+ if (IsArrow && IDecl->FindPropertyDeclaration(Member)) {
+ Diag(MemberLoc,
+ diag::err_property_found_suggest)
+ << Member << BaseExpr.get()->getType()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ return ExprError();
+ }
+ Res.clear();
+ Res.setLookupName(Member);
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
+ << IDecl->getDeclName() << MemberName
+ << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check whether we can reference this field.
+ if (DiagnoseUseOfDecl(IV, MemberLoc))
+ return ExprError();
+ if (IV->getAccessControl() != ObjCIvarDecl::Public &&
+ IV->getAccessControl() != ObjCIvarDecl::Package) {
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (ObjCImpDecl && getCurFunctionDecl()) {
+ // Case of a c-function declared inside an objc implementation.
+ // FIXME: For a c-style function nested inside an objc implementation
+ // class, there is no implementation context available, so we pass
+ // down the context as argument to this routine. Ideally, this context
+        // needs to be passed down in the AST node and somehow calculated from the
+ // AST for a function decl.
+ if (ObjCImplementationDecl *IMPD =
+ dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (ClassDeclared != IDecl ||
+ ClassOfMethodDecl != ClassDeclared)
+ Diag(MemberLoc, diag::error_private_ivar_access)
+ << IV->getDeclName();
+ } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
+ // @protected
+ Diag(MemberLoc, diag::error_protected_ivar_access)
+ << IV->getDeclName();
+ }
+ if (getLangOptions().ObjCAutoRefCount) {
+ Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp))
+ if (UO->getOpcode() == UO_Deref)
+ BaseExp = UO->getSubExpr()->IgnoreParenCasts();
+
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp))
+ if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ Diag(DE->getLocation(), diag::error_arc_weak_ivar_access);
+ }
+
+ return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ MemberLoc, BaseExpr.take(),
+ IsArrow));
+ }
+
+ // Objective-C property access.
+ const ObjCObjectPointerType *OPT;
+ if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) {
+ // This actually uses the base as an r-value.
+ BaseExpr = DefaultLvalueConversion(BaseExpr.take());
+ if (BaseExpr.isInvalid())
+ return ExprError();
+
+ assert(Context.hasSameUnqualifiedType(BaseType, BaseExpr.get()->getType()));
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ const ObjCObjectType *OT = OPT->getObjectType();
+
+ // id, with and without qualifiers.
+ if (OT->isObjCId()) {
+ // Check protocols on qualified interfaces.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ if (Decl *PMDecl = FindGetterSetterNameDecl(OPT, Member, Sel, Context)) {
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
+ // Check the use of this declaration
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ QualType T = PD->getType();
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ T = getMessageSendResultType(BaseType, Getter, false, false);
+
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, T,
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ BaseExpr.take()));
+ }
+
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(OMD, MemberLoc))
+ return ExprError();
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *SMD = 0;
+ if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/0,
+ SetterSel, Context))
+ SMD = dyn_cast<ObjCMethodDecl>(SDecl);
+ QualType PType = getMessageSendResultType(BaseType, OMD, false,
+ false);
+
+ ExprValueKind VK = VK_LValue;
+ if (!getLangOptions().CPlusPlus && PType.isCForbiddenLValueType())
+ VK = VK_RValue;
+ ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
+
+ return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD, PType,
+ VK, OK,
+ MemberLoc, BaseExpr.take()));
+ }
+ }
+ // Use of id.member can only be for a property reference. Do not
+ // use the 'id' redefinition in this case.
+ if (IsArrow && ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // 'Class', unqualified only.
+ if (OT->isObjCClass()) {
+ // Only works in a method declaration (??!).
+ ObjCMethodDecl *MD = getCurMethodDecl();
+ if (!MD) {
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ goto fail;
+ }
+
+ // Also must look for a getter name which uses property syntax.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ ObjCMethodDecl *Getter;
+ if ((Getter = IFace->lookupClassMethod(Sel))) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ } else
+ Getter = IFace->lookupPrivateMethod(Sel, false);
+ // If we found a getter then this may be a valid dot-reference, we
+ // will look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateMethod(SetterSel, false);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter)
+ Setter = IFace->getCategoryClassMethod(SetterSel);
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ QualType PType;
+
+ ExprValueKind VK = VK_LValue;
+ if (Getter) {
+ PType = getMessageSendResultType(QualType(OT, 0), Getter, true,
+ false);
+ if (!getLangOptions().CPlusPlus && PType.isCForbiddenLValueType())
+ VK = VK_RValue;
+ } else {
+ // Get the expression type from Setter's incoming parameter.
+ PType = (*(Setter->param_end() -1))->getType();
+ }
+ ExprObjectKind OK = (VK == VK_RValue ? OK_Ordinary : OK_ObjCProperty);
+
+ // FIXME: we must check that the setter has property type.
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ PType, VK, OK,
+ MemberLoc, BaseExpr.take()));
+ }
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // Normal property access.
+ return HandleExprPropertyRefExpr(OPT, BaseExpr.get(), OpLoc,
+ MemberName, MemberLoc,
+ SourceLocation(), QualType(), false);
+ }
+
+ // Handle 'field access' to vectors, such as 'V.xx'.
+ if (BaseType->isExtVectorType()) {
+ // FIXME: this expr should store IsArrow.
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr.get()->getValueKind());
+ QualType ret = CheckExtVectorComponent(*this, BaseType, VK, OpLoc,
+ Member, MemberLoc);
+ if (ret.isNull())
+ return ExprError();
+
+ return Owned(new (Context) ExtVectorElementExpr(ret, VK, BaseExpr.take(),
+ *Member, MemberLoc));
+ }
+
+ // Adjust builtin-sel to the appropriate redefinition type if that's
+ // not just a pointer to builtin-sel again.
+ if (IsArrow &&
+ BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) &&
+ !Context.ObjCSelRedefinitionType->isObjCSelType()) {
+ BaseExpr = ImpCastExprToType(BaseExpr.take(), Context.ObjCSelRedefinitionType,
+ CK_BitCast);
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ // Failure cases.
+ fail:
+
+ // Recover from dot accesses to pointers, e.g.:
+ // type *foo;
+ // foo.bar
+ // This is actually well-formed in two cases:
+ // - 'type' is an Objective C type
+ // - 'bar' is a pseudo-destructor name which happens to refer to
+ // the appropriate pointer type
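+  //   (For instance, with "NSString *s", the access "s.length" is an
+  //   Objective-C property access and was handled above; the recovery below
+  //   is for plain C/C++ record pointers.)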
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ if (!IsArrow && Ptr->getPointeeType()->isRecordType() &&
+ MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+
+ // Recurse as an -> access.
+ IsArrow = true;
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+ }
+
+ // If the user is trying to apply -> or . to a function name, it's probably
+ // because they forgot parentheses to call that function.
+ QualType ZeroArgCallTy;
+ UnresolvedSet<4> Overloads;
+ if (isExprCallable(*BaseExpr.get(), ZeroArgCallTy, Overloads)) {
+ if (ZeroArgCallTy.isNull()) {
+ Diag(BaseExpr.get()->getExprLoc(), diag::err_member_reference_needs_call)
+ << (Overloads.size() > 1) << 0 << BaseExpr.get()->getSourceRange();
+ UnresolvedSet<2> PlausibleOverloads;
+ for (OverloadExpr::decls_iterator It = Overloads.begin(),
+ DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
+ const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
+ QualType OverloadResultTy = OverloadDecl->getResultType();
+ if ((!IsArrow && OverloadResultTy->isRecordType()) ||
+ (IsArrow && OverloadResultTy->isPointerType() &&
+ OverloadResultTy->getPointeeType()->isRecordType()))
+ PlausibleOverloads.addDecl(It.getDecl());
+ }
+ NoteOverloads(PlausibleOverloads, BaseExpr.get()->getExprLoc());
+ return ExprError();
+ }
+ if ((!IsArrow && ZeroArgCallTy->isRecordType()) ||
+ (IsArrow && ZeroArgCallTy->isPointerType() &&
+ ZeroArgCallTy->getPointeeType()->isRecordType())) {
+ // At this point, we know BaseExpr looks like it's potentially callable
+ // with 0 arguments, and that it returns something of a reasonable type,
+ // so we can emit a fixit and carry on pretending that BaseExpr was
+ // actually a CallExpr.
+ SourceLocation ParenInsertionLoc =
+ PP.getLocForEndOfToken(BaseExpr.get()->getLocEnd());
+ Diag(BaseExpr.get()->getExprLoc(), diag::err_member_reference_needs_call)
+ << (Overloads.size() > 1) << 1 << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateInsertion(ParenInsertionLoc, "()");
+ // FIXME: Try this before emitting the fixit, and suppress diagnostics
+ // while doing so.
+ ExprResult NewBase =
+ ActOnCallExpr(0, BaseExpr.take(), ParenInsertionLoc,
+ MultiExprArg(*this, 0, 0),
+ ParenInsertionLoc.getFileLocWithOffset(1));
+ if (NewBase.isInvalid())
+ return ExprError();
+ BaseExpr = NewBase;
+ BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+ }
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr.get()->getSourceRange();
+
+ return ExprError();
+}
+
+/// The main callback when the parser finds something like
+/// expression . [nested-name-specifier] identifier
+/// expression -> [nested-name-specifier] identifier
+/// where 'identifier' encompasses a fairly broad spectrum of
+/// possibilities, including destructor and operator references.
+///
+/// \param OpKind either tok::arrow or tok::period
+/// \param HasTrailingLParen whether the next token is '(', which
+/// is used to diagnose mis-uses of special members that can
+/// only be called
+/// \param ObjCImpDecl the current ObjC @implementation decl;
+/// this is an ugly hack around the fact that ObjC @implementations
+/// aren't properly put in the context chain
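+///
+/// (For illustration, member accesses such as "p->field", "x.Base::member",
+///  "x.operator int()" and "p->~T()" all arrive here; template arguments and
+///  special names are decomposed from the UnqualifiedId below.)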
+ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Id,
+ Decl *ObjCImpDecl,
+ bool HasTrailingLParen) {
+ if (SS.isSet() && SS.isInvalid())
+ return ExprError();
+
+ // Warn about the explicit constructor calls Microsoft extension.
+ if (getLangOptions().Microsoft &&
+ Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Diag(Id.getSourceRange().getBegin(),
+ diag::ext_ms_explicit_constructor_call);
+
+ TemplateArgumentListInfo TemplateArgsBuffer;
+
+ // Decompose the name into its component parts.
+ DeclarationNameInfo NameInfo;
+ const TemplateArgumentListInfo *TemplateArgs;
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer,
+ NameInfo, TemplateArgs);
+
+ DeclarationName Name = NameInfo.getName();
+ bool IsArrow = (OpKind == tok::arrow);
+
+ NamedDecl *FirstQualifierInScope
+ = (!SS.isSet() ? 0 : FindFirstQualifierInScope(S,
+ static_cast<NestedNameSpecifier*>(SS.getScopeRep())));
+
+ // This is a postfix expression, so get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.take();
+
+ if (Base->getType()->isDependentType() || Name.isDependentName() ||
+ isDependentScopeSpecifier(SS)) {
+ Result = ActOnDependentMemberExpr(Base, Base->getType(),
+ IsArrow, OpLoc,
+ SS, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+ } else {
+ LookupResult R(*this, NameInfo, LookupMemberName);
+ ExprResult BaseResult = Owned(Base);
+ Result = LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
+ SS, ObjCImpDecl, TemplateArgs != 0);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+
+ if (Result.isInvalid()) {
+ Owned(Base);
+ return ExprError();
+ }
+
+ if (Result.get()) {
+ // The only way a reference to a destructor can be used is to
+ // immediately call it, which falls into this case. If the
+ // next token is not a '(', produce a diagnostic and build the
+ // call now.
+ if (!HasTrailingLParen &&
+ Id.getKind() == UnqualifiedId::IK_DestructorName)
+ return DiagnoseDtorReference(NameInfo.getLoc(), Result.get());
+
+ return move(Result);
+ }
+
+ Result = BuildMemberReferenceExpr(Base, Base->getType(),
+ OpLoc, IsArrow, SS, FirstQualifierInScope,
+ R, TemplateArgs);
+ }
+
+ return move(Result);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo) {
+ // x.a is an l-value if 'a' has a reference type. Otherwise:
+ // x.a is an l-value/x-value/pr-value if the base is (and note
+ // that *x is always an l-value), except that if the base isn't
+ // an ordinary object then we must have an rvalue.
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+ if (!IsArrow) {
+ if (BaseExpr->getObjectKind() == OK_Ordinary)
+ VK = BaseExpr->getValueKind();
+ else
+ VK = VK_RValue;
+ }
+ if (VK != VK_RValue && Field->isBitField())
+ OK = OK_BitField;
+
+ // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
+ QualType MemberType = Field->getType();
+ if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
+ MemberType = Ref->getPointeeType();
+ VK = VK_LValue;
+ } else {
+ QualType BaseType = BaseExpr->getType();
+ if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+
+ // GC attributes are never picked up by members.
+ BaseQuals.removeObjCGCAttr();
+
+ // CVR attributes from the base are picked up by members,
+ // except that 'mutable' members don't pick up 'const'.
+ if (Field->isMutable()) BaseQuals.removeConst();
+
+ Qualifiers MemberQuals
+ = S.Context.getCanonicalType(MemberType).getQualifiers();
+
+ // TR 18037 does not allow fields to be declared with address spaces.
+ assert(!MemberQuals.hasAddressSpace());
+
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ MemberType = S.Context.getQualifiedType(MemberType, Combined);
+ }
+
+ S.MarkDeclarationReferenced(MemberNameInfo.getLoc(), Field);
+ ExprResult Base =
+ S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
+ FoundDecl, Field);
+ if (Base.isInvalid())
+ return ExprError();
+ return S.Owned(BuildMemberExpr(S.Context, Base.take(), IsArrow, SS,
+ Field, FoundDecl, MemberNameInfo,
+ MemberType, VK, OK));
+}
+
+/// Builds an implicit member access expression. The current context
+/// is known to be an instance method, and the given unqualified lookup
+/// set is known to contain only instance members, at least one of which
+/// is from an appropriate type.
+ExprResult
+Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsKnownInstance) {
+ assert(!R.empty() && !R.isAmbiguous());
+
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ // FIXME: template-ids inside anonymous structs?
+ if (IndirectFieldDecl *FD = R.getAsSingle<IndirectFieldDecl>())
+ return BuildAnonymousStructUnionMemberReference(SS, R.getNameLoc(), FD);
+
+ // If this is known to be an instance access, go ahead and build an
+ // implicit 'this' expression now.
+ QualType ThisTy = getAndCaptureCurrentThisType();
+ assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
+
+ Expr *baseExpr = 0; // null signifies implicit access
+ if (IsKnownInstance) {
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+    baseExpr = new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/true);
+ }
+
+ return BuildMemberReferenceExpr(baseExpr, ThisTy,
+ /*OpLoc*/ SourceLocation(),
+ /*IsArrow*/ true,
+ SS,
+ /*FirstQualifierInScope*/ 0,
+ R, TemplateArgs);
+}
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index cb5c1e0d0cbe..02a4682cc840 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/SmallString.h"
#include "clang/Lex/Preprocessor.h"
@@ -62,7 +63,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
// Create the aggregate string with the appropriate content and location
// information.
- S = StringLiteral::Create(Context, &StrBuf[0], StrBuf.size(),
+ S = StringLiteral::Create(Context, StrBuf,
/*Wide=*/false, /*Pascal=*/false,
Context.getPointerType(Context.CharTy),
&StrLocs[0], StrLocs.size());
@@ -127,7 +128,8 @@ ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
if (EncodedType->isDependentType())
StrTy = Context.DependentTy;
else {
- if (!EncodedType->getAsArrayTypeUnsafe()) // Incomplete array is handled.
+    if (!EncodedType->getAsArrayTypeUnsafe() && // Incomplete array is handled.
+ !EncodedType->isVoidType()) // void is handled too.
if (RequireCompleteType(AtLoc, EncodedType,
PDiag(diag::err_incomplete_type_objc_at_encode)
<< EncodedTypeInfo->getTypeLoc().getSourceRange()))
@@ -176,12 +178,39 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
SourceRange(LParenLoc, RParenLoc));
if (!Method)
Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
+
+ if (!Method ||
+ Method->getImplementationControl() != ObjCMethodDecl::Optional) {
+ llvm::DenseMap<Selector, SourceLocation>::iterator Pos
+ = ReferencedSelectors.find(Sel);
+ if (Pos == ReferencedSelectors.end())
+ ReferencedSelectors.insert(std::make_pair(Sel, SelLoc));
+ }
- llvm::DenseMap<Selector, SourceLocation>::iterator Pos
- = ReferencedSelectors.find(Sel);
- if (Pos == ReferencedSelectors.end())
- ReferencedSelectors.insert(std::make_pair(Sel, SelLoc));
-
+ // In ARC, forbid the user from using @selector for
+ // retain/release/autorelease/dealloc/retainCount.
+ if (getLangOptions().ObjCAutoRefCount) {
+ switch (Sel.getMethodFamily()) {
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_dealloc:
+ Diag(AtLoc, diag::err_arc_illegal_selector) <<
+ Sel << SourceRange(LParenLoc, RParenLoc);
+ break;
+
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_performSelector:
+ break;
+ }
+ }
QualType Ty = Context.getObjCSelType();
return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc);
}
@@ -321,11 +350,22 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
Args[i] = Result.take();
}
- unsigned DiagID = isClassMessage ? diag::warn_class_method_not_found :
- diag::warn_inst_method_not_found;
+ unsigned DiagID;
+ if (getLangOptions().ObjCAutoRefCount)
+ DiagID = diag::err_arc_method_not_found;
+ else
+ DiagID = isClassMessage ? diag::warn_class_method_not_found
+ : diag::warn_inst_method_not_found;
Diag(lbrac, DiagID)
<< Sel << isClassMessage << SourceRange(lbrac, rbrac);
- ReturnType = Context.getObjCIdType();
+
+ // In debuggers, we want to use __unknown_anytype for these
+ // results so that clients can cast them.
+ if (getLangOptions().DebuggerSupport) {
+ ReturnType = Context.UnknownAnyTy;
+ } else {
+ ReturnType = Context.getObjCIdType();
+ }
VK = VK_RValue;
return false;
}
@@ -404,17 +444,15 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
return IsError;
}
-bool Sema::isSelfExpr(Expr *RExpr) {
+bool Sema::isSelfExpr(Expr *receiver) {
// 'self' is objc 'self' in an objc method only.
DeclContext *DC = CurContext;
while (isa<BlockDecl>(DC))
DC = DC->getParent();
if (DC && !isa<ObjCMethodDecl>(DC))
return false;
- if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RExpr))
- if (ICE->getCastKind() == CK_LValueToRValue)
- RExpr = ICE->getSubExpr();
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RExpr))
+ receiver = receiver->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(receiver))
if (DRE->getDecl()->getIdentifier() == &Context.Idents.get("self"))
return true;
return false;
@@ -489,7 +527,8 @@ ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
/// objective C interface. This is a property reference expression.
ExprResult Sema::
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
- Expr *BaseExpr, DeclarationName MemberName,
+ Expr *BaseExpr, SourceLocation OpLoc,
+ DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super) {
@@ -635,17 +674,19 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
}
// Attempt to correct for typos in property names.
- LookupResult Res(*this, MemberName, MemberLoc, LookupOrdinaryName);
- if (CorrectTypo(Res, 0, 0, IFace, false, CTC_NoKeywords, OPT) &&
- Res.getAsSingle<ObjCPropertyDecl>()) {
- DeclarationName TypoResult = Res.getLookupName();
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName, NULL,
+ NULL, IFace, false, CTC_NoKeywords, OPT);
+ if (ObjCPropertyDecl *Property =
+ Corrected.getCorrectionDeclAs<ObjCPropertyDecl>()) {
+ DeclarationName TypoResult = Corrected.getCorrection();
Diag(MemberLoc, diag::err_property_not_found_suggest)
<< MemberName << QualType(OPT, 0) << TypoResult
<< FixItHint::CreateReplacement(MemberLoc, TypoResult.getAsString());
- ObjCPropertyDecl *Property = Res.getAsSingle<ObjCPropertyDecl>();
Diag(Property->getLocation(), diag::note_previous_decl)
<< Property->getDeclName();
- return HandleExprPropertyRefExpr(OPT, BaseExpr, TypoResult, MemberLoc,
+ return HandleExprPropertyRefExpr(OPT, BaseExpr, OpLoc,
+ TypoResult, MemberLoc,
SuperLoc, SuperType, Super);
}
ObjCInterfaceDecl *ClassDeclared;
@@ -663,6 +704,11 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
return ExprError();
}
}
+ Diag(MemberLoc,
+ diag::err_ivar_access_using_property_syntax_suggest)
+ << MemberName << QualType(OPT, 0) << Ivar->getDeclName()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+ return ExprError();
}
Diag(MemberLoc, diag::err_property_not_found)
@@ -699,7 +745,9 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
T = Context.getObjCObjectPointerType(T);
return HandleExprPropertyRefExpr(T->getAsObjCInterfacePointerType(),
- /*BaseExpr*/0, &propertyName,
+ /*BaseExpr*/0,
+ SourceLocation()/*OpLoc*/,
+ &propertyName,
propertyNameLoc,
receiverNameLoc, T, true);
}
@@ -862,29 +910,30 @@ Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
Method->getClassInterface()->getSuperClass())
CTC = CTC_ObjCMessageReceiver;
- if (DeclarationName Corrected = CorrectTypo(Result, S, 0, 0, false, CTC)) {
- if (Result.isSingleResult()) {
+ if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
+ Result.getLookupKind(), S, NULL,
+ NULL, false, CTC)) {
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
// If we found a declaration, correct when it refers to an Objective-C
// class.
- NamedDecl *ND = Result.getFoundDecl();
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND)) {
Diag(NameLoc, diag::err_unknown_receiver_suggest)
- << Name << Result.getLookupName()
+ << Name << Corrected.getCorrection()
<< FixItHint::CreateReplacement(SourceRange(NameLoc),
ND->getNameAsString());
Diag(ND->getLocation(), diag::note_previous_decl)
- << Corrected;
+ << Corrected.getCorrection();
QualType T = Context.getObjCInterfaceType(Class);
TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
ReceiverType = CreateParsedType(T, TSInfo);
return ObjCClassMessage;
}
- } else if (Result.empty() && Corrected.getAsIdentifierInfo() &&
- Corrected.getAsIdentifierInfo()->isStr("super")) {
+ } else if (Corrected.isKeyword() &&
+ Corrected.getCorrectionAsIdentifierInfo()->isStr("super")) {
// If we've found the keyword "super", this is a send to super.
Diag(NameLoc, diag::err_unknown_receiver_suggest)
- << Name << Corrected
+ << Name << Corrected.getCorrection()
<< FixItHint::CreateReplacement(SourceRange(NameLoc), "super");
return ObjCSuperMessage;
}
@@ -1013,11 +1062,16 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// Find the method we are messaging.
if (!Method) {
if (Class->isForwardDecl()) {
+ if (getLangOptions().ObjCAutoRefCount) {
+ Diag(Loc, diag::err_arc_receiver_forward_class) << ReceiverType;
+ } else {
+ Diag(Loc, diag::warn_receiver_forward_class) << Class->getDeclName();
+ }
+
// A forward class used in messaging is treated as a 'Class'
- Diag(Loc, diag::warn_receiver_forward_class) << Class->getDeclName();
Method = LookupFactoryMethodInGlobalPool(Sel,
SourceRange(LBracLoc, RBracLoc));
- if (Method)
+ if (Method && !getLangOptions().ObjCAutoRefCount)
Diag(Method->getLocation(), diag::note_method_sent_forward_class)
<< Method->getDeclName();
}
@@ -1236,6 +1290,14 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
= ReceiverType->getAsObjCInterfacePointerType()) {
// We allow sending a message to a pointer to an interface (an object).
ClassDecl = OCIType->getInterfaceDecl();
+
+ if (ClassDecl->isForwardDecl() && getLangOptions().ObjCAutoRefCount) {
+ Diag(Loc, diag::err_arc_receiver_forward_instance)
+ << OCIType->getPointeeType()
+ << (Receiver ? Receiver->getSourceRange() : SourceRange(SuperLoc));
+ return ExprError();
+ }
+
// FIXME: consider using LookupInstanceMethodInGlobalPool, since it will be
// faster than the following method (which can do *many* linear searches).
// The idea is to add class info to MethodPool.
@@ -1250,6 +1312,12 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// If we have implementations in scope, check "private" methods.
Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+ if (!Method && getLangOptions().ObjCAutoRefCount) {
+ Diag(Loc, diag::err_arc_may_not_respond)
+ << OCIType->getPointeeType() << Sel;
+ return ExprError();
+ }
+
if (!Method && (!Receiver || !isSelfExpr(Receiver))) {
// If we still haven't found a method, look in the global pool. This
// behavior isn't very desirable, however we need it for GCC
@@ -1267,10 +1335,12 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
if (Method && DiagnoseUseOfDecl(Method, Loc, forwardClass))
return ExprError();
- } else if (!Context.getObjCIdType().isNull() &&
+ } else if (!getLangOptions().ObjCAutoRefCount &&
+ !Context.getObjCIdType().isNull() &&
(ReceiverType->isPointerType() ||
ReceiverType->isIntegerType())) {
// Implicitly convert integers and pointers to 'id' but emit a warning.
+ // But not in ARC.
Diag(Loc, diag::warn_bad_receiver_type)
<< ReceiverType
<< Receiver->getSourceRange();
@@ -1332,8 +1402,84 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
diag::err_illegal_message_expr_incomplete_type))
return ExprError();
+ // In ARC, forbid the user from sending messages to
+ // retain/release/autorelease/dealloc/retainCount explicitly.
+ if (getLangOptions().ObjCAutoRefCount) {
+ ObjCMethodFamily family =
+ (Method ? Method->getMethodFamily() : Sel.getMethodFamily());
+ switch (family) {
+ case OMF_init:
+ if (Method)
+ checkInitMethod(Method, ReceiverType);
+
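+      // Falls through to the unrestricted families below.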
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ break;
+
+ case OMF_dealloc:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ Diag(Loc, diag::err_arc_illegal_explicit_message)
+ << Sel << SelectorLoc;
+ break;
+
+ case OMF_performSelector:
+ if (Method && NumArgs >= 1) {
+ if (ObjCSelectorExpr *SelExp = dyn_cast<ObjCSelectorExpr>(Args[0])) {
+ Selector ArgSel = SelExp->getSelector();
+ ObjCMethodDecl *SelMethod =
+ LookupInstanceMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (!SelMethod)
+ SelMethod =
+ LookupFactoryMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (SelMethod) {
+ ObjCMethodFamily SelFamily = SelMethod->getMethodFamily();
+ switch (SelFamily) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_init:
+ // Issue error, unless ns_returns_not_retained.
+ if (!SelMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelectorLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at);
+ }
+ break;
+ default:
+              // +0 call is OK, unless it is annotated ns_returns_retained.
+ if (SelMethod->hasAttr<NSReturnsRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelectorLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at);
+ }
+ break;
+ }
+ }
+ } else {
+ // error (may leak).
+ Diag(SelectorLoc, diag::warn_arc_perform_selector_leaks);
+ Diag(Args[0]->getExprLoc(), diag::note_used_here);
+ }
+ }
+ break;
+ }
+ }
+
// Construct the appropriate ObjCMessageExpr instance.
- Expr *Result;
+ ObjCMessageExpr *Result;
if (SuperLoc.isValid())
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
SuperLoc, /*IsInstanceSuper=*/true,
@@ -1343,6 +1489,27 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
Receiver, Sel, SelectorLoc, Method,
Args, NumArgs, RBracLoc);
+
+ if (getLangOptions().ObjCAutoRefCount) {
+ // In ARC, annotate delegate init calls.
+ if (Result->getMethodFamily() == OMF_init &&
+ (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+ // Only consider init calls *directly* in init implementations,
+ // not within blocks.
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CurContext);
+ if (method && method->getMethodFamily() == OMF_init) {
+ // The implicit assignment to self means we also don't want to
+ // consume the result.
+ Result->setDelegateInitCall(true);
+ return Owned(Result);
+ }
+ }
+
+ // In ARC, check for message sends which are likely to introduce
+ // retain cycles.
+ checkRetainCycles(Result);
+ }
+
return MaybeBindToTemporary(Result);
}
@@ -1364,3 +1531,393 @@ ExprResult Sema::ActOnInstanceMessage(Scope *S,
LBracLoc, SelectorLoc, RBracLoc, move(Args));
}
+enum ARCConversionTypeClass {
+ ACTC_none,
+ ACTC_retainable,
+ ACTC_indirectRetainable
+};
+static ARCConversionTypeClass classifyTypeForARCConversion(QualType type) {
+ ARCConversionTypeClass ACTC = ACTC_retainable;
+
+ // Ignore an outermost reference type.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>())
+ type = ref->getPointeeType();
+
+ // Drill through pointers and arrays recursively.
+ while (true) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ type = ptr->getPointeeType();
+ } else if (const ArrayType *array = type->getAsArrayTypeUnsafe()) {
+ type = QualType(array->getElementType()->getBaseElementTypeUnsafe(), 0);
+ } else {
+ break;
+ }
+ ACTC = ACTC_indirectRetainable;
+ }
+
+ if (!type->isObjCRetainableType()) return ACTC_none;
+ return ACTC;
+}
+
+namespace {
+ /// Return true if the given expression can be reasonably converted
+ /// between a retainable pointer type and a C pointer type.
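+  ///
+  /// (As the visitor methods below show, this accepts null pointer constants,
+  ///  Objective-C string literals, and extern const globals from system
+  ///  headers, looking through parens, benign casts, __extension__, comma
+  ///  operators, conditionals, and statement expressions.)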
+ struct ARCCastChecker : StmtVisitor<ARCCastChecker, bool> {
+ ASTContext &Context;
+ ARCCastChecker(ASTContext &Context) : Context(Context) {}
+ bool VisitStmt(Stmt *s) {
+ return false;
+ }
+ bool VisitExpr(Expr *e) {
+ return e->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
+ }
+
+ bool VisitParenExpr(ParenExpr *e) {
+ return Visit(e->getSubExpr());
+ }
+ bool VisitCastExpr(CastExpr *e) {
+ switch (e->getCastKind()) {
+ case CK_NullToPointer:
+ return true;
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_BitCast:
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return Visit(e->getSubExpr());
+ default:
+ return false;
+ }
+ }
+ bool VisitUnaryExtension(UnaryOperator *e) {
+ return Visit(e->getSubExpr());
+ }
+ bool VisitBinComma(BinaryOperator *e) {
+ return Visit(e->getRHS());
+ }
+ bool VisitConditionalOperator(ConditionalOperator *e) {
+ // Conditional operators are okay if both sides are okay.
+ return Visit(e->getTrueExpr()) && Visit(e->getFalseExpr());
+ }
+ bool VisitObjCStringLiteral(ObjCStringLiteral *e) {
+ // Always white-list Objective-C string literals.
+ return true;
+ }
+ bool VisitStmtExpr(StmtExpr *e) {
+ return Visit(e->getSubStmt()->body_back());
+ }
+ bool VisitDeclRefExpr(DeclRefExpr *e) {
+ // White-list references to global extern strings from system
+ // headers.
+ if (VarDecl *var = dyn_cast<VarDecl>(e->getDecl()))
+ if (var->getStorageClass() == SC_Extern &&
+ var->getType().isConstQualified() &&
+ Context.getSourceManager().isInSystemHeader(var->getLocation()))
+ return true;
+ return false;
+ }
+ };
+}
+
+bool
+Sema::ValidObjCARCNoBridgeCastExpr(Expr *&Exp, QualType castType) {
+ Expr *NewExp = Exp->IgnoreParenCasts();
+
+ if (!isa<ObjCMessageExpr>(NewExp) && !isa<ObjCPropertyRefExpr>(NewExp)
+ && !isa<CallExpr>(NewExp))
+ return false;
+ ObjCMethodDecl *method = 0;
+ bool MethodReturnsPlusOne = false;
+
+ if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(NewExp)) {
+ method = PRE->getExplicitProperty()->getGetterMethodDecl();
+ }
+ else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(NewExp))
+ method = ME->getMethodDecl();
+ else {
+ CallExpr *CE = cast<CallExpr>(NewExp);
+ Decl *CallDecl = CE->getCalleeDecl();
+ if (!CallDecl)
+ return false;
+ if (CallDecl->hasAttr<CFReturnsNotRetainedAttr>())
+ return true;
+ MethodReturnsPlusOne = CallDecl->hasAttr<CFReturnsRetainedAttr>();
+ if (!MethodReturnsPlusOne) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(CallDecl))
+ if (const IdentifierInfo *Id = ND->getIdentifier())
+ if (Id->isStr("__builtin___CFStringMakeConstantString"))
+ return true;
+ }
+ }
+
+ if (!MethodReturnsPlusOne) {
+ if (!method)
+ return false;
+ if (method->hasAttr<CFReturnsNotRetainedAttr>())
+ return true;
+ MethodReturnsPlusOne = method->hasAttr<CFReturnsRetainedAttr>();
+ if (!MethodReturnsPlusOne) {
+ ObjCMethodFamily family = method->getSelector().getMethodFamily();
+ switch (family) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ MethodReturnsPlusOne = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (MethodReturnsPlusOne) {
+ TypeSourceInfo *TSInfo =
+ Context.getTrivialTypeSourceInfo(castType, SourceLocation());
+ ExprResult ExpRes = BuildObjCBridgedCast(SourceLocation(), OBC_BridgeTransfer,
+ SourceLocation(), TSInfo, Exp);
+ Exp = ExpRes.take();
+ }
+ return true;
+}
+
+void
+Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
+ Expr *&castExpr, CheckedConversionKind CCK) {
+ QualType castExprType = castExpr->getType();
+
+ ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExprType);
+ ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType);
+ if (exprACTC == castACTC) return;
+ if (exprACTC && castType->isIntegralType(Context)) return;
+
+ // Allow casts between pointers to lifetime types (e.g., __strong id*)
+ // and pointers to void (e.g., cv void *). Casting from void* to lifetime*
+ // must be explicit.
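+  // (For example, converting "__strong id *" to "void *" is accepted even
+  // implicitly, while "void *" back to "__strong id *" is only accepted for
+  // explicit casts, i.e. when CCK != CCK_ImplicitConversion.)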
+ if (const PointerType *CastPtr = castType->getAs<PointerType>()) {
+ if (const PointerType *CastExprPtr = castExprType->getAs<PointerType>()) {
+ QualType CastPointee = CastPtr->getPointeeType();
+ QualType CastExprPointee = CastExprPtr->getPointeeType();
+ if ((CCK != CCK_ImplicitConversion &&
+ CastPointee->isObjCIndirectLifetimeType() &&
+ CastExprPointee->isVoidType()) ||
+ (CastPointee->isVoidType() &&
+ CastExprPointee->isObjCIndirectLifetimeType()))
+ return;
+ }
+ }
+
+ if (ARCCastChecker(Context).Visit(castExpr))
+ return;
+
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
+
+ if (makeUnavailableInSystemHeader(loc,
+ "converts between Objective-C and C pointers in -fobjc-arc"))
+ return;
+
+ unsigned srcKind = 0;
+ switch (exprACTC) {
+ case ACTC_none:
+ srcKind = (castExprType->isPointerType() ? 1 : 0);
+ break;
+ case ACTC_retainable:
+ srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
+ break;
+ case ACTC_indirectRetainable:
+ srcKind = 4;
+ break;
+ }
+
+ if (CCK == CCK_CStyleCast) {
+ // Check whether this could be fixed with a bridge cast.
+ SourceLocation AfterLParen = PP.getLocForEndOfToken(castRange.getBegin());
+ SourceLocation NoteLoc = AfterLParen.isValid()? AfterLParen : loc;
+
+ if (castType->isObjCARCBridgableType() &&
+ castExprType->isCARCBridgableType()) {
+ // explicit unbridged casts are allowed if the source of the cast is a
+ // message sent to an objc method (or property access)
+ if (ValidObjCARCNoBridgeCastExpr(castExpr, castType))
+ return;
+ Diag(loc, diag::err_arc_cast_requires_bridge)
+ << 2
+ << castExprType
+ << (castType->isBlockPointerType()? 1 : 0)
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+ Diag(NoteLoc, diag::note_arc_bridge)
+ << FixItHint::CreateInsertion(AfterLParen, "__bridge ");
+ Diag(NoteLoc, diag::note_arc_bridge_transfer)
+ << castExprType
+ << FixItHint::CreateInsertion(AfterLParen, "__bridge_transfer ");
+
+ return;
+ }
+
+ if (castType->isCARCBridgableType() &&
+ castExprType->isObjCARCBridgableType()){
+ Diag(loc, diag::err_arc_cast_requires_bridge)
+ << (castExprType->isBlockPointerType()? 1 : 0)
+ << castExprType
+ << 2
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+
+ Diag(NoteLoc, diag::note_arc_bridge)
+ << FixItHint::CreateInsertion(AfterLParen, "__bridge ");
+ Diag(NoteLoc, diag::note_arc_bridge_retained)
+ << castType
+ << FixItHint::CreateInsertion(AfterLParen, "__bridge_retained ");
+ return;
+ }
+ }
+
+ Diag(loc, diag::err_arc_mismatched_cast)
+ << (CCK != CCK_ImplicitConversion) << srcKind << castExprType << castType
+ << castRange << castExpr->getSourceRange();
+}
+
+bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
+ QualType exprType) {
+ QualType canCastType =
+ Context.getCanonicalType(castType).getUnqualifiedType();
+ QualType canExprType =
+ Context.getCanonicalType(exprType).getUnqualifiedType();
+ if (isa<ObjCObjectPointerType>(canCastType) &&
+ castType.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ canExprType->isObjCObjectPointerType()) {
+ if (const ObjCObjectPointerType *ObjT =
+ canExprType->getAs<ObjCObjectPointerType>())
+ if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable())
+ return false;
+ }
+ return true;
+}
+
+/// Look for an ObjCReclaimReturnedObject cast and destroy it.
+static Expr *maybeUndoReclaimObject(Expr *e) {
+ // For now, we just undo operands that are *immediately* reclaim
+ // expressions, which prevents the vast majority of potential
+ // problems here. To catch them all, we'd need to rebuild arbitrary
+ // value-propagating subexpressions --- we can't reliably rebuild
+ // in-place because of expression sharing.
+ if (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ if (ice->getCastKind() == CK_ObjCReclaimReturnedObject)
+ return ice->getSubExpr();
+
+ return e;
+}
+
+ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *SubExpr) {
+ QualType T = TSInfo->getType();
+ QualType FromType = SubExpr->getType();
+
+ bool MustConsume = false;
+ if (T->isDependentType() || SubExpr->isTypeDependent()) {
+ // Okay: we'll build a dependent expression type.
+ } else if (T->isObjCARCBridgableType() && FromType->isCARCBridgableType()) {
+ // Casting CF -> id
+ switch (Kind) {
+ case OBC_Bridge:
+ break;
+
+ case OBC_BridgeRetained:
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << 2
+ << FromType
+ << (T->isBlockPointerType()? 1 : 0)
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_transfer)
+ << FromType
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ "__bridge_transfer ");
+
+ Kind = OBC_Bridge;
+ break;
+
+ case OBC_BridgeTransfer:
+ // We must consume the Objective-C object produced by the cast.
+ MustConsume = true;
+ break;
+ }
+ } else if (T->isCARCBridgableType() && FromType->isObjCARCBridgableType()) {
+ // Okay: id -> CF
+ switch (Kind) {
+ case OBC_Bridge:
+ // Reclaiming a value that's going to be __bridge-casted to CF
+ // is very dangerous, so we don't do it.
+ SubExpr = maybeUndoReclaimObject(SubExpr);
+ break;
+
+ case OBC_BridgeRetained:
+ // Produce the object before casting it.
+ SubExpr = ImplicitCastExpr::Create(Context, FromType,
+ CK_ObjCProduceObject,
+ SubExpr, 0, VK_RValue);
+ break;
+
+ case OBC_BridgeTransfer:
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << (FromType->isBlockPointerType()? 1 : 0)
+ << FromType
+ << 2
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge ");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_retained)
+ << T
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge_retained ");
+
+ Kind = OBC_Bridge;
+ break;
+ }
+ } else {
+ Diag(LParenLoc, diag::err_arc_bridge_cast_incompatible)
+ << FromType << T << Kind
+ << SubExpr->getSourceRange()
+ << TSInfo->getTypeLoc().getSourceRange();
+ return ExprError();
+ }
+
+ Expr *Result = new (Context) ObjCBridgedCastExpr(LParenLoc, Kind,
+ BridgeKeywordLoc,
+ TSInfo, SubExpr);
+
+ if (MustConsume) {
+ ExprNeedsCleanups = true;
+ Result = ImplicitCastExpr::Create(Context, T, CK_ObjCConsumeObject, Result,
+ 0, VK_RValue);
+ }
+
+ return Result;
+}
+
+ExprResult Sema::ActOnObjCBridgedCast(Scope *S,
+ SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ ParsedType Type,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ TypeSourceInfo *TSInfo = 0;
+ QualType T = GetTypeFromParser(Type, &TSInfo);
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T, LParenLoc);
+ return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo,
+ SubExpr);
+}
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index a33f5d0b2f3e..9fbcbab0b7d0 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -769,7 +769,8 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// subaggregate, brace elision is assumed and the initializer is
// considered for the initialization of the first member of
// the subaggregate.
- if (ElemType->isAggregateType() || ElemType->isVectorType()) {
+ if (!SemaRef.getLangOptions().OpenCL &&
+ (ElemType->isAggregateType() || ElemType->isVectorType())) {
CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
StructuredIndex);
++StructuredIndex;
@@ -1184,6 +1185,15 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
continue;
}
+ // Make sure we can use this declaration.
+ if (SemaRef.DiagnoseUseOfDecl(*Field,
+ IList->getInit(Index)->getLocStart())) {
+ ++Index;
+ ++Field;
+ hadError = true;
+ continue;
+ }
+
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
@@ -1443,19 +1453,23 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// was a typo for another field name.
LookupResult R(SemaRef, FieldName, D->getFieldLoc(),
Sema::LookupMemberName);
- if (SemaRef.CorrectTypo(R, /*Scope=*/0, /*SS=*/0, RT->getDecl(), false,
- Sema::CTC_NoKeywords) &&
- (ReplacementField = R.getAsSingle<FieldDecl>()) &&
+ TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(FieldName, D->getFieldLoc()),
+ Sema::LookupMemberName, /*Scope=*/NULL, /*SS=*/NULL,
+ RT->getDecl(), false, Sema::CTC_NoKeywords);
+ if ((ReplacementField = Corrected.getCorrectionDeclAs<FieldDecl>()) &&
ReplacementField->getDeclContext()->getRedeclContext()
->Equals(RT->getDecl())) {
+ std::string CorrectedStr(
+ Corrected.getAsString(SemaRef.getLangOptions()));
+ std::string CorrectedQuotedStr(
+ Corrected.getQuoted(SemaRef.getLangOptions()));
SemaRef.Diag(D->getFieldLoc(),
diag::err_field_designator_unknown_suggest)
- << FieldName << CurrentObjectType << R.getLookupName()
- << FixItHint::CreateReplacement(D->getFieldLoc(),
- R.getLookupName().getAsString());
+ << FieldName << CurrentObjectType << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(D->getFieldLoc(), CorrectedStr);
SemaRef.Diag(ReplacementField->getLocation(),
- diag::note_previous_decl)
- << ReplacementField->getDeclName();
+ diag::note_previous_decl) << CorrectedQuotedStr;
} else {
SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
<< FieldName << CurrentObjectType;
@@ -1499,6 +1513,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
StructuredList->setInitializedFieldInUnion(*Field);
}
+ // Make sure we can use this declaration.
+ if (SemaRef.DiagnoseUseOfDecl(*Field, D->getFieldLoc())) {
+ ++Index;
+ return true;
+ }
+
// Update the designator with the field declaration.
D->setField(*Field);
@@ -2029,10 +2049,10 @@ InitializedEntity InitializedEntity::InitializeBase(ASTContext &Context,
DeclarationName InitializedEntity::getName() const {
switch (getKind()) {
- case EK_Parameter:
- if (!VariableOrMember)
- return DeclarationName();
- // Fall through
+ case EK_Parameter: {
+ ParmVarDecl *D = reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+ return (D ? D->getDeclName() : DeclarationName());
+ }
case EK_Variable:
case EK_Member:
@@ -2057,10 +2077,12 @@ DeclarationName InitializedEntity::getName() const {
DeclaratorDecl *InitializedEntity::getDecl() const {
switch (getKind()) {
case EK_Variable:
- case EK_Parameter:
case EK_Member:
return VariableOrMember;
+ case EK_Parameter:
+ return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+
case EK_Result:
case EK_Exception:
case EK_New:
@@ -2123,6 +2145,9 @@ void InitializationSequence::Step::Destroy() {
case SK_StringInit:
case SK_ObjCObjectConversion:
case SK_ArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject:
break;
case SK_ConversionSequence:
@@ -2131,7 +2156,7 @@ void InitializationSequence::Step::Destroy() {
}
bool InitializationSequence::isDirectReferenceBinding() const {
- return getKind() == ReferenceBinding && Steps.back().Kind == SK_BindReference;
+ return !Steps.empty() && Steps.back().Kind == SK_BindReference;
}
bool InitializationSequence::isAmbiguous() const {
@@ -2306,6 +2331,22 @@ void InitializationSequence::AddArrayInitStep(QualType T) {
Steps.push_back(S);
}
+void InitializationSequence::AddPassByIndirectCopyRestoreStep(QualType type,
+ bool shouldCopy) {
+ Step s;
+ s.Kind = (shouldCopy ? SK_PassByIndirectCopyRestore
+ : SK_PassByIndirectRestore);
+ s.Type = type;
+ Steps.push_back(s);
+}
+
+void InitializationSequence::AddProduceObjCObjectStep(QualType T) {
+ Step S;
+ S.Kind = SK_ProduceObjCObject;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
void InitializationSequence::SetOverloadFailure(FailureKind Failure,
OverloadingResult Result) {
setSequenceKind(FailedSequence);
@@ -2317,6 +2358,33 @@ void InitializationSequence::SetOverloadFailure(FailureKind Failure,
// Attempt initialization
//===----------------------------------------------------------------------===//
+static void MaybeProduceObjCObject(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity) {
+ if (!S.getLangOptions().ObjCAutoRefCount) return;
+
+ /// When initializing a parameter, produce the value if it's marked
+ /// __attribute__((ns_consumed)).
+ if (Entity.getKind() == InitializedEntity::EK_Parameter) {
+ if (!Entity.isParameterConsumed())
+ return;
+
+ assert(Entity.getType()->isObjCRetainableType() &&
+ "consuming an object of unretainable type?");
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+
+ /// When initializing a return value, if the return type is a
+ /// retainable type, then returns need to immediately retain the
+ /// object. If an autorelease is required, it will be done at the
+ /// last instant.
+ } else if (Entity.getKind() == InitializedEntity::EK_Result) {
+ if (!Entity.getType()->isObjCRetainableType())
+ return;
+
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+ }
+}
+
/// \brief Attempt list initialization (C++0x [dcl.init.list])
static void TryListInitialization(Sema &S,
const InitializedEntity &Entity,
@@ -2380,13 +2448,16 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
bool DerivedToBase;
bool ObjCConversion;
+ bool ObjCLifetimeConversion;
assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
T1, T2, DerivedToBase,
- ObjCConversion) &&
+ ObjCConversion,
+ ObjCLifetimeConversion) &&
"Must have incompatible references when binding via conversion");
(void)DerivedToBase;
(void)ObjCConversion;
-
+ (void)ObjCLifetimeConversion;
+
// Build the candidate set directly in the initialization sequence
// structure, so that it will persist if we fail.
OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
@@ -2513,10 +2584,12 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
bool NewDerivedToBase = false;
bool NewObjCConversion = false;
+ bool NewObjCLifetimeConversion = false;
Sema::ReferenceCompareResult NewRefRelationship
= S.CompareReferenceRelationship(DeclLoc, T1,
T2.getNonLValueExprType(S.Context),
- NewDerivedToBase, NewObjCConversion);
+ NewDerivedToBase, NewObjCConversion,
+ NewObjCLifetimeConversion);
if (NewRefRelationship == Sema::Ref_Incompatible) {
// If the type we've converted to is not reference-related to the
// type we're looking for, then there is another conversion step
@@ -2550,8 +2623,6 @@ static void TryReferenceInitialization(Sema &S,
const InitializationKind &Kind,
Expr *Initializer,
InitializationSequence &Sequence) {
- Sequence.setSequenceKind(InitializationSequence::ReferenceBinding);
-
QualType DestType = Entity.getType();
QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
Qualifiers T1Quals;
@@ -2584,10 +2655,11 @@ static void TryReferenceInitialization(Sema &S,
bool isRValueRef = !isLValueRef;
bool DerivedToBase = false;
bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
Expr::Classification InitCategory = Initializer->Classify(S.Context);
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase,
- ObjCConversion);
+ ObjCConversion, ObjCLifetimeConversion);
// C++0x [dcl.init.ref]p5:
// A reference to type "cv1 T1" is initialized by an expression of type
@@ -2746,11 +2818,15 @@ static void TryReferenceInitialization(Sema &S,
InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
- if (S.TryImplicitConversion(Sequence, TempEntity, Initializer,
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, TempEntity.getType(),
/*SuppressUserConversions*/ false,
AllowExplicit,
/*FIXME:InOverloadResolution=*/false,
- /*CStyle=*/Kind.isCStyleOrFunctionalCast())) {
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ /*AllowObjCWritebackConversion=*/false);
+
+ if (ICS.isBad()) {
// FIXME: Use the conversion function set stored in ICS to turn
// this into an overloading ambiguity diagnostic. However, we need
// to keep that set as an OverloadCandidateSet rather than as some
@@ -2764,6 +2840,8 @@ static void TryReferenceInitialization(Sema &S,
else
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
return;
+ } else {
+ Sequence.AddConversionSequenceStep(ICS, TempEntity.getType());
}
// [...] If T1 is reference-related to T2, cv1 must be the
@@ -2953,10 +3031,8 @@ static void TryDefaultInitialization(Sema &S,
// C++ [dcl.init]p6:
// To default-initialize an object of type T means:
// - if T is an array type, each element is default-initialized;
- QualType DestType = Entity.getType();
- while (const ArrayType *Array = S.Context.getAsArrayType(DestType))
- DestType = Array->getElementType();
-
+ QualType DestType = S.Context.getBaseElementType(Entity.getType());
+
// - if T is a (possibly cv-qualified) class type (Clause 9), the default
// constructor for T is called (and the initialization is ill-formed if
// T has no accessible default constructor);
@@ -2970,8 +3046,16 @@ static void TryDefaultInitialization(Sema &S,
// If a program calls for the default initialization of an object of
// a const-qualified type T, T shall be a class type with a user-provided
// default constructor.
- if (DestType.isConstQualified() && S.getLangOptions().CPlusPlus)
+ if (DestType.isConstQualified() && S.getLangOptions().CPlusPlus) {
Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // If the destination type has a lifetime property, zero-initialize it.
+ if (DestType.getQualifiers().hasObjCLifetime()) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ return;
+ }
}
/// \brief Attempt a user-defined conversion between two types (C++ [dcl.init]),
@@ -3125,6 +3209,86 @@ static void TryUserDefinedConversion(Sema &S,
}
}
+/// The non-zero enum values here are indexes into diagnostic alternatives.
+enum InvalidICRKind { IIK_okay, IIK_nonlocal, IIK_nonscalar };
+
+/// Determines whether this expression is an acceptable ICR source.
+static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
+ bool isAddressOf) {
+ // Skip parens.
+ e = e->IgnoreParens();
+
+ // Skip address-of nodes.
+ if (UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_AddrOf)
+ return isInvalidICRSource(C, op->getSubExpr(), /*addressof*/ true);
+
+ // Skip certain casts.
+ } else if (CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_NoOp:
+ return isInvalidICRSource(C, ce->getSubExpr(), isAddressOf);
+
+ case CK_ArrayToPointerDecay:
+ return IIK_nonscalar;
+
+ case CK_NullToPointer:
+ return IIK_okay;
+
+ default:
+ break;
+ }
+
+ // If we have a declaration reference, it had better be a local variable.
+ } else if (isa<DeclRefExpr>(e) || isa<BlockDeclRefExpr>(e)) {
+ if (!isAddressOf) return IIK_nonlocal;
+
+ VarDecl *var;
+ if (isa<DeclRefExpr>(e)) {
+ var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
+ if (!var) return IIK_nonlocal;
+ } else {
+ var = cast<BlockDeclRefExpr>(e)->getDecl();
+ }
+
+ return (var->hasLocalStorage() ? IIK_okay : IIK_nonlocal);
+
+ // If we have a conditional operator, check both sides.
+ } else if (ConditionalOperator *cond = dyn_cast<ConditionalOperator>(e)) {
+ if (InvalidICRKind iik = isInvalidICRSource(C, cond->getLHS(), isAddressOf))
+ return iik;
+
+ return isInvalidICRSource(C, cond->getRHS(), isAddressOf);
+
+ // These are never scalar.
+ } else if (isa<ArraySubscriptExpr>(e)) {
+ return IIK_nonscalar;
+
+ // Otherwise, it needs to be a null pointer constant.
+ } else {
+ return (e->isNullPointerConstant(C, Expr::NPC_ValueDependentIsNull)
+ ? IIK_okay : IIK_nonlocal);
+ }
+
+ return IIK_nonlocal;
+}
+
+/// Check whether the given expression is a valid operand for an
+/// indirect copy/restore.
+static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
+ assert(src->isRValue());
+
+ InvalidICRKind iik = isInvalidICRSource(S.Context, src, false);
+ if (iik == IIK_okay) return;
+
+ S.Diag(src->getExprLoc(), diag::err_arc_nonlocal_writeback)
+ << ((unsigned) iik - 1) // shift index into diagnostic explanations
+ << src->getSourceRange();
+}
+
/// \brief Determine whether we have compatible array types for the
/// purposes of GNU by-copy array initialization.
static bool hasCompatibleArrayTypes(ASTContext &Context,
@@ -3144,6 +3308,53 @@ static bool hasCompatibleArrayTypes(ASTContext &Context,
return Source->isConstantArrayType() && Dest->isIncompleteArrayType();
}
+static bool tryObjCWritebackConversion(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity,
+ Expr *Initializer) {
+ bool ArrayDecay = false;
+ QualType ArgType = Initializer->getType();
+ QualType ArgPointee;
+ if (const ArrayType *ArgArrayType = S.Context.getAsArrayType(ArgType)) {
+ ArrayDecay = true;
+ ArgPointee = ArgArrayType->getElementType();
+ ArgType = S.Context.getPointerType(ArgPointee);
+ }
+
+ // Handle write-back conversion.
+ QualType ConvertedArgType;
+ if (!S.isObjCWritebackConversion(ArgType, Entity.getType(),
+ ConvertedArgType))
+ return false;
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // Do we need an lvalue conversion?
+ if (ArrayDecay || Initializer->isGLValue()) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+
+ QualType ResultType;
+ if (ArrayDecay) {
+ ICS.Standard.First = ICK_Array_To_Pointer;
+ ResultType = S.Context.getPointerType(ArgPointee);
+ } else {
+ ICS.Standard.First = ICK_Lvalue_To_Rvalue;
+ ResultType = Initializer->getType().getNonLValueExprType(S.Context);
+ }
+
+ Sequence.AddConversionSequenceStep(ICS, ResultType);
+ }
+
+ Sequence.AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
+ return true;
+}
+
InitializationSequence::InitializationSequence(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -3255,12 +3466,28 @@ InitializationSequence::InitializationSequence(Sema &S,
return;
}
- // Handle initialization in C
+ // Determine whether we should consider writeback conversions for
+ // Objective-C ARC.
+ bool allowObjCWritebackConversion = S.getLangOptions().ObjCAutoRefCount &&
+ Entity.getKind() == InitializedEntity::EK_Parameter;
+
+ // We're at the end of the line for C: it's either a write-back conversion
+ // or it's a C assignment. There's no need to check anything else.
if (!S.getLangOptions().CPlusPlus) {
+ // If allowed, check whether this is an Objective-C writeback conversion.
+ if (allowObjCWritebackConversion &&
+ tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
+ return;
+ }
+
+ // Handle initialization in C
AddCAssignmentStep(DestType);
+ MaybeProduceObjCObject(S, *this, Entity);
return;
}
+ assert(S.getLangOptions().CPlusPlus);
+
// - If the destination type is a (possibly cv-qualified) class type:
if (DestType->isRecordType()) {
// - If the initialization is direct-initialization, or if it is
@@ -3294,6 +3521,7 @@ InitializationSequence::InitializationSequence(Sema &S,
// type, conversion functions are considered.
if (!SourceType.isNull() && SourceType->isRecordType()) {
TryUserDefinedConversion(S, Entity, Kind, Initializer, *this);
+ MaybeProduceObjCObject(S, *this, Entity);
return;
}
@@ -3302,12 +3530,38 @@ InitializationSequence::InitializationSequence(Sema &S,
// conversions (Clause 4) will be used, if necessary, to convert the
// initializer expression to the cv-unqualified version of the
// destination type; no user-defined conversions are considered.
- if (S.TryImplicitConversion(*this, Entity, Initializer,
- /*SuppressUserConversions*/ true,
+
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, Entity.getType(),
+ /*SuppressUserConversions*/true,
/*AllowExplicitConversions*/ false,
/*InOverloadResolution*/ false,
- /*CStyle=*/Kind.isCStyleOrFunctionalCast()))
- {
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ allowObjCWritebackConversion);
+
+ if (ICS.isStandard() &&
+ ICS.Standard.Second == ICK_Writeback_Conversion) {
+ // Objective-C ARC writeback conversion.
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *Param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (Param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // If there was an lvalue adjustment, add it as a separate conversion.
+ if (ICS.Standard.First == ICK_Array_To_Pointer ||
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ ImplicitConversionSequence LvalueICS;
+ LvalueICS.setStandard();
+ LvalueICS.Standard.setAsIdentityConversion();
+ LvalueICS.Standard.setAllToTypes(ICS.Standard.getToType(0));
+ LvalueICS.Standard.First = ICS.Standard.First;
+ AddConversionSequenceStep(LvalueICS, ICS.Standard.getToType(0));
+ }
+
+ AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
+ } else if (ICS.isBad()) {
DeclAccessPair dap;
if (Initializer->getType() == Context.OverloadTy &&
!S.ResolveAddressOfOverloadedFunction(Initializer
@@ -3315,6 +3569,10 @@ InitializationSequence::InitializationSequence(Sema &S,
SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
else
SetFailed(InitializationSequence::FK_ConversionFailed);
+ } else {
+ AddConversionSequenceStep(ICS, Entity.getType());
+
+ MaybeProduceObjCObject(S, *this, Entity);
}
}
@@ -3560,7 +3818,7 @@ static ExprResult CopyObject(Sema &S,
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << Best->Function->isDeleted();
+ << 1 << Best->Function->isDeleted();
return ExprError();
}
@@ -3633,6 +3891,11 @@ void InitializationSequence::PrintInitLocationNote(Sema &S,
}
}
+static bool isReferenceBinding(const InitializationSequence::Step &s) {
+ return s.Kind == InitializationSequence::SK_BindReference ||
+ s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
+}
+
ExprResult
InitializationSequence::Perform(Sema &S,
const InitializedEntity &Entity,
@@ -3684,19 +3947,9 @@ InitializationSequence::Perform(Sema &S,
}
}
-
- if (Kind.getKind() == InitializationKind::IK_Copy || Kind.isExplicitCast())
- return ExprResult(Args.release()[0]);
-
- if (Args.size() == 0)
- return S.Owned((Expr *)0);
-
- unsigned NumArgs = Args.size();
- return S.Owned(new (S.Context) ParenListExpr(S.Context,
- SourceLocation(),
- (Expr **)Args.release(),
- NumArgs,
- SourceLocation()));
+ assert(Kind.getKind() == InitializationKind::IK_Copy ||
+ Kind.isExplicitCast());
+ return ExprResult(Args.release()[0]);
}
// No steps means no initialization.
@@ -3733,7 +3986,10 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment:
case SK_StringInit:
case SK_ObjCObjectConversion:
- case SK_ArrayInit: {
+ case SK_ArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject: {
assert(Args.size() == 1);
CurInit = Args.get()[0];
if (!CurInit.get()) return ExprError();
@@ -3841,12 +4097,22 @@ InitializationSequence::Perform(Sema &S,
break;
case SK_BindReferenceToTemporary:
- // Reference binding does not have any corresponding ASTs.
-
// Check exception specifications
if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
return ExprError();
+ // Materialize the temporary into memory.
+ CurInit = new (S.Context) MaterializeTemporaryExpr(
+ Entity.getType().getNonReferenceType(),
+ CurInit.get(),
+ Entity.getType()->isLValueReferenceType());
+
+ // If we're binding to an Objective-C object that has lifetime, we
+ // need cleanups.
+ if (S.getLangOptions().ObjCAutoRefCount &&
+ CurInit.get()->getType()->isObjCLifetimeType())
+ S.ExprNeedsCleanups = true;
+
break;
case SK_ExtraneousCopyToTemporary:
@@ -3925,8 +4191,7 @@ InitializationSequence::Perform(Sema &S,
CreatedObject = Conversion->getResultType()->isRecordType();
}
- bool RequiresCopy = !IsCopy &&
- getKind() != InitializationSequence::ReferenceBinding;
+ bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
if (RequiresCopy || shouldBindAsTemporary(Entity))
CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
else if (CreatedObject && shouldDestroyTemporary(Entity)) {
@@ -3969,10 +4234,14 @@ InitializationSequence::Perform(Sema &S,
}
case SK_ConversionSequence: {
+ Sema::CheckedConversionKind CCK
+ = Kind.isCStyleCast()? Sema::CCK_CStyleCast
+ : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
+ : Kind.isExplicitCast()? Sema::CCK_OtherCast
+ : Sema::CCK_ImplicitConversion;
ExprResult CurInitExprRes =
S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS,
- getAssignmentAction(Entity),
- Kind.isCStyleOrFunctionalCast());
+ getAssignmentAction(Entity), CCK);
if (CurInitExprRes.isInvalid())
return ExprError();
CurInit = move(CurInitExprRes);
@@ -4182,7 +4451,20 @@ InitializationSequence::Perform(Sema &S,
}
}
}
+ break;
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ checkIndirectCopyRestoreSource(S, CurInit.get());
+ CurInit = S.Owned(new (S.Context)
+ ObjCIndirectCopyRestoreExpr(CurInit.take(), Step->Type,
+ Step->Kind == SK_PassByIndirectCopyRestore));
+ break;
+
+ case SK_ProduceObjCObject:
+ CurInit = S.Owned(ImplicitCastExpr::Create(S.Context, Step->Type,
+ CK_ObjCProduceObject,
+ CurInit.take(), 0, VK_RValue));
break;
}
}
@@ -4278,7 +4560,7 @@ bool InitializationSequence::Diagnose(Sema &S,
true);
if (Ovl == OR_Deleted) {
S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << Best->Function->isDeleted();
+ << 1 << Best->Function->isDeleted();
} else {
llvm_unreachable("Inconsistent overload resolution?");
}
@@ -4443,7 +4725,7 @@ bool InitializationSequence::Diagnose(Sema &S,
= FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
if (Ovl == OR_Deleted) {
S.Diag(Best->Function->getLocation(), diag::note_unavailable_here)
- << Best->Function->isDeleted();
+ << 1 << Best->Function->isDeleted();
} else {
llvm_unreachable("Inconsistent overload resolution?");
}
@@ -4587,10 +4869,6 @@ void InitializationSequence::dump(llvm::raw_ostream &OS) const {
case NormalSequence:
OS << "Normal sequence: ";
break;
-
- case ReferenceBinding:
- OS << "Reference binding: ";
- break;
}
for (step_iterator S = step_begin(), SEnd = step_end(); S != SEnd; ++S) {
@@ -4674,6 +4952,18 @@ void InitializationSequence::dump(llvm::raw_ostream &OS) const {
case SK_ArrayInit:
OS << "array initialization";
break;
+
+ case SK_PassByIndirectCopyRestore:
+ OS << "pass by indirect copy and restore";
+ break;
+
+ case SK_PassByIndirectRestore:
+ OS << "pass by indirect restore";
+ break;
+
+ case SK_ProduceObjCObject:
+ OS << "Objective-C object retension";
+ break;
}
}
}
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index 92ade1efcf01..0e448e31207d 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -20,6 +20,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/TypoCorrection.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
@@ -42,6 +43,7 @@
#include <iterator>
#include <utility>
#include <algorithm>
+#include <map>
using namespace clang;
using namespace sema;
@@ -207,6 +209,7 @@ static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
bool Redeclaration) {
unsigned IDNS = 0;
switch (NameKind) {
+ case Sema::LookupObjCImplicitSelfParam:
case Sema::LookupOrdinaryName:
case Sema::LookupRedeclarationWithLinkage:
IDNS = Decl::IDNS_Ordinary;
@@ -1095,7 +1098,10 @@ bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
if (LeftStartingScope && !((*I)->hasLinkage()))
continue;
}
-
+ else if (NameKind == LookupObjCImplicitSelfParam &&
+ !isa<ImplicitParamDecl>(*I))
+ continue;
+
R.addDecl(*I);
if ((*I)->getAttr<OverloadableAttr>()) {
@@ -1379,6 +1385,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// Look for this member in our base classes
CXXRecordDecl::BaseMatchesCallback *BaseCallback = 0;
switch (R.getLookupKind()) {
+ case LookupObjCImplicitSelfParam:
case LookupOrdinaryName:
case LookupMemberName:
case LookupRedeclarationWithLinkage:
@@ -2137,15 +2144,15 @@ void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
}
}
-Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
+Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis) {
- D = D->getDefinition();
- assert((D && !D->isBeingDefined()) &&
+ RD = RD->getDefinition();
+ assert((RD && !RD->isBeingDefined()) &&
"doing special member lookup into record that isn't fully complete");
if (RValueThis || ConstThis || VolatileThis)
assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
@@ -2155,7 +2162,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
"parameter-less special members can't have qualified arguments");
llvm::FoldingSetNodeID ID;
- ID.AddPointer(D);
+ ID.AddPointer(RD);
ID.AddInteger(SM);
ID.AddInteger(ConstArg);
ID.AddInteger(VolatileArg);
@@ -2176,9 +2183,9 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
SpecialMemberCache.InsertNode(Result, InsertPoint);
if (SM == CXXDestructor) {
- if (!D->hasDeclaredDestructor())
- DeclareImplicitDestructor(D);
- CXXDestructorDecl *DD = D->getDestructor();
+ if (!RD->hasDeclaredDestructor())
+ DeclareImplicitDestructor(RD);
+ CXXDestructorDecl *DD = RD->getDestructor();
assert(DD && "record without a destructor");
Result->setMethod(DD);
Result->setSuccess(DD->isDeleted());
@@ -2188,7 +2195,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
// Prepare for overload resolution. Here we construct a synthetic argument
// if necessary and make sure that implicit functions are declared.
- CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(D));
+ CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
DeclarationName Name;
Expr *Arg = 0;
unsigned NumArgs;
@@ -2196,18 +2203,18 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
if (SM == CXXDefaultConstructor) {
Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
NumArgs = 0;
- if (D->needsImplicitDefaultConstructor())
- DeclareImplicitDefaultConstructor(D);
+ if (RD->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(RD);
} else {
if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) {
Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
- if (!D->hasDeclaredCopyConstructor())
- DeclareImplicitCopyConstructor(D);
+ if (!RD->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(RD);
// TODO: Move constructors
} else {
Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
- if (!D->hasDeclaredCopyAssignment())
- DeclareImplicitCopyAssignment(D);
+ if (!RD->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(RD);
// TODO: Move assignment
}
@@ -2225,7 +2232,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
// there is no semantic difference for class types in this restricted
// case.
ExprValueKind VK;
- if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
VK = VK_LValue;
else
VK = VK_RValue;
@@ -2240,7 +2247,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
ThisTy.addConst();
if (VolatileThis)
ThisTy.addVolatile();
- Expr::Classification ObjectClassification =
+ Expr::Classification Classification =
(new (Context) OpaqueValueExpr(SourceLocation(), ThisTy,
RValueThis ? VK_RValue : VK_LValue))->
Classify(Context);
@@ -2252,16 +2259,33 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
DeclContext::lookup_iterator I, E;
Result->setConstParamMatch(false);
- llvm::tie(I, E) = D->lookup(Name);
+ llvm::tie(I, E) = RD->lookup(Name);
assert((I != E) &&
"lookup for a constructor or assignment operator was empty");
for ( ; I != E; ++I) {
- if ((*I)->isInvalidDecl())
+ Decl *Cand = *I;
+
+ if (Cand->isInvalidDecl())
continue;
- if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) {
- AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public), &Arg, NumArgs,
- OCS, true);
+ if (UsingShadowDecl *U = dyn_cast<UsingShadowDecl>(Cand)) {
+ // FIXME: [namespace.udecl]p15 says that we should only consider a
+ // using declaration here if it does not match a declaration in the
+ // derived class. We do not implement this correctly in other cases
+ // either.
+ Cand = U->getTargetDecl();
+
+ if (Cand->isInvalidDecl())
+ continue;
+ }
+
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodCandidate(M, DeclAccessPair::make(M, AS_public), RD, ThisTy,
+ Classification, &Arg, NumArgs, OCS, true);
+ else
+ AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public), &Arg,
+ NumArgs, OCS, true);
// Here we're looking for a const parameter to speed up creation of
// implicit copy methods.
@@ -2269,13 +2293,21 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *D,
(SM == CXXCopyConstructor &&
cast<CXXConstructorDecl>(M)->isCopyConstructor())) {
QualType ArgType = M->getType()->getAs<FunctionProtoType>()->getArgType(0);
- if (ArgType->getPointeeType().isConstQualified())
+ if (!ArgType->isReferenceType() ||
+ ArgType->getPointeeType().isConstQualified())
Result->setConstParamMatch(true);
}
+ } else if (FunctionTemplateDecl *Tmpl =
+ dyn_cast<FunctionTemplateDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodTemplateCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ RD, 0, ThisTy, Classification, &Arg, NumArgs,
+ OCS, true);
+ else
+ AddTemplateOverloadCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ 0, &Arg, NumArgs, OCS, true);
} else {
- FunctionTemplateDecl *Tmpl = cast<FunctionTemplateDecl>(*I);
- AddTemplateOverloadCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
- 0, &Arg, NumArgs, OCS, true);
+ assert(isa<UsingDecl>(Cand) && "illegal Kind of operator = Decl");
}
}
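
The UsingShadowDecl case added above corresponds, at the source level, to a class that re-exposes a base-class assignment operator through a using-declaration; lookup of 'operator=' in the derived class then yields a shadow declaration whose target lives in the base. A deliberately minimal illustration of that shape (not taken from the clang test suite):

    // Lookup of 'operator=' in Derived finds, besides the implicitly declared
    // copy assignment, a UsingShadowDecl whose target is Base::operator=.
    struct Base {
      Base &operator=(const Base &);
    };
    struct Derived : Base {
      using Base::operator=;
    };
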
@@ -2310,10 +2342,10 @@ CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
return cast_or_null<CXXConstructorDecl>(Result->getMethod());
}
-/// \brief Look up the copy constructor for the given class.
-CXXConstructorDecl *Sema::LookupCopyConstructor(CXXRecordDecl *Class,
- unsigned Quals,
- bool *ConstParamMatch) {
+/// \brief Look up the copying constructor for the given class.
+CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
+ unsigned Quals,
+ bool *ConstParamMatch) {
assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
"non-const, non-volatile qualifiers for copy ctor arg");
SpecialMemberOverloadResult *Result =
@@ -2341,6 +2373,27 @@ DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
return Class->lookup(Name);
}
+/// \brief Look up the copying assignment operator for the given class.
+CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
+ unsigned Quals, bool RValueThis,
+ unsigned ThisQuals,
+ bool *ConstParamMatch) {
+ assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment arg");
+ assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment this");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis,
+ ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
+
+ if (ConstParamMatch)
+ *ConstParamMatch = Result->hasConstParamMatch();
+
+ return Result->getMethod();
+}
+
/// \brief Look for the destructor of the given class.
///
/// During semantic analysis, this routine should be used in lieu of
@@ -2995,6 +3048,12 @@ LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
//===----------------------------------------------------------------------===//
namespace {
+
+typedef llvm::StringMap<TypoCorrection, llvm::BumpPtrAllocator> TypoResultsMap;
+typedef std::map<unsigned, TypoResultsMap *> TypoEditDistanceMap;
+
+static const unsigned MaxTypoDistanceResultSets = 5;
+
class TypoCorrectionConsumer : public VisibleDeclConsumer {
/// \brief The name written that is a typo in the source.
llvm::StringRef Typo;
@@ -3002,33 +3061,55 @@ class TypoCorrectionConsumer : public VisibleDeclConsumer {
/// \brief The results found that have the smallest edit distance
/// found (so far) with the typo name.
///
- /// The boolean value indicates whether there is a keyword with this name.
- llvm::StringMap<bool, llvm::BumpPtrAllocator> BestResults;
+ /// The pointer value being set to the current DeclContext indicates
+ /// whether there is a keyword with this name.
+ TypoEditDistanceMap BestResults;
+
+ /// \brief The worst of the best N edit distances found so far.
+ unsigned MaxEditDistance;
- /// \brief The best edit distance found so far.
- unsigned BestEditDistance;
+ Sema &SemaRef;
public:
- explicit TypoCorrectionConsumer(IdentifierInfo *Typo)
+ explicit TypoCorrectionConsumer(Sema &SemaRef, IdentifierInfo *Typo)
: Typo(Typo->getName()),
- BestEditDistance((std::numeric_limits<unsigned>::max)()) { }
+ MaxEditDistance((std::numeric_limits<unsigned>::max)()),
+ SemaRef(SemaRef) { }
+ ~TypoCorrectionConsumer() {
+ for (TypoEditDistanceMap::iterator I = BestResults.begin(),
+ IEnd = BestResults.end();
+ I != IEnd;
+ ++I)
+ delete I->second;
+ }
+
virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, bool InBaseClass);
void FoundName(llvm::StringRef Name);
- void addKeywordResult(ASTContext &Context, llvm::StringRef Keyword);
-
- typedef llvm::StringMap<bool, llvm::BumpPtrAllocator>::iterator iterator;
- iterator begin() { return BestResults.begin(); }
- iterator end() { return BestResults.end(); }
- void erase(iterator I) { BestResults.erase(I); }
+ void addKeywordResult(llvm::StringRef Keyword);
+ void addName(llvm::StringRef Name, NamedDecl *ND, unsigned Distance,
+ NestedNameSpecifier *NNS=NULL);
+ void addCorrection(TypoCorrection Correction);
+
+ typedef TypoResultsMap::iterator result_iterator;
+ typedef TypoEditDistanceMap::iterator distance_iterator;
+ distance_iterator begin() { return BestResults.begin(); }
+ distance_iterator end() { return BestResults.end(); }
+ void erase(distance_iterator I) { BestResults.erase(I); }
unsigned size() const { return BestResults.size(); }
bool empty() const { return BestResults.empty(); }
- bool &operator[](llvm::StringRef Name) {
- return BestResults[Name];
+ TypoCorrection &operator[](llvm::StringRef Name) {
+ return (*BestResults.begin()->second)[Name];
+ }
+
+ unsigned getMaxEditDistance() const {
+ return MaxEditDistance;
}
- unsigned getBestEditDistance() const { return BestEditDistance; }
+ unsigned getBestEditDistance() {
+ return (BestResults.empty()) ? MaxEditDistance : BestResults.begin()->first;
+ }
};
}
@@ -3053,55 +3134,181 @@ void TypoCorrectionConsumer::FoundName(llvm::StringRef Name) {
// Use a simple length-based heuristic to determine the minimum possible
// edit distance. If the minimum isn't good enough, bail out early.
unsigned MinED = abs((int)Name.size() - (int)Typo.size());
- if (MinED > BestEditDistance || (MinED && Typo.size() / MinED < 3))
+ if (MinED > MaxEditDistance || (MinED && Typo.size() / MinED < 3))
return;
// Compute an upper bound on the allowable edit distance, so that the
// edit-distance algorithm can short-circuit.
unsigned UpperBound =
- std::min(unsigned((Typo.size() + 2) / 3), BestEditDistance);
+ std::min(unsigned((Typo.size() + 2) / 3), MaxEditDistance);
// Compute the edit distance between the typo and the name of this
// entity. If this edit distance is not worse than the best edit
// distance we've seen so far, add it to the list of results.
unsigned ED = Typo.edit_distance(Name, true, UpperBound);
- if (ED == 0)
- return;
- if (ED < BestEditDistance) {
- // This result is better than any we've seen before; clear out
- // the previous results.
- BestResults.clear();
- BestEditDistance = ED;
- } else if (ED > BestEditDistance) {
+ if (ED > MaxEditDistance) {
// This result is worse than the best results we've seen so far;
// ignore it.
return;
}
- // Add this name to the list of results. By not assigning a value, we
- // keep the current value if we've seen this name before (either as a
- // keyword or as a declaration), or get the default value (not a keyword)
- // if we haven't seen it before.
- (void)BestResults[Name];
+ addName(Name, NULL, ED);
}
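
The early exits in FoundName above boil down to two length-based facts: a candidate whose length differs from the typo's by D needs at least D edits, and a correction is only considered when the typo contributes at least three characters per edit. A small sketch of that filter with a worked example (the helper name is invented):

    #include <cstdlib>
    #include <string>

    // True if Name could still be an acceptable correction for Typo, judging
    // only by the two lengths; only then is the real edit distance computed.
    static bool worthMeasuring(const std::string &Typo, const std::string &Name,
                               unsigned MaxEditDistance) {
      unsigned MinED = std::abs((int)Name.size() - (int)Typo.size());
      return MinED <= MaxEditDistance && (MinED == 0 || Typo.size() / MinED >= 3);
    }
    // e.g. Typo = "stirng", Name = "string": MinED = 0, so the edit-distance
    // algorithm runs; Name = "basic_string" gives MinED = 6 and 6/6 = 1 < 3,
    // so the candidate is rejected without measuring it.
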
-void TypoCorrectionConsumer::addKeywordResult(ASTContext &Context,
- llvm::StringRef Keyword) {
+void TypoCorrectionConsumer::addKeywordResult(llvm::StringRef Keyword) {
// Compute the edit distance between the typo and this keyword.
// If this edit distance is not worse than the best edit
// distance we've seen so far, add it to the list of results.
unsigned ED = Typo.edit_distance(Keyword);
- if (ED < BestEditDistance) {
- BestResults.clear();
- BestEditDistance = ED;
- } else if (ED > BestEditDistance) {
+ if (ED > MaxEditDistance) {
// This result is worse than the best results we've seen so far;
// ignore it.
return;
}
- BestResults[Keyword] = true;
+ addName(Keyword, TypoCorrection::KeywordDecl(), ED);
+}
+
+void TypoCorrectionConsumer::addName(llvm::StringRef Name,
+ NamedDecl *ND,
+ unsigned Distance,
+ NestedNameSpecifier *NNS) {
+ addCorrection(TypoCorrection(&SemaRef.Context.Idents.get(Name),
+ ND, NNS, Distance));
+}
+
+void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
+ llvm::StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
+ TypoResultsMap *& Map = BestResults[Correction.getEditDistance()];
+ if (!Map)
+ Map = new TypoResultsMap;
+
+ TypoCorrection &CurrentCorrection = (*Map)[Name];
+ if (!CurrentCorrection ||
+ // FIXME: The following should be rolled up into an operator< on
+ // TypoCorrection with a more principled definition.
+ CurrentCorrection.isKeyword() < Correction.isKeyword() ||
+ Correction.getAsString(SemaRef.getLangOptions()) <
+ CurrentCorrection.getAsString(SemaRef.getLangOptions()))
+ CurrentCorrection = Correction;
+
+ while (BestResults.size() > MaxTypoDistanceResultSets) {
+ TypoEditDistanceMap::iterator Last = BestResults.end();
+ --Last;
+ delete Last->second;
+ BestResults.erase(Last);
+ }
+}
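
addCorrection above buckets candidate corrections by edit distance and, once more than MaxTypoDistanceResultSets (five) buckets exist, drops the most distant ones. A simplified, self-contained sketch of that bookkeeping, using plain std::map and std::string in place of clang's StringMap and TypoCorrection:

    #include <map>
    #include <string>

    typedef std::map<std::string, std::string> ResultsMap;   // name -> correction
    typedef std::map<unsigned, ResultsMap> EditDistanceMap;  // distance -> bucket
    static const unsigned MaxResultSets = 5;

    void addCorrection(EditDistanceMap &Best, unsigned ED, const std::string &Name) {
      Best[ED][Name] = Name;                // record it in this distance's bucket
      while (Best.size() > MaxResultSets) { // std::map keeps keys sorted ascending,
        EditDistanceMap::iterator Last = Best.end();
        --Last;                             // so the last bucket is the farthest one
        Best.erase(Last);
      }
    }
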
+
+namespace {
+
+class SpecifierInfo {
+ public:
+ DeclContext* DeclCtx;
+ NestedNameSpecifier* NameSpecifier;
+ unsigned EditDistance;
+
+ SpecifierInfo(DeclContext *Ctx, NestedNameSpecifier *NNS, unsigned ED)
+ : DeclCtx(Ctx), NameSpecifier(NNS), EditDistance(ED) {}
+};
+
+typedef llvm::SmallVector<DeclContext*, 4> DeclContextList;
+typedef llvm::SmallVector<SpecifierInfo, 16> SpecifierInfoList;
+
+class NamespaceSpecifierSet {
+ ASTContext &Context;
+ DeclContextList CurContextChain;
+ bool isSorted;
+
+ SpecifierInfoList Specifiers;
+ llvm::SmallSetVector<unsigned, 4> Distances;
+ llvm::DenseMap<unsigned, SpecifierInfoList> DistanceMap;
+
+ /// \brief Helper for building the list of DeclContexts between the current
+ /// context and the top of the translation unit
+ static DeclContextList BuildContextChain(DeclContext *Start);
+
+ void SortNamespaces();
+
+ public:
+ explicit NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext)
+ : Context(Context), CurContextChain(BuildContextChain(CurContext)),
+ isSorted(true) {}
+
+ /// \brief Add the namespace to the set, computing the corresponding
+ /// NestedNameSpecifier and its distance in the process.
+ void AddNamespace(NamespaceDecl *ND);
+
+ typedef SpecifierInfoList::iterator iterator;
+ iterator begin() {
+ if (!isSorted) SortNamespaces();
+ return Specifiers.begin();
+ }
+ iterator end() { return Specifiers.end(); }
+};
+
+}
+
+DeclContextList NamespaceSpecifierSet::BuildContextChain(DeclContext *Start) {
+ assert(Start && "Bulding a context chain from a null context");
+ DeclContextList Chain;
+ for (DeclContext *DC = Start->getPrimaryContext(); DC != NULL;
+ DC = DC->getLookupParent()) {
+ NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(DC);
+ if (!DC->isInlineNamespace() && !DC->isTransparentContext() &&
+ !(ND && ND->isAnonymousNamespace()))
+ Chain.push_back(DC->getPrimaryContext());
+ }
+ return Chain;
+}
+
+void NamespaceSpecifierSet::SortNamespaces() {
+ llvm::SmallVector<unsigned, 4> sortedDistances;
+ sortedDistances.append(Distances.begin(), Distances.end());
+
+ if (sortedDistances.size() > 1)
+ std::sort(sortedDistances.begin(), sortedDistances.end());
+
+ Specifiers.clear();
+ for (llvm::SmallVector<unsigned, 4>::iterator DI = sortedDistances.begin(),
+ DIEnd = sortedDistances.end();
+ DI != DIEnd; ++DI) {
+ SpecifierInfoList &SpecList = DistanceMap[*DI];
+ Specifiers.append(SpecList.begin(), SpecList.end());
+ }
+
+ isSorted = true;
+}
+
+void NamespaceSpecifierSet::AddNamespace(NamespaceDecl *ND) {
+ DeclContext *Ctx = cast<DeclContext>(ND);
+ NestedNameSpecifier *NNS = NULL;
+ unsigned NumSpecifiers = 0;
+ DeclContextList NamespaceDeclChain(BuildContextChain(Ctx));
+
+ // Eliminate common elements from the two DeclContext chains
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd && !NamespaceDeclChain.empty() &&
+ NamespaceDeclChain.back() == *C; ++C) {
+ NamespaceDeclChain.pop_back();
+ }
+
+ // Build the NestedNameSpecifier from what is left of the NamespaceDeclChain
+ for (DeclContextList::reverse_iterator C = NamespaceDeclChain.rbegin(),
+ CEnd = NamespaceDeclChain.rend();
+ C != CEnd; ++C) {
+ NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C);
+ if (ND) {
+ NNS = NestedNameSpecifier::Create(Context, NNS, ND);
+ ++NumSpecifiers;
+ }
+ }
+
+ isSorted = false;
+ Distances.insert(NumSpecifiers);
+ DistanceMap[NumSpecifiers].push_back(SpecifierInfo(Ctx, NNS, NumSpecifiers));
}
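
AddNamespace above derives the minimal qualifier for a suggested namespace by trimming the contexts it shares with the current context and counting what remains as the suggestion's extra distance. A self-contained sketch of that trimming, with strings standing in for DeclContexts (names and the helper are illustrative):

    #include <string>
    #include <vector>

    // Both chains are innermost-first, the order BuildContextChain produces.
    // What remains of the namespace chain is spelled out as the
    // nested-name-specifier; its length is the ranking distance.
    std::vector<std::string>
    specifierChain(std::vector<std::string> NamespaceChain,
                   const std::vector<std::string> &CurContextChain) {
      for (std::vector<std::string>::const_reverse_iterator
             C = CurContextChain.rbegin(), CEnd = CurContextChain.rend();
           C != CEnd && !NamespaceChain.empty() && NamespaceChain.back() == *C;
           ++C)
        NamespaceChain.pop_back();          // drop contexts shared with where we are
      return NamespaceChain;
    }
    // e.g. current context A::B, suggested namespace A::C::D:
    //   {"D","C","A"} vs {"B","A"}  ->  {"D","C"}, spelled C::D::, distance 2.
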
/// \brief Perform name lookup for a possible result for typo correction.
@@ -3155,14 +3362,193 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
}
}
+/// \brief Add keywords to the consumer as possible typo corrections.
+static void AddKeywordsToConsumer(Sema &SemaRef,
+ TypoCorrectionConsumer &Consumer,
+ Scope *S, Sema::CorrectTypoContext CTC) {
+ // Add context-dependent keywords.
+ bool WantTypeSpecifiers = false;
+ bool WantExpressionKeywords = false;
+ bool WantCXXNamedCasts = false;
+ bool WantRemainingKeywords = false;
+ switch (CTC) {
+ case Sema::CTC_Unknown:
+ WantTypeSpecifiers = true;
+ WantExpressionKeywords = true;
+ WantCXXNamedCasts = true;
+ WantRemainingKeywords = true;
+
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->getClassInterface() &&
+ Method->getClassInterface()->getSuperClass())
+ Consumer.addKeywordResult("super");
+
+ break;
+
+ case Sema::CTC_NoKeywords:
+ break;
+
+ case Sema::CTC_Type:
+ WantTypeSpecifiers = true;
+ break;
+
+ case Sema::CTC_ObjCMessageReceiver:
+ Consumer.addKeywordResult("super");
+ // Fall through to handle message receivers like expressions.
+
+ case Sema::CTC_Expression:
+ if (SemaRef.getLangOptions().CPlusPlus)
+ WantTypeSpecifiers = true;
+ WantExpressionKeywords = true;
+ // Fall through to get C++ named casts.
+
+ case Sema::CTC_CXXCasts:
+ WantCXXNamedCasts = true;
+ break;
+
+ case Sema::CTC_ObjCPropertyLookup:
+ // FIXME: Add "isa"?
+ break;
+
+ case Sema::CTC_MemberLookup:
+ if (SemaRef.getLangOptions().CPlusPlus)
+ Consumer.addKeywordResult("template");
+ break;
+
+ case Sema::CTC_ObjCIvarLookup:
+ break;
+ }
+
+ if (WantTypeSpecifiers) {
+ // Add type-specifier keywords to the set of results.
+ const char *CTypeSpecs[] = {
+ "char", "const", "double", "enum", "float", "int", "long", "short",
+ "signed", "struct", "union", "unsigned", "void", "volatile",
+ "_Complex", "_Imaginary",
+ // storage-specifiers as well
+ "extern", "inline", "static", "typedef"
+ };
+
+ const unsigned NumCTypeSpecs = sizeof(CTypeSpecs) / sizeof(CTypeSpecs[0]);
+ for (unsigned I = 0; I != NumCTypeSpecs; ++I)
+ Consumer.addKeywordResult(CTypeSpecs[I]);
+
+ if (SemaRef.getLangOptions().C99)
+ Consumer.addKeywordResult("restrict");
+ if (SemaRef.getLangOptions().Bool || SemaRef.getLangOptions().CPlusPlus)
+ Consumer.addKeywordResult("bool");
+ else if (SemaRef.getLangOptions().C99)
+ Consumer.addKeywordResult("_Bool");
+
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("class");
+ Consumer.addKeywordResult("typename");
+ Consumer.addKeywordResult("wchar_t");
+
+ if (SemaRef.getLangOptions().CPlusPlus0x) {
+ Consumer.addKeywordResult("char16_t");
+ Consumer.addKeywordResult("char32_t");
+ Consumer.addKeywordResult("constexpr");
+ Consumer.addKeywordResult("decltype");
+ Consumer.addKeywordResult("thread_local");
+ }
+ }
+
+ if (SemaRef.getLangOptions().GNUMode)
+ Consumer.addKeywordResult("typeof");
+ }
+
+ if (WantCXXNamedCasts && SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("const_cast");
+ Consumer.addKeywordResult("dynamic_cast");
+ Consumer.addKeywordResult("reinterpret_cast");
+ Consumer.addKeywordResult("static_cast");
+ }
+
+ if (WantExpressionKeywords) {
+ Consumer.addKeywordResult("sizeof");
+ if (SemaRef.getLangOptions().Bool || SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("false");
+ Consumer.addKeywordResult("true");
+ }
+
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ const char *CXXExprs[] = {
+ "delete", "new", "operator", "throw", "typeid"
+ };
+ const unsigned NumCXXExprs = sizeof(CXXExprs) / sizeof(CXXExprs[0]);
+ for (unsigned I = 0; I != NumCXXExprs; ++I)
+ Consumer.addKeywordResult(CXXExprs[I]);
+
+ if (isa<CXXMethodDecl>(SemaRef.CurContext) &&
+ cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance())
+ Consumer.addKeywordResult("this");
+
+ if (SemaRef.getLangOptions().CPlusPlus0x) {
+ Consumer.addKeywordResult("alignof");
+ Consumer.addKeywordResult("nullptr");
+ }
+ }
+ }
+
+ if (WantRemainingKeywords) {
+ if (SemaRef.getCurFunctionOrMethodDecl() || SemaRef.getCurBlock()) {
+ // Statements.
+ const char *CStmts[] = {
+ "do", "else", "for", "goto", "if", "return", "switch", "while" };
+ const unsigned NumCStmts = sizeof(CStmts) / sizeof(CStmts[0]);
+ for (unsigned I = 0; I != NumCStmts; ++I)
+ Consumer.addKeywordResult(CStmts[I]);
+
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("catch");
+ Consumer.addKeywordResult("try");
+ }
+
+ if (S && S->getBreakParent())
+ Consumer.addKeywordResult("break");
+
+ if (S && S->getContinueParent())
+ Consumer.addKeywordResult("continue");
+
+ if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ Consumer.addKeywordResult("case");
+ Consumer.addKeywordResult("default");
+ }
+ } else {
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("namespace");
+ Consumer.addKeywordResult("template");
+ }
+
+ if (S && S->isClassScope()) {
+ Consumer.addKeywordResult("explicit");
+ Consumer.addKeywordResult("friend");
+ Consumer.addKeywordResult("mutable");
+ Consumer.addKeywordResult("private");
+ Consumer.addKeywordResult("protected");
+ Consumer.addKeywordResult("public");
+ Consumer.addKeywordResult("virtual");
+ }
+ }
+
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ Consumer.addKeywordResult("using");
+
+ if (SemaRef.getLangOptions().CPlusPlus0x)
+ Consumer.addKeywordResult("static_assert");
+ }
+ }
+}
+
/// \brief Try to "correct" a typo in the source code by finding
/// visible declarations whose names are similar to the name that was
/// present in the source code.
///
-/// \param Res the \c LookupResult structure that contains the name
-/// that was present in the source code along with the name-lookup
-/// criteria used to search for the name. On success, this structure
-/// will contain the results of name lookup.
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
///
/// \param S the scope in which name lookup occurs.
///
@@ -3181,60 +3567,64 @@ static void LookupPotentialTypoResult(Sema &SemaRef,
/// \param OPT when non-NULL, the search for visible declarations will
/// also walk the protocols in the qualified interfaces of \p OPT.
///
-/// \returns the corrected name if the typo was corrected, otherwise returns an
-/// empty \c DeclarationName. When a typo was corrected, the result structure
-/// may contain the results of name lookup for the correct name or it may be
-/// empty.
-DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
- DeclContext *MemberContext,
- bool EnteringContext,
- CorrectTypoContext CTC,
- const ObjCObjectPointerType *OPT) {
+/// \returns a \c TypoCorrection containing the corrected name if the typo was
+/// corrected, along with information such as the \c NamedDecl where the
+/// corrected name was declared, and any additional \c NestedNameSpecifier
+/// needed to access it (C++ only). It is empty if there is no correction.
+TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
+ Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ CorrectTypoContext CTC,
+ const ObjCObjectPointerType *OPT) {
if (Diags.hasFatalErrorOccurred() || !getLangOptions().SpellChecking)
- return DeclarationName();
+ return TypoCorrection();
// We only attempt to correct typos for identifiers.
- IdentifierInfo *Typo = Res.getLookupName().getAsIdentifierInfo();
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
if (!Typo)
- return DeclarationName();
+ return TypoCorrection();
// If the scope specifier itself was invalid, don't try to correct
// typos.
if (SS && SS->isInvalid())
- return DeclarationName();
+ return TypoCorrection();
// Never try to correct typos during template deduction or
// instantiation.
if (!ActiveTemplateInstantiations.empty())
- return DeclarationName();
+ return TypoCorrection();
- TypoCorrectionConsumer Consumer(Typo);
+ NamespaceSpecifierSet Namespaces(Context, CurContext);
+
+ TypoCorrectionConsumer Consumer(*this, Typo);
// Perform name lookup to find visible, similarly-named entities.
bool IsUnqualifiedLookup = false;
if (MemberContext) {
- LookupVisibleDecls(MemberContext, Res.getLookupKind(), Consumer);
+ LookupVisibleDecls(MemberContext, LookupKind, Consumer);
// Look in qualified interfaces.
if (OPT) {
for (ObjCObjectPointerType::qual_iterator
I = OPT->qual_begin(), E = OPT->qual_end();
I != E; ++I)
- LookupVisibleDecls(*I, Res.getLookupKind(), Consumer);
+ LookupVisibleDecls(*I, LookupKind, Consumer);
}
} else if (SS && SS->isSet()) {
DeclContext *DC = computeDeclContext(*SS, EnteringContext);
if (!DC)
- return DeclarationName();
+ return TypoCorrection();
// Provide a stop gap for files that are just seriously broken. Trying
// to correct all typos can turn into a HUGE performance penalty, causing
// some files to take minutes to get rejected by the parser.
if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
- return DeclarationName();
+ return TypoCorrection();
++TyposCorrected;
- LookupVisibleDecls(DC, Res.getLookupKind(), Consumer);
+ LookupVisibleDecls(DC, LookupKind, Consumer);
} else {
IsUnqualifiedLookup = true;
UnqualifiedTyposCorrectedMap::iterator Cached
@@ -3244,7 +3634,7 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
// to correct all typos can turn into a HUGE performance penalty, causing
// some files to take minutes to get rejected by the parser.
if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
- return DeclarationName();
+ return TypoCorrection();
// For unqualified lookup, look through all of the names that we have
// seen in this translation unit.
@@ -3268,313 +3658,225 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
} else {
// Use the cached value, unless it's a keyword. In the keyword case, we'll
// end up adding the keyword below.
- if (Cached->second.first.empty())
- return DeclarationName();
+ if (!Cached->second)
+ return TypoCorrection();
- if (!Cached->second.second)
- Consumer.FoundName(Cached->second.first);
+ if (!Cached->second.isKeyword())
+ Consumer.addCorrection(Cached->second);
}
}
- // Add context-dependent keywords.
- bool WantTypeSpecifiers = false;
- bool WantExpressionKeywords = false;
- bool WantCXXNamedCasts = false;
- bool WantRemainingKeywords = false;
- switch (CTC) {
- case CTC_Unknown:
- WantTypeSpecifiers = true;
- WantExpressionKeywords = true;
- WantCXXNamedCasts = true;
- WantRemainingKeywords = true;
-
- if (ObjCMethodDecl *Method = getCurMethodDecl())
- if (Method->getClassInterface() &&
- Method->getClassInterface()->getSuperClass())
- Consumer.addKeywordResult(Context, "super");
-
- break;
-
- case CTC_NoKeywords:
- break;
-
- case CTC_Type:
- WantTypeSpecifiers = true;
- break;
-
- case CTC_ObjCMessageReceiver:
- Consumer.addKeywordResult(Context, "super");
- // Fall through to handle message receivers like expressions.
-
- case CTC_Expression:
- if (getLangOptions().CPlusPlus)
- WantTypeSpecifiers = true;
- WantExpressionKeywords = true;
- // Fall through to get C++ named casts.
-
- case CTC_CXXCasts:
- WantCXXNamedCasts = true;
- break;
-
- case CTC_ObjCPropertyLookup:
- // FIXME: Add "isa"?
- break;
+ AddKeywordsToConsumer(*this, Consumer, S, CTC);
- case CTC_MemberLookup:
- if (getLangOptions().CPlusPlus)
- Consumer.addKeywordResult(Context, "template");
- break;
+ // If we haven't found anything, we're done.
+ if (Consumer.empty()) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
- case CTC_ObjCIvarLookup:
- break;
+ return TypoCorrection();
}
- if (WantTypeSpecifiers) {
- // Add type-specifier keywords to the set of results.
- const char *CTypeSpecs[] = {
- "char", "const", "double", "enum", "float", "int", "long", "short",
- "signed", "struct", "union", "unsigned", "void", "volatile", "_Bool",
- "_Complex", "_Imaginary",
- // storage-specifiers as well
- "extern", "inline", "static", "typedef"
- };
-
- const unsigned NumCTypeSpecs = sizeof(CTypeSpecs) / sizeof(CTypeSpecs[0]);
- for (unsigned I = 0; I != NumCTypeSpecs; ++I)
- Consumer.addKeywordResult(Context, CTypeSpecs[I]);
-
- if (getLangOptions().C99)
- Consumer.addKeywordResult(Context, "restrict");
- if (getLangOptions().Bool || getLangOptions().CPlusPlus)
- Consumer.addKeywordResult(Context, "bool");
-
- if (getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "class");
- Consumer.addKeywordResult(Context, "typename");
- Consumer.addKeywordResult(Context, "wchar_t");
-
- if (getLangOptions().CPlusPlus0x) {
- Consumer.addKeywordResult(Context, "char16_t");
- Consumer.addKeywordResult(Context, "char32_t");
- Consumer.addKeywordResult(Context, "constexpr");
- Consumer.addKeywordResult(Context, "decltype");
- Consumer.addKeywordResult(Context, "thread_local");
- }
- }
+ // Make sure that the user typed at least 3 characters for each correction
+ // made. Otherwise, we don't even bother looking at the results.
+ unsigned ED = Consumer.getBestEditDistance();
+ if (ED > 0 && Typo->getName().size() / ED < 3) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
- if (getLangOptions().GNUMode)
- Consumer.addKeywordResult(Context, "typeof");
+ return TypoCorrection();
}
- if (WantCXXNamedCasts && getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "const_cast");
- Consumer.addKeywordResult(Context, "dynamic_cast");
- Consumer.addKeywordResult(Context, "reinterpret_cast");
- Consumer.addKeywordResult(Context, "static_cast");
+ // Build the NestedNameSpecifiers for the KnownNamespaces
+ if (getLangOptions().CPlusPlus) {
+ // Load any externally-known namespaces.
+ if (ExternalSource && !LoadedExternalKnownNamespaces) {
+ llvm::SmallVector<NamespaceDecl *, 4> ExternalKnownNamespaces;
+ LoadedExternalKnownNamespaces = true;
+ ExternalSource->ReadKnownNamespaces(ExternalKnownNamespaces);
+ for (unsigned I = 0, N = ExternalKnownNamespaces.size(); I != N; ++I)
+ KnownNamespaces[ExternalKnownNamespaces[I]] = true;
+ }
+
+ for (llvm::DenseMap<NamespaceDecl*, bool>::iterator
+ KNI = KnownNamespaces.begin(),
+ KNIEnd = KnownNamespaces.end();
+ KNI != KNIEnd; ++KNI)
+ Namespaces.AddNamespace(KNI->first);
}
- if (WantExpressionKeywords) {
- Consumer.addKeywordResult(Context, "sizeof");
- if (getLangOptions().Bool || getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "false");
- Consumer.addKeywordResult(Context, "true");
- }
+ // Weed out any names that could not be found by name lookup.
+ llvm::SmallPtrSet<IdentifierInfo*, 16> QualifiedResults;
+ LookupResult TmpRes(*this, TypoName, LookupKind);
+ TmpRes.suppressDiagnostics();
+ while (!Consumer.empty()) {
+ TypoCorrectionConsumer::distance_iterator DI = Consumer.begin();
+ unsigned ED = DI->first;
+ for (TypoCorrectionConsumer::result_iterator I = DI->second->begin(),
+ IEnd = DI->second->end();
+ I != IEnd; /* Increment in loop. */) {
+ // If the item already has been looked up or is a keyword, keep it
+ if (I->second.isResolved()) {
+ ++I;
+ continue;
+ }
- if (getLangOptions().CPlusPlus) {
- const char *CXXExprs[] = {
- "delete", "new", "operator", "throw", "typeid"
- };
- const unsigned NumCXXExprs = sizeof(CXXExprs) / sizeof(CXXExprs[0]);
- for (unsigned I = 0; I != NumCXXExprs; ++I)
- Consumer.addKeywordResult(Context, CXXExprs[I]);
+ // Perform name lookup on this name.
+ IdentifierInfo *Name = I->second.getCorrectionAsIdentifierInfo();
+ LookupPotentialTypoResult(*this, TmpRes, Name, S, SS, MemberContext,
+ EnteringContext, CTC);
- if (isa<CXXMethodDecl>(CurContext) &&
- cast<CXXMethodDecl>(CurContext)->isInstance())
- Consumer.addKeywordResult(Context, "this");
+ switch (TmpRes.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ QualifiedResults.insert(Name);
+ // We didn't find this name in our scope, or didn't like what we found;
+ // ignore it.
+ {
+ TypoCorrectionConsumer::result_iterator Next = I;
+ ++Next;
+ DI->second->erase(I);
+ I = Next;
+ }
+ break;
- if (getLangOptions().CPlusPlus0x) {
- Consumer.addKeywordResult(Context, "alignof");
- Consumer.addKeywordResult(Context, "nullptr");
+ case LookupResult::Ambiguous:
+ // We don't deal with ambiguities.
+ return TypoCorrection();
+
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ I->second.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
+ // FIXME: This sets the CorrectionDecl to NULL for overloaded functions.
+ // It would be nice to find the right one with overload resolution.
+ ++I;
+ break;
}
}
- }
-
- if (WantRemainingKeywords) {
- if (getCurFunctionOrMethodDecl() || getCurBlock()) {
- // Statements.
- const char *CStmts[] = {
- "do", "else", "for", "goto", "if", "return", "switch", "while" };
- const unsigned NumCStmts = sizeof(CStmts) / sizeof(CStmts[0]);
- for (unsigned I = 0; I != NumCStmts; ++I)
- Consumer.addKeywordResult(Context, CStmts[I]);
-
- if (getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "catch");
- Consumer.addKeywordResult(Context, "try");
- }
- if (S && S->getBreakParent())
- Consumer.addKeywordResult(Context, "break");
-
- if (S && S->getContinueParent())
- Consumer.addKeywordResult(Context, "continue");
-
- if (!getCurFunction()->SwitchStack.empty()) {
- Consumer.addKeywordResult(Context, "case");
- Consumer.addKeywordResult(Context, "default");
- }
- } else {
- if (getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "namespace");
- Consumer.addKeywordResult(Context, "template");
- }
+ if (DI->second->empty())
+ Consumer.erase(DI);
+ else if (!getLangOptions().CPlusPlus || QualifiedResults.empty() || !ED)
+ // If there are results in the closest possible bucket, stop
+ break;
- if (S && S->isClassScope()) {
- Consumer.addKeywordResult(Context, "explicit");
- Consumer.addKeywordResult(Context, "friend");
- Consumer.addKeywordResult(Context, "mutable");
- Consumer.addKeywordResult(Context, "private");
- Consumer.addKeywordResult(Context, "protected");
- Consumer.addKeywordResult(Context, "public");
- Consumer.addKeywordResult(Context, "virtual");
+ // Only perform the qualified lookups for C++
+ if (getLangOptions().CPlusPlus) {
+ TmpRes.suppressDiagnostics();
+ for (llvm::SmallPtrSet<IdentifierInfo*,
+ 16>::iterator QRI = QualifiedResults.begin(),
+ QRIEnd = QualifiedResults.end();
+ QRI != QRIEnd; ++QRI) {
+ for (NamespaceSpecifierSet::iterator NI = Namespaces.begin(),
+ NIEnd = Namespaces.end();
+ NI != NIEnd; ++NI) {
+ DeclContext *Ctx = NI->DeclCtx;
+ unsigned QualifiedED = ED + NI->EditDistance;
+
+ // Stop searching once the namespaces are too far away to create
+ // acceptable corrections for this identifier (since the namespaces
+ // are sorted in ascending order by edit distance)
+ if (QualifiedED > Consumer.getMaxEditDistance()) break;
+
+ TmpRes.clear();
+ TmpRes.setLookupName(*QRI);
+ if (!LookupQualifiedName(TmpRes, Ctx)) continue;
+
+ switch (TmpRes.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ Consumer.addName((*QRI)->getName(), TmpRes.getAsSingle<NamedDecl>(),
+ QualifiedED, NI->NameSpecifier);
+ break;
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ break;
+ }
+ }
}
}
- if (getLangOptions().CPlusPlus) {
- Consumer.addKeywordResult(Context, "using");
-
- if (getLangOptions().CPlusPlus0x)
- Consumer.addKeywordResult(Context, "static_assert");
- }
+ QualifiedResults.clear();
}
- // If we haven't found anything, we're done.
- if (Consumer.empty()) {
- // If this was an unqualified lookup, note that no correction was found.
- if (IsUnqualifiedLookup)
- (void)UnqualifiedTyposCorrected[Typo];
+ // No corrections remain...
+ if (Consumer.empty()) return TypoCorrection();
- return DeclarationName();
- }
+ TypoResultsMap &BestResults = *Consumer.begin()->second;
+ ED = Consumer.begin()->first;
- // Make sure that the user typed at least 3 characters for each correction
- // made. Otherwise, we don't even both looking at the results.
-
- // We also suppress exact matches; those should be handled by a
- // different mechanism (e.g., one that introduces qualification in
- // C++).
- unsigned ED = Consumer.getBestEditDistance();
if (ED > 0 && Typo->getName().size() / ED < 3) {
// If this was an unqualified lookup, note that no correction was found.
if (IsUnqualifiedLookup)
(void)UnqualifiedTyposCorrected[Typo];
- return DeclarationName();
+ return TypoCorrection();
}
- // Weed out any names that could not be found by name lookup.
- bool LastLookupWasAccepted = false;
- for (TypoCorrectionConsumer::iterator I = Consumer.begin(),
- IEnd = Consumer.end();
- I != IEnd; /* Increment in loop. */) {
- // Keywords are always found.
- if (I->second) {
- ++I;
- continue;
- }
-
- // Perform name lookup on this name.
- IdentifierInfo *Name = &Context.Idents.get(I->getKey());
- LookupPotentialTypoResult(*this, Res, Name, S, SS, MemberContext,
- EnteringContext, CTC);
-
- switch (Res.getResultKind()) {
- case LookupResult::NotFound:
- case LookupResult::NotFoundInCurrentInstantiation:
- case LookupResult::Ambiguous:
- // We didn't find this name in our scope, or didn't like what we found;
- // ignore it.
- Res.suppressDiagnostics();
- {
- TypoCorrectionConsumer::iterator Next = I;
- ++Next;
- Consumer.erase(I);
- I = Next;
- }
- LastLookupWasAccepted = false;
- break;
-
- case LookupResult::Found:
- case LookupResult::FoundOverloaded:
- case LookupResult::FoundUnresolvedValue:
- ++I;
- LastLookupWasAccepted = true;
- break;
- }
-
- if (Res.isAmbiguous()) {
- // We don't deal with ambiguities.
- Res.suppressDiagnostics();
- Res.clear();
- return DeclarationName();
+ // If we have multiple possible corrections, eliminate the ones where we
+ // added namespace qualifiers to try to resolve the ambiguity (and to favor
+ // corrections without additional namespace qualifiers)
+ if (getLangOptions().CPlusPlus && BestResults.size() > 1) {
+ TypoCorrectionConsumer::distance_iterator DI = Consumer.begin();
+ for (TypoCorrectionConsumer::result_iterator I = DI->second->begin(),
+ IEnd = DI->second->end();
+ I != IEnd; /* Increment in loop. */) {
+ if (I->second.getCorrectionSpecifier() != NULL) {
+ TypoCorrectionConsumer::result_iterator Cur = I;
+ ++I;
+ DI->second->erase(Cur);
+ } else ++I;
}
}
// If only a single name remains, return that result.
- if (Consumer.size() == 1) {
- IdentifierInfo *Name = &Context.Idents.get(Consumer.begin()->getKey());
- if (Consumer.begin()->second) {
- Res.suppressDiagnostics();
- Res.clear();
-
- // Don't correct to a keyword that's the same as the typo; the keyword
- // wasn't actually in scope.
- if (ED == 0) {
- Res.setLookupName(Typo);
- return DeclarationName();
- }
+ if (BestResults.size() == 1) {
+ const llvm::StringMapEntry<TypoCorrection> &Correction = *(BestResults.begin());
+ const TypoCorrection &Result = Correction.second;
- } else if (!LastLookupWasAccepted) {
- // Perform name lookup on this name.
- LookupPotentialTypoResult(*this, Res, Name, S, SS, MemberContext,
- EnteringContext, CTC);
- }
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0 && Result.isKeyword()) return TypoCorrection();
// Record the correction for unqualified lookup.
if (IsUnqualifiedLookup)
- UnqualifiedTyposCorrected[Typo]
- = std::make_pair(Name->getName(), Consumer.begin()->second);
+ UnqualifiedTyposCorrected[Typo] = Result;
- return &Context.Idents.get(Consumer.begin()->getKey());
+ return Result;
}
- else if (Consumer.size() > 1 && CTC == CTC_ObjCMessageReceiver
- && Consumer["super"]) {
- // Prefix 'super' when we're completing in a message-receiver
+ else if (BestResults.size() > 1 && CTC == CTC_ObjCMessageReceiver
+ && BestResults["super"].isKeyword()) {
+ // Prefer 'super' when we're completing in a message-receiver
// context.
- Res.suppressDiagnostics();
- Res.clear();
// Don't correct to a keyword that's the same as the typo; the keyword
// wasn't actually in scope.
- if (ED == 0) {
- Res.setLookupName(Typo);
- return DeclarationName();
- }
+ if (ED == 0) return TypoCorrection();
// Record the correction for unqualified lookup.
if (IsUnqualifiedLookup)
- UnqualifiedTyposCorrected[Typo]
- = std::make_pair("super", Consumer.begin()->second);
+ UnqualifiedTyposCorrected[Typo] = BestResults["super"];
- return &Context.Idents.get("super");
+ return BestResults["super"];
}
- Res.suppressDiagnostics();
- Res.setLookupName(Typo);
- Res.clear();
- // Record the correction for unqualified lookup.
if (IsUnqualifiedLookup)
(void)UnqualifiedTyposCorrected[Typo];
- return DeclarationName();
+ return TypoCorrection();
+}
+
+std::string TypoCorrection::getAsString(const LangOptions &LO) const {
+ if (CorrectionNameSpec) {
+ std::string tmpBuffer;
+ llvm::raw_string_ostream PrefixOStream(tmpBuffer);
+ CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO));
+ return PrefixOStream.str() + CorrectionName.getAsString();
+ }
+
+ return CorrectionName.getAsString();
}
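
An illustrative C++ sketch (not part of the patch; identifiers invented, diagnostic wording approximate) of what the namespace-aware correction above enables: a misspelled name that only exists inside another namespace can now be offered with its qualifier, which is the string TypoCorrection::getAsString prints when a CorrectionNameSpec is attached.

    // Hypothetical translation unit, compiled as C++.
    namespace widgets {
      struct Button {};
    }

    int main() {
      Buton *b = 0;   // error: unknown type name 'Buton'; with the qualified
                      // lookup pass above, clang can suggest 'widgets::Button'
                      // (edit distance 1 plus the namespace qualifier)
      (void)b;
      return 0;
    }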
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index 6c4469cef9ef..d826ea8d84a4 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -24,6 +24,51 @@ using namespace clang;
// Grammar actions.
//===----------------------------------------------------------------------===//
+/// Check the internal consistency of a property declaration.
+static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
+ if (property->isInvalidDecl()) return;
+
+ ObjCPropertyDecl::PropertyAttributeKind propertyKind
+ = property->getPropertyAttributes();
+ Qualifiers::ObjCLifetime propertyLifetime
+ = property->getType().getObjCLifetime();
+
+ // Nothing to do if we don't have a lifetime.
+ if (propertyLifetime == Qualifiers::OCL_None) return;
+
+ Qualifiers::ObjCLifetime expectedLifetime;
+ unsigned selector;
+
+ // Strong properties should have either strong or no lifetime.
+ if (propertyKind & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy)) {
+ expectedLifetime = Qualifiers::OCL_Strong;
+ selector = 0;
+ } else if (propertyKind & ObjCPropertyDecl::OBJC_PR_weak) {
+ expectedLifetime = Qualifiers::OCL_Weak;
+ selector = 1;
+ } else if (propertyKind & (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained) &&
+ property->getType()->isObjCRetainableType()) {
+ expectedLifetime = Qualifiers::OCL_ExplicitNone;
+ selector = 2;
+ } else {
+ // We have a lifetime qualifier but no dominating property
+ // attribute. That's okay.
+ return;
+ }
+
+ if (propertyLifetime == expectedLifetime) return;
+
+ property->setInvalidDecl();
+ S.Diag(property->getLocation(),
+ diag::err_arc_inconsistent_property_ownership)
+ << property->getDeclName()
+ << selector
+ << propertyLifetime;
+}
+
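
A sketch of the kind of declaration checkARCPropertyDecl rejects (illustrative Objective-C, not taken from the patch; class and property names invented):

    #import <Foundation/Foundation.h>

    @interface Widget : NSObject
    // error under ARC: the written __weak qualifier contradicts the ownership
    // implied by 'strong' (err_arc_inconsistent_property_ownership)
    @property (nonatomic, strong) __weak id delegate;

    // OK: attribute and lifetime qualifier agree
    @property (nonatomic, weak) __weak id observer;
    @end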
Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
FieldDeclarator &FD,
ObjCDeclSpec &ODS,
@@ -34,6 +79,14 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC) {
unsigned Attributes = ODS.getPropertyAttributes();
+ TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ QualType T = TSI->getType();
+ if ((getLangOptions().getGCMode() != LangOptions::NonGC &&
+ T.isObjCGCWeak()) ||
+ (getLangOptions().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_Weak))
+ Attributes |= ObjCDeclSpec::DQ_PR_weak;
+
bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
// default is readwrite!
!(Attributes & ObjCDeclSpec::DQ_PR_readonly));
@@ -42,9 +95,10 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
bool isAssign = ((Attributes & ObjCDeclSpec::DQ_PR_assign) ||
(isReadWrite &&
!(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
- !(Attributes & ObjCDeclSpec::DQ_PR_copy)));
-
- TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_copy) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_weak)));
// Proceed with constructing the ObjCPropertDecls.
ObjCContainerDecl *ClassDecl =
@@ -58,20 +112,27 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
Attributes,
isOverridingProperty, TSI,
MethodImplKind);
- if (Res)
+ if (Res) {
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+ if (getLangOptions().ObjCAutoRefCount)
+ checkARCPropertyDecl(*this, cast<ObjCPropertyDecl>(Res));
+ }
return Res;
}
- Decl *Res = CreatePropertyDecl(S, ClassDecl, AtLoc, FD,
- GetterSel, SetterSel,
- isAssign, isReadWrite,
- Attributes, TSI, MethodImplKind);
+ ObjCPropertyDecl *Res = CreatePropertyDecl(S, ClassDecl, AtLoc, FD,
+ GetterSel, SetterSel,
+ isAssign, isReadWrite,
+ Attributes, TSI, MethodImplKind);
if (lexicalDC)
Res->setLexicalDeclContext(lexicalDC);
// Validate the attributes on the @property.
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+
+ if (getLangOptions().ObjCAutoRefCount)
+ checkARCPropertyDecl(*this, Res);
+
return Res;
}
@@ -118,6 +179,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
// Set setter/getter selector name. Needed later.
PDecl->setGetterName(GetterSel);
PDecl->setSetterName(SetterSel);
+ ProcessDeclAttributes(S, PDecl, FD.D);
DC->addDecl(PDecl);
// We need to look in the @interface to see if the @property was
@@ -139,10 +201,6 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
CreatePropertyDecl(S, CCPrimary, AtLoc,
FD, GetterSel, SetterSel, isAssign, isReadWrite,
Attributes, T, MethodImplKind, DC);
- // Mark written attribute as having no attribute because
- // this is not a user-written property declaration in primary
- // class.
- PDecl->setPropertyAttributesAsWritten(ObjCPropertyDecl::OBJC_PR_noattr);
// A case of continuation class adding a new property in the class. This
// is not what it was meant for. However, gcc supports it and so should we.
@@ -158,6 +216,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
unsigned retainCopyNonatomic =
(ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
ObjCPropertyDecl::OBJC_PR_copy |
ObjCPropertyDecl::OBJC_PR_nonatomic);
if ((Attributes & retainCopyNonatomic) !=
@@ -189,6 +248,8 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
PIDecl->makeitReadWriteAttribute();
if (Attributes & ObjCDeclSpec::DQ_PR_retain)
PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
if (Attributes & ObjCDeclSpec::DQ_PR_copy)
PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
PIDecl->setSetterName(SetterSel);
@@ -272,6 +333,35 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
PDecl->setGetterName(GetterSel);
PDecl->setSetterName(SetterSel);
+ unsigned attributesAsWritten = 0;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
+
+ PDecl->setPropertyAttributesAsWritten(
+ (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten);
+
if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
@@ -287,9 +377,18 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
if (Attributes & ObjCDeclSpec::DQ_PR_retain)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+
if (Attributes & ObjCDeclSpec::DQ_PR_copy)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
if (isAssign)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
@@ -298,8 +397,12 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
else if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
- PDecl->setPropertyAttributesAsWritten(PDecl->getPropertyAttributes());
-
+  // 'unsafe_unretained' is an alias for 'assign'.
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
if (MethodImplKind == tok::objc_required)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
else if (MethodImplKind == tok::objc_optional)
@@ -308,6 +411,93 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
return PDecl;
}
+static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
+ ObjCPropertyDecl *property,
+ ObjCIvarDecl *ivar) {
+ if (property->isInvalidDecl() || ivar->isInvalidDecl()) return;
+
+ QualType propertyType = property->getType();
+ Qualifiers::ObjCLifetime propertyLifetime = propertyType.getObjCLifetime();
+ ObjCPropertyDecl::PropertyAttributeKind propertyKind
+ = property->getPropertyAttributes();
+
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+
+ // Case 1: strong properties.
+ if (propertyLifetime == Qualifiers::OCL_Strong ||
+ (propertyKind & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy))) {
+ switch (ivarLifetime) {
+ case Qualifiers::OCL_Strong:
+ // Okay.
+ return;
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_Autoreleasing:
+ // These aren't valid lifetimes for object ivars; don't diagnose twice.
+ return;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ S.Diag(propertyImplLoc, diag::err_arc_strong_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName()
+ << ivarLifetime;
+ break;
+ }
+
+ // Case 2: weak properties.
+ } else if (propertyLifetime == Qualifiers::OCL_Weak ||
+ (propertyKind & ObjCPropertyDecl::OBJC_PR_weak)) {
+ switch (ivarLifetime) {
+ case Qualifiers::OCL_Weak:
+ // Okay.
+ return;
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_Autoreleasing:
+ // These aren't valid lifetimes for object ivars; don't diagnose twice.
+ return;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ S.Diag(propertyImplLoc, diag::error_weak_property)
+ << property->getDeclName()
+ << ivar->getDeclName();
+ break;
+ }
+
+ // Case 3: assign properties.
+ } else if ((propertyKind & ObjCPropertyDecl::OBJC_PR_assign) &&
+ propertyType->isObjCRetainableType()) {
+ switch (ivarLifetime) {
+ case Qualifiers::OCL_ExplicitNone:
+ // Okay.
+ return;
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_Autoreleasing:
+ // These aren't valid lifetimes for object ivars; don't diagnose twice.
+ return;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ S.Diag(propertyImplLoc, diag::err_arc_assign_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName();
+ break;
+ }
+
+ // Any other property should be ignored.
+ } else {
+ return;
+ }
+
+ S.Diag(property->getLocation(), diag::note_property_declare);
+}
+
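
A sketch (illustrative Objective-C, identifiers invented) of the property/ivar ownership mismatch this function diagnoses when @synthesize binds a property to an existing ivar:

    #import <Foundation/Foundation.h>

    @interface Person : NSObject {
      __weak NSString *_name;                       // weak storage...
    }
    @property (nonatomic, strong) NSString *name;   // ...behind a strong property
    @end

    @implementation Person
    // error under ARC (err_arc_strong_property_ownership): a strong/retain/copy
    // property may not be backed by a __weak ivar
    @synthesize name = _name;
    @end

When no such ivar exists, the code added to ActOnPropertyImplDecl below instead synthesizes one and infers its lifetime from the property attributes (__strong for retain/strong/copy, __weak for weak, __unsafe_unretained for assign).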
/// ActOnPropertyImplDecl - This routine performs semantic checks and
/// builds the AST node for a property implementation declaration; declared
@@ -396,9 +586,18 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ObjCIvarDecl *Ivar = 0;
// Check that we have a valid, previously declared ivar for @synthesize
if (Synthesize) {
+ if (getLangOptions().ObjCAutoRefCount &&
+ !property->hasWrittenStorageAttribute() &&
+ property->getType()->isObjCRetainableType()) {
+ Diag(PropertyLoc, diag::err_arc_objc_property_default_assign_on_object);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
// @synthesize
if (!PropertyIvar)
PropertyIvar = PropertyId;
+ ObjCPropertyDecl::PropertyAttributeKind kind
+ = property->getPropertyAttributes();
QualType PropType = Context.getCanonicalType(property->getType());
QualType PropertyIvarType = PropType;
if (PropType->isReferenceType())
@@ -407,6 +606,45 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
ObjCInterfaceDecl *ClassDeclared;
Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
if (!Ivar) {
+ // In ARC, give the ivar a lifetime qualifier based on its
+ // property attributes.
+ if (getLangOptions().ObjCAutoRefCount &&
+ !PropertyIvarType.getObjCLifetime()) {
+
+ // retain/copy have retaining lifetime.
+ if (kind & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy)) {
+ Qualifiers qs;
+ qs.addObjCLifetime(Qualifiers::OCL_Strong);
+ PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
+ }
+ else if (kind & ObjCPropertyDecl::OBJC_PR_weak) {
+ if (!getLangOptions().ObjCRuntimeHasWeak) {
+ Diag(PropertyLoc, diag::err_arc_weak_no_runtime);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ Qualifiers qs;
+ qs.addObjCLifetime(Qualifiers::OCL_Weak);
+ PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
+ }
+ else if (kind & ObjCPropertyDecl::OBJC_PR_assign &&
+ PropertyIvarType->isObjCRetainableType()) {
+        // Assume that an 'assign' property synthesizes an
+        // __unsafe_unretained ivar.
+ Qualifiers qs;
+ qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
+ }
+ }
+
+ if (kind & ObjCPropertyDecl::OBJC_PR_weak &&
+ !getLangOptions().ObjCAutoRefCount &&
+ getLangOptions().getGCMode() == LangOptions::NonGC) {
+ Diag(PropertyLoc, diag::error_synthesize_weak_non_arc_or_gc);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
PropertyLoc, PropertyLoc, PropertyIvar,
PropertyIvarType, /*Dinfo=*/0,
@@ -435,7 +673,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (PropertyIvarType != IvarType) {
bool compat = false;
if (isa<ObjCObjectPointerType>(PropertyIvarType)
- && isa<ObjCObjectPointerType>(IvarType))
+ && isa<ObjCObjectPointerType>(IvarType))
compat =
Context.canAssignObjCInterfaces(
PropertyIvarType->getAs<ObjCObjectPointerType>(),
@@ -470,12 +708,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Fall thru - see previous comment
}
// __weak is explicit. So it works on Canonical type.
- if (PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
- getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
+ getLangOptions().getGCMode() != LangOptions::NonGC)) {
Diag(PropertyLoc, diag::error_weak_property)
<< property->getDeclName() << Ivar->getDeclName();
// Fall thru - see previous comment
}
+ // Fall thru - see previous comment
if ((property->getType()->isObjCObjectPointerType() ||
PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
getLangOptions().getGCMode() != LangOptions::NonGC) {
@@ -484,9 +723,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Fall thru - see previous comment
}
}
+ if (getLangOptions().ObjCAutoRefCount)
+ checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
Diag(PropertyLoc, diag::error_dynamic_property_ivar_decl);
+
assert (property && "ActOnPropertyImplDecl - property declaration missing");
ObjCPropertyImplDecl *PIDecl =
ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc,
@@ -523,6 +765,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
PIDecl->setGetterCXXConstructor(ResExpr);
}
}
+ if (property->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !getterMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ Diag(getterMethod->getLocation(),
+ diag::warn_property_getter_owning_mismatch);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
}
if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
setterMethod->createImplicitParams(Context, IDecl);
@@ -632,10 +880,19 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
!= (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
Diag(Property->getLocation(), diag::warn_property_attribute)
<< Property->getDeclName() << "copy" << inheritedName;
- else if ((CAttr & ObjCPropertyDecl::OBJC_PR_retain)
- != (SAttr & ObjCPropertyDecl::OBJC_PR_retain))
- Diag(Property->getLocation(), diag::warn_property_attribute)
- << Property->getDeclName() << "retain" << inheritedName;
+ else {
+ unsigned CAttrRetain =
+ (CAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ unsigned SAttrRetain =
+ (SAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ bool CStrong = (CAttrRetain != 0);
+ bool SStrong = (SAttrRetain != 0);
+ if (CStrong != SStrong)
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "retain (or strong)" << inheritedName;
+ }
if ((CAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)
!= (SAttr & ObjCPropertyDecl::OBJC_PR_nonatomic))
@@ -653,13 +910,16 @@ Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
QualType RHSType =
Context.getCanonicalType(Property->getType());
- if (!Context.typesAreCompatible(LHSType, RHSType)) {
- // FIXME: Incorporate this test with typesAreCompatible.
- if (LHSType->isObjCQualifiedIdType() && RHSType->isObjCQualifiedIdType())
- if (Context.ObjCQualifiedIdTypesAreCompatible(LHSType, RHSType, false))
- return;
- Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
- << Property->getType() << SuperProperty->getType() << inheritedName;
+ if (!Context.propertyTypesAreCompatible(LHSType, RHSType)) {
+ // Do cases not handled in above.
+ // FIXME. For future support of covariant property types, revisit this.
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ if (!isObjCPointerConversion(RHSType, LHSType,
+ ConvertedType, IncompatibleObjC) ||
+ IncompatibleObjC)
+ Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
+ << Property->getType() << SuperProperty->getType() << inheritedName;
}
}
@@ -1050,7 +1310,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
// Is there a matching propery synthesize/dynamic?
if (Prop->isInvalidDecl() ||
Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
- PropImplMap.count(Prop))
+ PropImplMap.count(Prop) || Prop->hasAttr<UnavailableAttr>())
continue;
if (!InsMap.count(Prop->getGetterName())) {
Diag(Prop->getLocation(),
@@ -1135,6 +1395,35 @@ Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
}
}
+void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) {
+ if (getLangOptions().getGCMode() == LangOptions::GCOnly)
+ return;
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if (PD && !PD->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !D->getInstanceMethod(PD->getGetterName())) {
+ ObjCMethodDecl *method = PD->getGetterMethodDecl();
+ if (!method)
+ continue;
+ ObjCMethodFamily family = method->getMethodFamily();
+ if (family == OMF_alloc || family == OMF_copy ||
+ family == OMF_mutableCopy || family == OMF_new) {
+ if (getLangOptions().ObjCAutoRefCount)
+ Diag(PID->getLocation(), diag::err_ownin_getter_rule);
+ else
+ Diag(PID->getLocation(), diag::warn_ownin_getter_rule);
+ Diag(PD->getLocation(), diag::note_property_declare);
+ }
+ }
+ }
+}
+
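
The rule enforced by DiagnoseOwningPropertyGetterSynthesis, sketched in Objective-C (illustrative only; names invented):

    #import <Foundation/Foundation.h>

    @interface Store : NSObject
    // the default getter selector is 'newItem', which is in the 'new' method
    // family and therefore conventionally returns a +1 (owned) object
    @property (nonatomic, strong) id newItem;
    @end

    @implementation Store
    // diagnosed: error under ARC, warning otherwise, because the synthesized
    // getter would violate the Cocoa ownership naming convention
    @synthesize newItem;
    @end

Marking the property ns_returns_not_retained (checked via NSReturnsNotRetainedAttr above) or supplying an explicit getter avoids the diagnostic.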
/// AddPropertyAttrs - Propagates attributes from a property to the
/// implicitly-declared getter or setter for that property.
static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
@@ -1214,6 +1503,9 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
// and the real context should be the same.
if (lexicalDC)
GetterMethod->setLexicalDeclContext(lexicalDC);
+ if (property->hasAttr<NSReturnsNotRetainedAttr>())
+ GetterMethod->addAttr(
+ ::new (Context) NSReturnsNotRetainedAttr(Loc, Context));
} else
// A user declared getter will be synthesize when @synthesize of
// the property with the same name is seen in the @implementation
@@ -1245,7 +1537,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod,
Loc, Loc,
property->getIdentifier(),
- property->getType(),
+ property->getType().getUnqualifiedType(),
/*TInfo=*/0,
SC_None,
SC_None,
@@ -1287,7 +1579,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
SourceLocation Loc,
unsigned &Attributes) {
// FIXME: Improve the reported location.
- if (!PDecl)
+ if (!PDecl || PDecl->isInvalidDecl())
return;
ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl);
@@ -1297,12 +1589,16 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
(Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain))) {
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong))) {
const char * which = (Attributes & ObjCDeclSpec::DQ_PR_readwrite) ?
"readwrite" :
(Attributes & ObjCDeclSpec::DQ_PR_assign) ?
"assign" :
+ (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) ?
+ "unsafe_unretained" :
(Attributes & ObjCDeclSpec::DQ_PR_copy) ?
"copy" : "retain";
@@ -1313,14 +1609,15 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
}
// Check for copy or retain on non-object types.
- if ((Attributes & (ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain)) &&
- !PropertyTy->isObjCObjectPointerType() &&
- !PropertyTy->isBlockPointerType() &&
- !Context.isObjCNSObjectType(PropertyTy) &&
+ if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
+ !PropertyTy->isObjCRetainableType() &&
!PropertyDecl->getAttr<ObjCNSObjectAttr>()) {
Diag(Loc, diag::err_objc_property_requires_object)
- << (Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain");
- Attributes &= ~(ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain);
+ << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
+ Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
+ Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
}
// Check for more than one of { assign, copy, retain }.
@@ -1335,18 +1632,75 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
<< "assign" << "retain";
Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
}
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOptions().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOptions().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
} else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "copy" << "retain";
Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
}
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "retain" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "strong" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
}
// Warn if user supplied no assignment attribute, property is
// readwrite, and this is an object type.
if (!(Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_copy |
- ObjCDeclSpec::DQ_PR_retain)) &&
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak)) &&
!(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
PropertyTy->isObjCObjectPointerType()) {
// Skip this warning in gc-only mode.
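
Taken together, the CheckObjCPropertyAttributes changes in this file extend the consistency checks to the new ownership spellings. An illustrative set of declarations (invented names, approximate diagnostic wording):

    #import <Foundation/Foundation.h>

    @interface Gadget : NSObject
    @property (nonatomic, strong) id controller;     // OK: strong on an object type

    @property (nonatomic, strong, weak) id broken;   // error: 'strong' and 'weak'
                                                     // are mutually exclusive

    @property (nonatomic, weak) int counter;         // error: 'weak' requires an
                                                     // object (retainable) type
    @end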
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 4bba6f8877bf..437b2b5c6d43 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -38,8 +38,10 @@ using namespace sema;
/// function.
static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn,
- SourceLocation Loc = SourceLocation()) {
- ExprResult E = S.Owned(new (S.Context) DeclRefExpr(Fn, Fn->getType(), VK_LValue, Loc));
+ SourceLocation Loc = SourceLocation(),
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
+ ExprResult E = S.Owned(new (S.Context) DeclRefExpr(Fn, Fn->getType(),
+ VK_LValue, Loc, LocInfo));
E = S.DefaultFunctionArrayConversion(E.take());
if (E.isInvalid())
return ExprError();
@@ -49,7 +51,8 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn,
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
- bool CStyle);
+ bool CStyle,
+ bool AllowObjCWritebackConversion);
static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
@@ -106,6 +109,7 @@ GetConversionCategory(ImplicitConversionKind Kind) {
ICC_Conversion,
ICC_Conversion,
ICC_Conversion,
+ ICC_Conversion,
ICC_Conversion
};
return Category[(int)Kind];
@@ -138,7 +142,8 @@ ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind) {
ICR_Conversion,
ICR_Complex_Real_Conversion,
ICR_Conversion,
- ICR_Conversion
+ ICR_Conversion,
+ ICR_Writeback_Conversion
};
return Rank[(int)Kind];
}
@@ -170,6 +175,7 @@ const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Complex-real conversion",
"Block Pointer conversion",
"Transparent Union Conversion"
+    , "Writeback conversion"
};
return Name[Kind];
}
@@ -181,12 +187,14 @@ void StandardConversionSequence::setAsIdentityConversion() {
Second = ICK_Identity;
Third = ICK_Identity;
DeprecatedStringLiteralToCharPtr = false;
+ QualificationIncludesObjCLifetime = false;
ReferenceBinding = false;
DirectBinding = false;
IsLvalueReference = true;
BindsToFunctionLvalue = false;
BindsToRvalue = false;
BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ObjCLifetimeConversionBinding = false;
CopyConstructor = 0;
}
@@ -730,6 +738,15 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return false;
}
+/// \brief Checks availability of the function depending on the current
+/// function context. Inside an unavailable function, unavailability is ignored.
+///
+/// \returns true if \arg FD is unavailable and current context is inside
+/// an available function, false otherwise.
+bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
+ return FD->isUnavailable() && !cast<Decl>(CurContext)->isUnavailable();
+}
+
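
The availability rule the helper above encodes, as a small C sketch (illustrative; function names invented):

    __attribute__((unavailable)) void legacyHelper(void);

    __attribute__((unavailable))
    void legacyEntryPoint(void) {
      legacyHelper();          // not diagnosed: the caller is itself unavailable
    }

    void currentCode(void) {
      /* legacyHelper(); */    // would be rejected: 'legacyHelper' is unavailable
    }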
/// TryImplicitConversion - Attempt to perform an implicit conversion
/// from the given expression (Expr) to the given type (ToType). This
/// function returns an implicit conversion sequence that can be used
@@ -753,15 +770,20 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
/// not permitted.
/// If @p AllowExplicit, then explicit user-defined conversions are
/// permitted.
+///
+/// \param AllowObjCWritebackConversion Whether we allow the Objective-C
+/// writeback conversion, which allows __autoreleasing id* parameters to
+/// be initialized with __strong id* or __weak id* arguments.
static ImplicitConversionSequence
TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
- bool CStyle) {
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
ImplicitConversionSequence ICS;
if (IsStandardConversion(S, From, ToType, InOverloadResolution,
- ICS.Standard, CStyle)) {
+ ICS.Standard, CStyle, AllowObjCWritebackConversion)){
ICS.setStandard();
return ICS;
}
@@ -867,24 +889,17 @@ TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
return ICS;
}
-bool Sema::TryImplicitConversion(InitializationSequence &Sequence,
- const InitializedEntity &Entity,
- Expr *Initializer,
- bool SuppressUserConversions,
- bool AllowExplicitConversions,
- bool InOverloadResolution,
- bool CStyle) {
- ImplicitConversionSequence ICS
- = clang::TryImplicitConversion(*this, Initializer, Entity.getType(),
- SuppressUserConversions,
- AllowExplicitConversions,
- InOverloadResolution,
- CStyle);
- if (ICS.isBad()) return true;
-
- // Perform the actual conversion.
- Sequence.AddConversionSequenceStep(ICS, Entity.getType());
- return false;
+ImplicitConversionSequence
+Sema::TryImplicitConversion(Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ return clang::TryImplicitConversion(*this, From, ToType,
+ SuppressUserConversions, AllowExplicit,
+ InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion);
}
/// PerformImplicitConversion - Perform an implicit conversion of the
@@ -903,18 +918,25 @@ ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action, bool AllowExplicit,
ImplicitConversionSequence& ICS) {
+ // Objective-C ARC: Determine whether we will allow the writeback conversion.
+ bool AllowObjCWritebackConversion
+ = getLangOptions().ObjCAutoRefCount &&
+ (Action == AA_Passing || Action == AA_Sending);
+
+
ICS = clang::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
AllowExplicit,
/*InOverloadResolution=*/false,
- /*CStyle=*/false);
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
/// \brief Determine whether the conversion from FromType to ToType is a valid
/// conversion that strips "noreturn" off the nested function type.
-static bool IsNoReturnConversion(ASTContext &Context, QualType FromType,
- QualType ToType, QualType &ResultTy) {
+bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
+ QualType &ResultTy) {
if (Context.hasSameUnqualifiedType(FromType, ToType))
return false;
@@ -1016,7 +1038,8 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
- bool CStyle) {
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
QualType FromType = From->getType();
// Standard conversions (C++ [conv])
@@ -1054,7 +1077,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
QualType resultTy;
// if the function type matches except for [[noreturn]], it's ok
- if (!IsNoReturnConversion(S.Context, FromType,
+ if (!S.IsNoReturnConversion(FromType,
S.ExtractUnqualifiedFunctionType(ToType), resultTy))
// otherwise, only a boolean conversion is standard
if (!ToType->isBooleanType())
@@ -1123,6 +1146,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// conversion (4.4). (C++ 4.2p2)
SCS.Second = ICK_Identity;
SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = false;
SCS.setAllToTypes(FromType);
return true;
}
@@ -1199,7 +1223,10 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
} else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
- SCS.Second = ICK_Block_Pointer_Conversion;
+ SCS.Second = ICK_Block_Pointer_Conversion;
+ } else if (AllowObjCWritebackConversion &&
+ S.isObjCWritebackConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Writeback_Conversion;
} else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
FromType, IncompatibleObjC)) {
// Pointer conversions (C++ 4.10).
@@ -1218,7 +1245,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Compatible conversions (Clang extension for C function overloading)
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if (IsNoReturnConversion(S.Context, FromType, ToType, FromType)) {
+ } else if (S.IsNoReturnConversion(FromType, ToType, FromType)) {
// Treat a conversion that strips "noreturn" as an identity conversion.
SCS.Second = ICK_NoReturn_Adjustment;
} else if (IsTransparentUnionStandardConversion(S, From, ToType,
@@ -1235,8 +1262,11 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
QualType CanonFrom;
QualType CanonTo;
// The third conversion can be a qualification conversion (C++ 4p1).
- if (S.IsQualificationConversion(FromType, ToType, CStyle)) {
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(FromType, ToType, CStyle,
+ ObjCLifetimeConversion)) {
SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion;
FromType = ToType;
CanonFrom = S.Context.getCanonicalType(FromType);
CanonTo = S.Context.getCanonicalType(ToType);
@@ -1253,7 +1283,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
if (CanonFrom.getLocalUnqualifiedType()
== CanonTo.getLocalUnqualifiedType() &&
(CanonFrom.getLocalCVRQualifiers() != CanonTo.getLocalCVRQualifiers()
- || CanonFrom.getObjCGCAttr() != CanonTo.getObjCGCAttr())) {
+ || CanonFrom.getObjCGCAttr() != CanonTo.getObjCGCAttr()
+ || CanonFrom.getObjCLifetime() != CanonTo.getObjCLifetime())) {
FromType = ToType;
CanonFrom = CanonTo;
}
@@ -1284,7 +1315,8 @@ IsTransparentUnionStandardConversion(Sema &S, Expr* From,
for (RecordDecl::field_iterator it = UD->field_begin(),
itend = UD->field_end();
it != itend; ++it) {
- if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS, CStyle)) {
+ if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
+ CStyle, /*ObjCWritebackConversion=*/false)) {
ToType = it->getType();
return true;
}
@@ -1479,16 +1511,18 @@ bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
/// same type qualifiers as FromPtr has on its pointee type. ToType,
/// if non-empty, will be a pointer to ToType that may or may not have
/// the right set of qualifiers on its pointee.
+///
static QualType
BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
QualType ToPointee, QualType ToType,
- ASTContext &Context) {
+ ASTContext &Context,
+ bool StripObjCLifetime = false) {
assert((FromPtr->getTypeClass() == Type::Pointer ||
FromPtr->getTypeClass() == Type::ObjCObjectPointer) &&
"Invalid similarly-qualified pointer type");
- /// \brief Conversions to 'id' subsume cv-qualifier conversions.
- if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ /// Conversions to 'id' subsume cv-qualifier conversions.
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
return ToType.getUnqualifiedType();
QualType CanonFromPointee
@@ -1496,6 +1530,9 @@ BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
QualType CanonToPointee = Context.getCanonicalType(ToPointee);
Qualifiers Quals = CanonFromPointee.getQualifiers();
+ if (StripObjCLifetime)
+ Quals.removeObjCLifetime();
+
// Exact qualifier match -> return the pointer type we're converting to.
if (CanonToPointee.getLocalQualifiers() == Quals) {
// ToType is exactly what we need. Return it.
@@ -1599,7 +1636,8 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
// Beyond this point, both types need to be pointers
// , including objective-c pointers.
QualType ToPointeeType = ToTypePtr->getPointeeType();
- if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType()) {
+ if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
+ !getLangOptions().ObjCAutoRefCount) {
ConvertedType = BuildSimilarlyQualifiedPointerType(
FromType->getAs<ObjCObjectPointerType>(),
ToPointeeType,
@@ -1624,7 +1662,8 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
ToPointeeType->isVoidType()) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
- ToType, Context);
+ ToType, Context,
+ /*StripObjCLifetime=*/true);
return true;
}
@@ -1814,6 +1853,7 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
ToPointeeType->getAs<ObjCObjectPointerType>() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
+
ConvertedType = Context.getPointerType(ConvertedType);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
@@ -1885,6 +1925,73 @@ bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
return false;
}
+/// \brief Determine whether this is an Objective-C writeback conversion,
+/// used for parameter passing when performing automatic reference counting.
+///
+/// \param FromType The type we're converting from.
+///
+/// \param ToType The type we're converting to.
+///
+/// \param ConvertedType The type that will be produced after applying
+/// this conversion.
+bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
+ QualType &ConvertedType) {
+ if (!getLangOptions().ObjCAutoRefCount ||
+ Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // Parameter must be a pointer to __autoreleasing (with no other qualifiers).
+ QualType ToPointee;
+ if (const PointerType *ToPointer = ToType->getAs<PointerType>())
+ ToPointee = ToPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers ToQuals = ToPointee.getQualifiers();
+ if (!ToPointee->isObjCLifetimeType() ||
+ ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
+ !ToQuals.withoutObjCGLifetime().empty())
+ return false;
+
+  // Argument must be a pointer to __strong or __weak.
+ QualType FromPointee;
+ if (const PointerType *FromPointer = FromType->getAs<PointerType>())
+ FromPointee = FromPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers FromQuals = FromPointee.getQualifiers();
+ if (!FromPointee->isObjCLifetimeType() ||
+ (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
+ FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
+ return false;
+
+ // Make sure that we have compatible qualifiers.
+ FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ if (!ToQuals.compatiblyIncludes(FromQuals))
+ return false;
+
+ // Remove qualifiers from the pointee type we're converting from; they
+  // aren't used in the compatibility check below, and we'll be adding back
+ // qualifiers (with __autoreleasing) if the compatibility check succeeds.
+ FromPointee = FromPointee.getUnqualifiedType();
+
+ // The unqualified form of the pointee types must be compatible.
+ ToPointee = ToPointee.getUnqualifiedType();
+ bool IncompatibleObjC;
+ if (Context.typesAreCompatible(FromPointee, ToPointee))
+ FromPointee = ToPointee;
+ else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
+ IncompatibleObjC))
+ return false;
+
+  // Construct the type we're converting to, which is a pointer to the
+  // __autoreleasing pointee.
+ FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
+ ConvertedType = Context.getPointerType(FromPointee);
+ return true;
+}
+
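
What the writeback conversion permits, seen from the user's side (illustrative Objective-C under ARC; the function is invented):

    #import <Foundation/Foundation.h>

    BOOL removeItem(NSString *path, NSError * __autoreleasing *outError);

    void report(NSString *path) {
      NSError *error = nil;              // __strong by default under ARC
      if (!removeItem(path, &error)) {   // &error is __strong NSError **; the
                                         // writeback conversion passes a temporary
                                         // __autoreleasing NSError * and copies it
                                         // back into 'error' after the call
        NSLog(@"failed: %@", error);
      }
    }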
bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType) {
QualType ToPointeeType;
@@ -2178,12 +2285,17 @@ bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
/// IsQualificationConversion - Determines whether the conversion from
/// an rvalue of type FromType to ToType is a qualification conversion
/// (C++ 4.4).
+///
+/// \param ObjCLifetimeConversion Output parameter that will be set to indicate
+/// when the qualification conversion involves a change in the Objective-C
+/// object lifetime.
bool
Sema::IsQualificationConversion(QualType FromType, QualType ToType,
- bool CStyle) {
+ bool CStyle, bool &ObjCLifetimeConversion) {
FromType = Context.getCanonicalType(FromType);
ToType = Context.getCanonicalType(ToType);
-
+ ObjCLifetimeConversion = false;
+
// If FromType and ToType are the same type, this is not a
// qualification conversion.
if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
@@ -2206,6 +2318,21 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType,
Qualifiers FromQuals = FromType.getQualifiers();
Qualifiers ToQuals = ToType.getQualifiers();
+ // Objective-C ARC:
+ // Check Objective-C lifetime conversions.
+ if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
+ UnwrappedAnyPointer) {
+ if (ToQuals.compatiblyIncludesObjCLifetime(FromQuals)) {
+ ObjCLifetimeConversion = true;
+ FromQuals.removeObjCLifetime();
+ ToQuals.removeObjCLifetime();
+ } else {
+ // Qualification conversions cannot cast between different
+ // Objective-C lifetime qualifiers.
+ return false;
+ }
+ }
+
// Allow addition/removal of GC attributes but not changing GC attributes.
if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
(!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
@@ -2713,6 +2840,15 @@ CompareStandardConversionSequences(Sema &S,
QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
if (UnqualT1 == UnqualT2) {
+ // Objective-C++ ARC: If the references refer to objects with different
+ // lifetimes, prefer bindings that don't change lifetime.
+ if (SCS1.ObjCLifetimeConversionBinding !=
+ SCS2.ObjCLifetimeConversionBinding) {
+ return SCS1.ObjCLifetimeConversionBinding
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
// If the type is an array type, promote the element qualifiers to the
// type for comparison.
if (isa<ArrayType>(T1) && T1Quals)
@@ -2722,7 +2858,7 @@ CompareStandardConversionSequences(Sema &S,
if (T2.isMoreQualifiedThan(T1))
return ImplicitConversionSequence::Better;
else if (T1.isMoreQualifiedThan(T2))
- return ImplicitConversionSequence::Worse;
+ return ImplicitConversionSequence::Worse;
}
}
@@ -2770,6 +2906,17 @@ CompareQualificationConversions(Sema &S,
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++ ARC:
+ // Prefer qualification conversions not involving a change in lifetime
+  // to qualification conversions that do change lifetime.
+ if (SCS1.QualificationIncludesObjCLifetime !=
+ SCS2.QualificationIncludesObjCLifetime) {
+ Result = SCS1.QualificationIncludesObjCLifetime
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
@@ -3039,7 +3186,8 @@ Sema::ReferenceCompareResult
Sema::CompareReferenceRelationship(SourceLocation Loc,
QualType OrigT1, QualType OrigT2,
bool &DerivedToBase,
- bool &ObjCConversion) {
+ bool &ObjCConversion,
+ bool &ObjCLifetimeConversion) {
assert(!OrigT1->isReferenceType() &&
"T1 must be the pointee type of the reference type");
assert(!OrigT2->isReferenceType() && "T2 cannot be a reference type");
@@ -3056,6 +3204,7 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
// T1 is a base class of T2.
DerivedToBase = false;
ObjCConversion = false;
+ ObjCLifetimeConversion = false;
if (UnqualT1 == UnqualT2) {
// Nothing to do.
} else if (!RequireCompleteType(Loc, OrigT2, PDiag()) &&
@@ -3090,9 +3239,16 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
// qualifiers when performing these computations, so that e.g., an int in
// address space 1 is not reference-compatible with an int in address
// space 2.
+ if (T1Quals.getObjCLifetime() != T2Quals.getObjCLifetime() &&
+ T1Quals.compatiblyIncludesObjCLifetime(T2Quals)) {
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCLifetime();
+ ObjCLifetimeConversion = true;
+ }
+
if (T1Quals == T2Quals)
return Ref_Compatible;
- else if (T1.isMoreQualifiedThan(T2))
+ else if (T1Quals.compatiblyIncludes(T2Quals))
return Ref_Compatible_With_Added_Qualification;
else
return Ref_Related;
@@ -3135,13 +3291,14 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
if (AllowRvalues) {
bool DerivedToBase = false;
bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
if (!ConvTemplate &&
S.CompareReferenceRelationship(
DeclLoc,
Conv->getConversionType().getNonReferenceType()
.getUnqualifiedType(),
DeclType.getNonReferenceType().getUnqualifiedType(),
- DerivedToBase, ObjCConversion) ==
+ DerivedToBase, ObjCConversion, ObjCLifetimeConversion) ==
Sema::Ref_Incompatible)
continue;
} else {
@@ -3242,10 +3399,11 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
bool isRValRef = DeclType->isRValueReferenceType();
bool DerivedToBase = false;
bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
Expr::Classification InitCategory = Init->Classify(S.Context);
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase,
- ObjCConversion);
+ ObjCConversion, ObjCLifetimeConversion);
// C++0x [dcl.init.ref]p5:
@@ -3283,6 +3441,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = false;
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
ICS.Standard.CopyConstructor = 0;
// Nothing more to do: the inaccessibility/ambiguity check for
@@ -3328,7 +3487,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// -- If the initializer expression
//
// -- is an xvalue, class prvalue, array prvalue or function
- // lvalue and "cv1T1" is reference-compatible with "cv2 T2", or
+ // lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification &&
(InitCategory.isXValue() ||
(InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
@@ -3356,6 +3515,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = InitCategory.isRValue();
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
ICS.Standard.CopyConstructor = 0;
return ICS;
}
@@ -3398,7 +3558,17 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// we would be reference-compatible or reference-compatible with
// added qualification. But that wasn't the case, so the reference
// initialization fails.
- return ICS;
+ //
+ // Note that we only want to check address spaces and cvr-qualifiers here.
+ // ObjC GC and lifetime qualifiers aren't important.
+ Qualifiers T1Quals = T1.getQualifiers();
+ Qualifiers T2Quals = T2.getQualifiers();
+ T1Quals.removeObjCGCAttr();
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCGCAttr();
+ T2Quals.removeObjCLifetime();
+ if (!T1Quals.compatiblyIncludes(T2Quals))
+ return ICS;
}
// If at least one of the types is a class type, the types are not
@@ -3429,7 +3599,8 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
/*AllowExplicit=*/false,
/*InOverloadResolution=*/false,
- /*CStyle=*/false);
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
// Of course, that's still a reference binding.
if (ICS.isStandard()) {
@@ -3438,12 +3609,14 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = true;
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = false;
} else if (ICS.isUserDefined()) {
ICS.UserDefined.After.ReferenceBinding = true;
ICS.Standard.IsLvalueReference = !isRValRef;
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = true;
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = false;
}
return ICS;
@@ -3458,7 +3631,8 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
- bool InOverloadResolution) {
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion) {
if (ToType->isReferenceType())
return TryReferenceInit(S, From, ToType,
/*FIXME:*/From->getLocStart(),
@@ -3469,7 +3643,8 @@ TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
SuppressUserConversions,
/*AllowExplicit=*/false,
InOverloadResolution,
- /*CStyle=*/false);
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion);
}
/// TryObjectArgumentInitialization - Try to initialize the object
@@ -3659,7 +3834,8 @@ TryContextuallyConvertToBool(Sema &S, Expr *From) {
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
/*InOverloadResolution=*/false,
- /*CStyle=*/false);
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
}
/// PerformContextuallyConvertToBool - Perform a contextual conversion
@@ -3686,7 +3862,8 @@ TryContextuallyConvertToObjCId(Sema &S, Expr *From) {
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
/*InOverloadResolution=*/false,
- /*CStyle=*/false);
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
}
/// PerformContextuallyConvertToObjCId - Perform a contextual conversion
@@ -3980,7 +4157,9 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.Conversions[ArgIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
- /*InOverloadResolution=*/true);
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOptions().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4153,7 +4332,9 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.Conversions[ArgIdx + 1]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
- /*InOverloadResolution=*/true);
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOptions().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4356,25 +4537,26 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
CK_FunctionToPointerDecay,
&ConversionRef, VK_RValue);
- QualType CallResultType
- = Conversion->getConversionType().getNonLValueExprType(Context);
- if (RequireCompleteType(From->getLocStart(), CallResultType, 0)) {
+ QualType ConversionType = Conversion->getConversionType();
+ if (RequireCompleteType(From->getLocStart(), ConversionType, 0)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
}
- ExprValueKind VK = Expr::getValueKindForType(Conversion->getConversionType());
+ ExprValueKind VK = Expr::getValueKindForType(ConversionType);
// Note that it is safe to allocate CallExpr on the stack here because
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
+ QualType CallResultType = ConversionType.getNonLValueExprType(Context);
CallExpr Call(Context, &ConversionFn, 0, 0, CallResultType, VK,
From->getLocStart());
ImplicitConversionSequence ICS =
TryCopyInitialization(*this, &Call, ToType,
/*SuppressUserConversions=*/true,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/false);
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
@@ -4544,7 +4726,9 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
Candidate.Conversions[ArgIdx + 1]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
/*SuppressUserConversions=*/false,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOptions().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -4662,7 +4846,9 @@ void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
Candidate.Conversions[ArgIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamTys[ArgIdx],
ArgIdx == 0 && IsAssignmentOperator,
- /*InOverloadResolution=*/false);
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOptions().ObjCAutoRefCount);
}
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
@@ -6417,7 +6603,8 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
// Best is the best viable function.
if (Best->Function &&
- (Best->Function->isDeleted() || Best->Function->isUnavailable()))
+ (Best->Function->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Best->Function)))
return OR_Deleted;
return OR_Success;
@@ -6619,6 +6806,17 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
return;
}
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
<< (unsigned) FnKind << FnDesc
@@ -6901,7 +7099,8 @@ void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
- if (Cand->Viable && (Fn->isDeleted() || Fn->isUnavailable())) {
+ if (Cand->Viable && (Fn->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Fn))) {
std::string FnDesc;
OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
@@ -7152,7 +7351,9 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
= TryCopyInitialization(S, Args[ConvIdx],
Cand->BuiltinTypes.ParamTypes[ConvIdx],
SuppressUserConversions,
- /*InOverloadResolution*/ true);
+ /*InOverloadResolution*/ true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOptions().ObjCAutoRefCount);
return;
}
@@ -7163,7 +7364,9 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
Cand->Conversions[ConvIdx]
= TryCopyInitialization(S, Args[ArgIdx], Proto->getArgType(ArgIdx),
SuppressUserConversions,
- /*InOverloadResolution=*/true);
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOptions().ObjCAutoRefCount);
else
Cand->Conversions[ConvIdx].setEllipsis();
}
@@ -7407,8 +7610,8 @@ private:
QualType ResultTy;
if (Context.hasSameUnqualifiedType(TargetFunctionType,
FunDecl->getType()) ||
- IsNoReturnConversion(Context, FunDecl->getType(), TargetFunctionType,
- ResultTy)) {
+ S.IsNoReturnConversion(FunDecl->getType(), TargetFunctionType,
+ ResultTy)) {
Matches.push_back(std::make_pair(CurAccessFunPair,
cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
FoundNonTemplateFunction = true;
@@ -7742,13 +7945,17 @@ static void AddOverloadedCallCandidate(Sema &S,
TemplateArgumentListInfo *ExplicitTemplateArgs,
Expr **Args, unsigned NumArgs,
OverloadCandidateSet &CandidateSet,
- bool PartialOverloading) {
+ bool PartialOverloading,
+ bool KnownValid) {
NamedDecl *Callee = FoundDecl.getDecl();
if (isa<UsingShadowDecl>(Callee))
Callee = cast<UsingShadowDecl>(Callee)->getTargetDecl();
if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Callee)) {
- assert(!ExplicitTemplateArgs && "Explicit template arguments?");
+ if (ExplicitTemplateArgs) {
+ assert(!KnownValid && "Explicit template arguments?");
+ return;
+ }
S.AddOverloadCandidate(Func, FoundDecl, Args, NumArgs, CandidateSet,
false, PartialOverloading);
return;
@@ -7762,9 +7969,7 @@ static void AddOverloadedCallCandidate(Sema &S,
return;
}
- assert(false && "unhandled case in overloaded call candidate");
-
- // do nothing?
+ assert(!KnownValid && "unhandled case in overloaded call candidate");
}
/// \brief Add the overload candidates named by callee and/or found by argument
@@ -7815,7 +8020,7 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
E = ULE->decls_end(); I != E; ++I)
AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs,
Args, NumArgs, CandidateSet,
- PartialOverloading);
+ PartialOverloading, /*KnownValid*/ true);
if (ULE->requiresADL())
AddArgumentDependentLookupCandidates(ULE->getName(), /*Operator*/ false,
@@ -7857,13 +8062,15 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
AddOverloadedCallCandidate(SemaRef, I.getPair(),
ExplicitTemplateArgs, Args, NumArgs,
- Candidates, false);
+ Candidates, false, /*KnownValid*/ false);
OverloadCandidateSet::iterator Best;
- if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success)
+ if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
// No viable functions. Don't bother the user with notes for functions
// which don't work and shouldn't be found anyway.
+ R.clear();
return false;
+ }
// Find the namespaces where ADL would have looked, and suggest
// declaring the function there instead.
@@ -8673,7 +8880,10 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ResultTy = ResultTy.getNonLValueExprType(Context);
// Build the actual expression node.
- ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, LLoc);
+ DeclarationNameLoc LocInfo;
+ LocInfo.CXXOperatorName.BeginOpNameLoc = LLoc.getRawEncoding();
+ LocInfo.CXXOperatorName.EndOpNameLoc = RLoc.getRawEncoding();
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, LLoc, LocInfo);
if (FnExpr.isInvalid())
return ExprError();
@@ -8963,13 +9173,14 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
TheCall->getMethodDecl()->isPure()) {
const CXXMethodDecl *MD = TheCall->getMethodDecl();
- if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()))
+ if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts())) {
Diag(MemExpr->getLocStart(),
diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
<< MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
<< MD->getParent()->getDeclName();
Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
+ }
}
return MaybeBindToTemporary(TheCall);
}
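What the new AllowObjCWritebackConversion flag gates is ARC's pass-by-writeback conversion. A minimal Objective-C sketch of that conversion as it appears in user code, assuming only the ARC rules themselves (the identifiers below are illustrative and not taken from the patch):

    // clang -fobjc-arc -fsyntax-only writeback.m
    @class NSError;

    void fetchValue(NSError * __autoreleasing *outError);

    void caller(void) {
      NSError *err;        // __strong by default; ARC nil-initializes it
      fetchValue(&err);    // '&err' has type 'NSError * __strong *'; the call is
                           // well-formed because ARC performs the writeback
                           // conversion to 'NSError * __autoreleasing *', passing
                           // a temporary and copying back into 'err' afterwards
    }

This is why the candidate-building paths above pass getLangOptions().ObjCAutoRefCount for the new parameter: the conversion is only considered for candidate arguments when ARC is enabled.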
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index d7c0a543eede..65f431d46075 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -66,8 +66,30 @@ void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
// If we have an invalid decl, just return.
if (DG.isNull() || !DG.isSingleDecl()) return;
+ VarDecl *var = cast<VarDecl>(DG.getSingleDecl());
+
// suppress any potential 'unused variable' warning.
- DG.getSingleDecl()->setUsed();
+ var->setUsed();
+
+ // foreach variables are never actually initialized in the way that
+ // the parser came up with.
+ var->setInit(0);
+
+ // In ARC, we don't need to retain the iteration variable of a fast
+ // enumeration loop. Rather than actually trying to catch that
+ // during declaration processing, we remove the consequences here.
+ if (getLangOptions().ObjCAutoRefCount) {
+ QualType type = var->getType();
+
+ // Only do this if we inferred the lifetime. Inferred lifetime
+ // will show up as a local qualifier because explicit lifetime
+ // should have shown up as an AttributedType instead.
+ if (type.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Add 'const' and mark the variable as pseudo-strong.
+ var->setType(type.withConst());
+ var->setARCPseudoStrong(true);
+ }
+ }
}
void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
@@ -114,6 +136,10 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
}
}
} else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ if (getLangOptions().ObjCAutoRefCount && ME->isDelegateInitCall()) {
+ Diag(Loc, diag::err_arc_unused_init_message) << R1;
+ return;
+ }
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
Diag(Loc, diag::warn_unused_call) << R1 << R2 << "warn_unused_result";
@@ -951,14 +977,13 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
return StmtError(Diag((*DS->decl_begin())->getLocation(),
diag::err_toomany_element_decls));
- Decl *D = DS->getSingleDecl();
- FirstType = cast<ValueDecl>(D)->getType();
+ VarDecl *D = cast<VarDecl>(DS->getSingleDecl());
+ FirstType = D->getType();
// C99 6.8.5p3: The declaration part of a 'for' statement shall only
// declare identifiers for objects having storage class 'auto' or
// 'register'.
- VarDecl *VD = cast<VarDecl>(D);
- if (VD->isLocalVarDecl() && !VD->hasLocalStorage())
- return StmtError(Diag(VD->getLocation(),
+ if (!D->hasLocalStorage())
+ return StmtError(Diag(D->getLocation(),
diag::err_non_variable_decl_in_for));
} else {
Expr *FirstE = cast<Expr>(First);
@@ -1047,6 +1072,13 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
Decl->setTypeSourceInfo(InitTSI);
Decl->setType(InitTSI->getType());
+ // In ARC, infer lifetime.
+ // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
+ // we're doing the equivalent of fast iteration.
+ if (SemaRef.getLangOptions().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Decl))
+ Decl->setInvalidDecl();
+
SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false,
/*TypeMayContainAuto=*/false);
SemaRef.FinalizeDeclaration(Decl);
@@ -1374,6 +1406,9 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
if (LoopVar->isInvalidDecl())
NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
}
+ } else {
+ // The range is implicitly used as a placeholder when it is dependent.
+ RangeVar->setUsed();
}
return Owned(new (Context) CXXForRangeStmt(RangeDS,
@@ -1508,7 +1543,8 @@ ExprResult
Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
- Expr *Value) {
+ Expr *Value,
+ bool AllowNRVO) {
// C++0x [class.copy]p33:
// When the criteria for elision of a copy operation are met or would
// be met save for the fact that the source object is a function
@@ -1516,7 +1552,8 @@ Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
// overload resolution to select the constructor for the copy is first
// performed as if the object were designated by an rvalue.
ExprResult Res = ExprError();
- if (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true)) {
+ if (AllowNRVO &&
+ (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true))) {
ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
Value->getType(), CK_LValueToRValue,
Value, VK_XValue);
@@ -1726,8 +1763,17 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (D != diag::ext_return_has_void_expr ||
!getLangOptions().CPlusPlus) {
NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
Diag(ReturnLoc, D)
- << CurDecl->getDeclName() << isa<ObjCMethodDecl>(CurDecl)
+ << CurDecl->getDeclName() << FunctionKind
<< RetValExp->getSourceRange();
}
}
@@ -1756,7 +1802,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// overlap restriction of subclause 6.5.16.1 does not apply to the case of
// function return.
- // In C++ the return statement is handled via a copy initialization.
+ // In C++ the return statement is handled via a copy initialization,
// the C version of which boils down to CheckSingleAssignmentConstraints.
NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
@@ -1797,7 +1843,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (getLangOptions().CPlusPlus && FnRetType->isRecordType() &&
!CurContext->isDependentContext())
FunctionScopes.back()->Returns.push_back(Result);
-
+
return Owned(Result);
}
@@ -1955,7 +2001,7 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
llvm::StringRef Clobber = Literal->getString();
- if (!Context.Target.isValidGCCRegisterName(Clobber))
+ if (!Context.Target.isValidClobber(Clobber))
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_unknown_register_name) << Clobber);
}
@@ -2179,6 +2225,12 @@ Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
HandlerBlock));
}
+StmtResult
+Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
+ getCurFunction()->setHasBranchProtectedScope();
+ return Owned(new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body));
+}
+
namespace {
class TypeWithHandler {
@@ -2309,4 +2361,3 @@ Sema::ActOnSEHFinallyBlock(SourceLocation Loc,
assert(Block);
return Owned(SEHFinallyStmt::Create(Context,Loc,Block));
}
-
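The ActOnForEachDeclStmt change above marks the fast-enumeration loop variable const and "ARC pseudo-strong" so that no retain/release traffic is generated for it. A short Objective-C sketch of the resulting behavior, assuming only what the hunk and its comments state (the function name is made up):

    // clang -fobjc-arc -fsyntax-only
    void visitAll(id collection) {
      for (id element in collection) {
        // 'element' behaves as if declared 'const __strong id': ARC emits no
        // retain/release for it, and plain assignments to it are rejected
        // unless the variable is explicitly declared __strong.
        (void)element;
      }
    }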
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 5d4caacd8124..3ac190e1d4c8 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -294,26 +294,31 @@ void Sema::LookupTemplateName(LookupResult &Found,
if (Found.empty() && !isDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
- if (DeclarationName Corrected = CorrectTypo(Found, S, &SS, LookupCtx,
- false, CTC_CXXCasts)) {
+ Found.clear();
+ if (TypoCorrection Corrected = CorrectTypo(Found.getLookupNameInfo(),
+ Found.getLookupKind(), S, &SS,
+ LookupCtx, false,
+ CTC_CXXCasts)) {
+ Found.setLookupName(Corrected.getCorrection());
+ if (Corrected.getCorrectionDecl())
+ Found.addDecl(Corrected.getCorrectionDecl());
FilterAcceptableTemplateNames(Found);
if (!Found.empty()) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOptions()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_template_suggest)
- << Name << LookupCtx << Found.getLookupName() << SS.getRange()
- << FixItHint::CreateReplacement(Found.getNameLoc(),
- Found.getLookupName().getAsString());
+ << Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
else
Diag(Found.getNameLoc(), diag::err_no_template_suggest)
- << Name << Found.getLookupName()
- << FixItHint::CreateReplacement(Found.getNameLoc(),
- Found.getLookupName().getAsString());
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
if (TemplateDecl *Template = Found.getAsSingle<TemplateDecl>())
Diag(Template->getLocation(), diag::note_previous_decl)
- << Template->getDeclName();
+ << CorrectedQuotedStr;
}
} else {
- Found.clear();
Found.setLookupName(Name);
}
}
@@ -1856,7 +1861,8 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
QualType Sema::CheckTemplateIdType(TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
- DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ DependentTemplateName *DTN
+ = Name.getUnderlying().getAsDependentTemplateName();
if (DTN && DTN->isIdentifier())
// When building a template-id where the template-name is dependent,
// assume the template is a type template. Either our assumption is
@@ -1892,6 +1898,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
QualType CanonType;
+ bool InstantiationDependent = false;
if (TypeAliasTemplateDecl *AliasTemplate
= dyn_cast<TypeAliasTemplateDecl>(Template)) {
// Find the canonical type for this type alias template specialization.
@@ -1917,7 +1924,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
return QualType();
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs)) {
+ TemplateArgs, InstantiationDependent)) {
// This class template specialization is a dependent
// type. Therefore, its canonical type is another class template
// specialization type that contains all of the converted
@@ -2357,8 +2364,20 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
return true;
// Add the converted template type argument.
- Converted.push_back(
- TemplateArgument(Context.getCanonicalType(Arg.getAsType())));
+ QualType ArgType = Context.getCanonicalType(Arg.getAsType());
+
+ // Objective-C ARC:
+ // If an explicitly-specified template argument type is a lifetime type
+ // with no lifetime qualifier, the __strong lifetime qualifier is inferred.
+ if (getLangOptions().ObjCAutoRefCount &&
+ ArgType->isObjCLifetimeType() &&
+ !ArgType.getObjCLifetime()) {
+ Qualifiers Qs;
+ Qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ ArgType = Context.getQualifiedType(ArgType, Qs);
+ }
+
+ Converted.push_back(TemplateArgument(ArgType));
return false;
}
@@ -2912,16 +2931,6 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
// arguments, just break out now and we'll fill in the argument pack below.
if ((*Param)->isTemplateParameterPack())
break;
-
- // If our template is a template template parameter that hasn't acquired
- // its proper context yet (e.g., because we're using the template template
- // parameter in the signature of a function template, before we've built
- // the function template itself), don't attempt substitution of default
- // template arguments at this point: we don't have enough context to
- // do it properly.
- if (isTemplateTemplateParameter &&
- Template->getDeclContext()->isTranslationUnit())
- break;
// We have a default template argument that we will use.
TemplateArgumentLoc Arg;
@@ -3307,8 +3316,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
QualType ArgType = Arg->getType();
// See through any implicit casts we added to fix the type.
- while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
- Arg = Cast->getSubExpr();
+ Arg = Arg->IgnoreImpCasts();
// C++ [temp.arg.nontype]p1:
//
@@ -3321,7 +3329,6 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// expressed as & id-expression where the & is optional if
// the name refers to a function or array, or if the
// corresponding template-parameter is a reference; or
- DeclRefExpr *DRE = 0;
// In C++98/03 mode, give an extension warning on any extra parentheses.
// See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
@@ -3337,29 +3344,30 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
Arg = Parens->getSubExpr();
}
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
bool AddressTaken = false;
SourceLocation AddrOpLoc;
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
if (UnOp->getOpcode() == UO_AddrOf) {
- // Support &__uuidof(class_with_uuid) as a non-type template argument.
- // Very common in Microsoft COM headers.
- if (S.getLangOptions().Microsoft &&
- isa<CXXUuidofExpr>(UnOp->getSubExpr())) {
- Converted = TemplateArgument(ArgIn);
- return false;
- }
-
- DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
+ Arg = UnOp->getSubExpr();
AddressTaken = true;
AddrOpLoc = UnOp->getOperatorLoc();
}
- } else {
- if (S.getLangOptions().Microsoft && isa<CXXUuidofExpr>(Arg)) {
- Converted = TemplateArgument(ArgIn);
- return false;
- }
- DRE = dyn_cast<DeclRefExpr>(Arg);
}
+
+ if (S.getLangOptions().Microsoft && isa<CXXUuidofExpr>(Arg)) {
+ Converted = TemplateArgument(ArgIn);
+ return false;
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
if (!DRE) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
@@ -3513,9 +3521,11 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
}
+ bool ObjCLifetimeConversion;
if (ParamType->isPointerType() &&
!ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
- S.IsQualificationConversion(ArgType, ParamType, false)) {
+ S.IsQualificationConversion(ArgType, ParamType, false,
+ ObjCLifetimeConversion)) {
// For pointer-to-object types, qualification conversions are
// permitted.
} else {
@@ -3552,10 +3562,10 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// We can't perform this conversion or binding.
if (ParamType->isReferenceType())
S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
- << ParamType << Arg->getType() << Arg->getSourceRange();
+ << ParamType << ArgIn->getType() << Arg->getSourceRange();
else
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
- << Arg->getType() << ParamType << Arg->getSourceRange();
+ << ArgIn->getType() << ParamType << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
@@ -3599,6 +3609,10 @@ bool Sema::CheckTemplateArgumentPointerToMember(Expr *Arg,
Arg = Parens->getSubExpr();
}
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
// A pointer-to-member constant written &Class::member.
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
if (UnOp->getOpcode() == UO_AddrOf) {
@@ -3875,8 +3889,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return Owned(Arg);
}
+ bool ObjCLifetimeConversion;
if (IsQualificationConversion(ArgType, ParamType.getNonReferenceType(),
- false)) {
+ false, ObjCLifetimeConversion)) {
Arg = ImpCastExprToType(Arg, ParamType, CK_NoOp, CastCategory(Arg)).take();
} else if (!Context.hasSameUnqualifiedType(ArgType,
ParamType.getNonReferenceType())) {
@@ -3943,9 +3958,11 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// member, qualification conversions (4.4) are applied.
assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
+ bool ObjCLifetimeConversion;
if (Context.hasSameUnqualifiedType(ParamType, ArgType)) {
// Types match exactly: nothing more to do here.
- } else if (IsQualificationConversion(ArgType, ParamType, false)) {
+ } else if (IsQualificationConversion(ArgType, ParamType, false,
+ ObjCLifetimeConversion)) {
Arg = ImpCastExprToType(Arg, ParamType, CK_NoOp, CastCategory(Arg)).take();
} else {
// We can't perform this conversion.
@@ -4053,8 +4070,10 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
// We might need to perform a trailing qualification conversion, since
// the element type on the parameter could be more qualified than the
// element type in the expression we constructed.
+ bool ObjCLifetimeConversion;
if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
- ParamType.getUnqualifiedType(), false))
+ ParamType.getUnqualifiedType(), false,
+ ObjCLifetimeConversion))
RefExpr = ImpCastExprToType(RefExpr.take(), ParamType.getUnqualifiedType(), CK_NoOp);
assert(!RefExpr.isInvalid() &&
@@ -4818,10 +4837,12 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Converted))
return true;
+ bool InstantiationDependent;
if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs.getArgumentArray(),
- TemplateArgs.size())) {
+ TemplateArgs.size(),
+ InstantiationDependent)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
isPartialSpecialization = false;
@@ -5346,7 +5367,7 @@ Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
/// explicitly provided as in, e.g., \c void sort<>(char*, char*);
/// as it anyway contains info on the angle brackets locations.
///
-/// \param PrevDecl the set of declarations that may be specialized by
+/// \param Previous the set of declarations that may be specialized by
/// this function specialization.
bool
Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
@@ -5410,13 +5431,12 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
FunctionTemplateSpecializationInfo *SpecInfo
= Specialization->getTemplateSpecializationInfo();
assert(SpecInfo && "Function template specialization info missing?");
- {
- // Note: do not overwrite location info if previous template
- // specialization kind was explicit.
- TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
- if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation)
- Specialization->setLocation(FD->getLocation());
- }
+
+ // Note: do not overwrite location info if previous template
+ // specialization kind was explicit.
+ TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
+ if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation)
+ Specialization->setLocation(FD->getLocation());
// FIXME: Check if the prior specialization has a point of instantiation.
// If so, we have run afoul of .
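The CheckTemplateTypeArgument hunk above implements the ARC rule that an explicitly-specified template argument of retainable object pointer ("lifetime") type with no lifetime qualifier is given __strong. An Objective-C++ sketch under that rule (the Holder template is illustrative):

    // clang -fobjc-arc -fsyntax-only -x objective-c++
    template <typename T> struct Holder { T value; };

    Holder<id>        defaulted;  // no lifetime qualifier written: treated as
                                  // Holder<__strong id>
    Holder<__weak id> observed;   // an explicit qualifier is left untouched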
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 7d0ce8b2e71d..dcb4ff286060 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -12,7 +12,6 @@
#include "clang/Sema/Sema.h"
#include "clang/Sema/DeclSpec.h"
-#include "clang/Sema/SemaDiagnostic.h" // FIXME: temporary!
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/AST/ASTContext.h"
@@ -820,6 +819,11 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
ParamQs.hasAddressSpace())
return true;
+ // Mismatched (but not missing) Objective-C lifetime qualifiers.
+ if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() &&
+ ParamQs.hasObjCLifetime())
+ return true;
+
// CVR qualifier superset.
return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) &&
((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers())
@@ -1010,6 +1014,17 @@ DeduceTemplateArguments(Sema &S,
DeducedQs.removeObjCGCAttr();
if (ParamQs.hasAddressSpace())
DeducedQs.removeAddressSpace();
+ if (ParamQs.hasObjCLifetime())
+ DeducedQs.removeObjCLifetime();
+
+ // Objective-C ARC:
+ // If template deduction would produce an argument type with lifetime type
+ // but no lifetime qualifier, the __strong lifetime qualifier is inferred.
+ if (S.getLangOptions().ObjCAutoRefCount &&
+ DeducedType->isObjCLifetimeType() &&
+ !DeducedQs.hasObjCLifetime())
+ DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
+
DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(),
DeducedQs);
@@ -1054,10 +1069,39 @@ DeduceTemplateArguments(Sema &S,
}
switch (Param->getTypeClass()) {
- // No deduction possible for these types
+ // Non-canonical types cannot appear here.
+#define NON_CANONICAL_TYPE(Class, Base) \
+ case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class);
+#define TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ llvm_unreachable("Type nodes handled above");
+
+ // These types cannot be used in templates or cannot be dependent, so
+ // deduction always fails.
case Type::Builtin:
+ case Type::VariableArray:
+ case Type::Vector:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
return Sema::TDK_NonDeducedMismatch;
+ // _Complex T [placeholder extension]
+ case Type::Complex:
+ if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
+ return DeduceTemplateArguments(S, TemplateParams,
+ cast<ComplexType>(Param)->getElementType(),
+ ComplexArg->getElementType(),
+ Info, Deduced, TDF);
+
+ return Sema::TDK_NonDeducedMismatch;
+
// T *
case Type::Pointer: {
QualType PointeeType;
@@ -1361,17 +1405,107 @@ DeduceTemplateArguments(Sema &S,
Deduced, 0);
}
+ // (clang extension)
+ //
+ // T __attribute__((ext_vector_type(<integral constant>)))
+ case Type::ExtVector: {
+ const ExtVectorType *VectorParam = cast<ExtVectorType>(Param);
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Make sure that the vectors have the same number of elements.
+ if (VectorParam->getNumElements() != VectorArg->getNumElements())
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArguments(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced,
+ TDF);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // We can't check the number of elements, since the argument has a
+ // dependent number of elements. This can only occur during partial
+ // ordering.
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArguments(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced,
+ TDF);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // (clang extension)
+ //
+ // T __attribute__((ext_vector_type(N)))
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *VectorParam
+ = cast<DependentSizedExtVectorType>(Param);
+
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced,
+ TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = VectorArg->getNumElements();
+ return DeduceNonTypeTemplateArgument(S, NTTP, ArgSize, S.Context.IntTy,
+ false, Info, Deduced);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced,
+ TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ return DeduceNonTypeTemplateArgument(S, NTTP, VectorArg->getSizeExpr(),
+ Info, Deduced);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::DependentName:
+ case Type::UnresolvedUsing:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Auto:
+ case Type::DependentTemplateSpecialization:
+ case Type::PackExpansion:
// No template argument deduction for these types
return Sema::TDK_Success;
-
- default:
- break;
}
- // FIXME: Many more cases to go (to go).
return Sema::TDK_Success;
}
@@ -2189,15 +2323,108 @@ Sema::SubstituteExplicitTemplateArguments(
return TDK_Success;
}
+/// \brief Check whether the deduced argument type for a call to a function
+/// template matches the actual argument type per C++ [temp.deduct.call]p4.
+static bool
+CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
+ QualType DeducedA) {
+ ASTContext &Context = S.Context;
+
+ QualType A = OriginalArg.OriginalArgType;
+ QualType OriginalParamType = OriginalArg.OriginalParamType;
+
+ // Check for type equality (top-level cv-qualifiers are ignored).
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ // Strip off references on the argument types; they aren't needed for
+ // the following checks.
+ if (const ReferenceType *DeducedARef = DeducedA->getAs<ReferenceType>())
+ DeducedA = DeducedARef->getPointeeType();
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>())
+ A = ARef->getPointeeType();
+
+ // C++ [temp.deduct.call]p4:
+ // [...] However, there are three cases that allow a difference:
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
+ if (const ReferenceType *OriginalParamRef
+ = OriginalParamType->getAs<ReferenceType>()) {
+ // We don't want to keep the reference around any more.
+ OriginalParamType = OriginalParamRef->getPointeeType();
+
+ Qualifiers AQuals = A.getQualifiers();
+ Qualifiers DeducedAQuals = DeducedA.getQualifiers();
+ if (AQuals == DeducedAQuals) {
+ // Qualifiers match; there's nothing to do.
+ } else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
+ return true;
+ } else {
+ // Qualifiers are compatible, so have the argument type adopt the
+ // deduced argument type's qualifiers as if we had performed the
+ // qualification conversion.
+ A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals);
+ }
+ }
+
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion.
+ //
+ // Also allow conversions which merely strip [[noreturn]] from function types
+ // (recursively) as an extension.
+ // FIXME: Currently, this doesn't play nicely with qualification conversions.
+ bool ObjCLifetimeConversion = false;
+ QualType ResultTy;
+ if ((A->isAnyPointerType() || A->isMemberPointerType()) &&
+ (S.IsQualificationConversion(A, DeducedA, false,
+ ObjCLifetimeConversion) ||
+ S.IsNoReturnConversion(A, DeducedA, ResultTy)))
+ return false;
+
+
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. [...]
+ // [...] Likewise, if P is a pointer to a class of the form
+ // simple-template-id, the transformed A can be a pointer to a
+ // derived class pointed to by the deduced A.
+ if (const PointerType *OriginalParamPtr
+ = OriginalParamType->getAs<PointerType>()) {
+ if (const PointerType *DeducedAPtr = DeducedA->getAs<PointerType>()) {
+ if (const PointerType *APtr = A->getAs<PointerType>()) {
+ if (A->getPointeeType()->isRecordType()) {
+ OriginalParamType = OriginalParamPtr->getPointeeType();
+ DeducedA = DeducedAPtr->getPointeeType();
+ A = APtr->getPointeeType();
+ }
+ }
+ }
+ }
+
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
+ S.IsDerivedFrom(A, DeducedA))
+ return false;
+
+ return true;
+}
+
/// \brief Finish template argument deduction for a function template,
/// checking the deduced template arguments for completeness and forming
/// the function template specialization.
+///
+/// \param OriginalCallArgs If non-NULL, the original call arguments against
+/// which the deduced argument types should be compared.
Sema::TemplateDeductionResult
Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
llvm::SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
- TemplateDeductionInfo &Info) {
+ TemplateDeductionInfo &Info,
+ llvm::SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs) {
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
@@ -2351,6 +2578,24 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
!Trap.hasErrorOccurred())
Info.take();
+ if (OriginalCallArgs) {
+ // C++ [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
+ OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
+ unsigned ParamIdx = OriginalArg.ArgIdx;
+
+ if (ParamIdx >= Specialization->getNumParams())
+ continue;
+
+ QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
+ if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA))
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+
// There may have been an error that did not prevent us from constructing a
// declaration. Mark the declaration invalid and return with a substitution
// failure.
@@ -2594,6 +2839,10 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
return false;
}
+static bool hasDeducibleTemplateParameters(Sema &S,
+ FunctionTemplateDecl *FunctionTemplate,
+ QualType T);
+
/// \brief Perform template argument deduction from a function call
/// (C++ [temp.deduct.call]).
///
@@ -2675,10 +2924,12 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
// Deduce template arguments from the function parameters.
Deduced.resize(TemplateParams->size());
unsigned ArgIdx = 0;
+ llvm::SmallVector<OriginalCallArg, 4> OriginalCallArgs;
for (unsigned ParamIdx = 0, NumParams = ParamTypes.size();
ParamIdx != NumParams; ++ParamIdx) {
- QualType ParamType = ParamTypes[ParamIdx];
-
+ QualType OrigParamType = ParamTypes[ParamIdx];
+ QualType ParamType = OrigParamType;
+
const PackExpansionType *ParamExpansion
= dyn_cast<PackExpansionType>(ParamType);
if (!ParamExpansion) {
@@ -2688,20 +2939,25 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
Expr *Arg = Args[ArgIdx++];
QualType ArgType = Arg->getType();
+
unsigned TDF = 0;
if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
ParamType, ArgType, Arg,
TDF))
continue;
+ // Keep track of the argument type and corresponding parameter index,
+ // so we can check for compatibility between the deduced A and A.
+ if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
+ ArgType));
+
if (TemplateDeductionResult Result
= ::DeduceTemplateArguments(*this, TemplateParams,
ParamType, ArgType, Info, Deduced,
TDF))
return Result;
- // FIXME: we need to check that the deduced A is the same as A,
- // modulo the various allowed differences.
continue;
}
@@ -2747,9 +3003,11 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
for (; ArgIdx < NumArgs; ++ArgIdx) {
HasAnyArguments = true;
- ParamType = ParamPattern;
+ QualType OrigParamType = ParamPattern;
+ ParamType = OrigParamType;
Expr *Arg = Args[ArgIdx];
QualType ArgType = Arg->getType();
+
unsigned TDF = 0;
if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
ParamType, ArgType, Arg,
@@ -2760,6 +3018,12 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
break;
}
+ // Keep track of the argument type and corresponding argument index,
+ // so we can check for compatibility between the deduced A and A.
+ if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
+ ArgType));
+
if (TemplateDeductionResult Result
= ::DeduceTemplateArguments(*this, TemplateParams,
ParamType, ArgType, Info, Deduced,
@@ -2792,7 +3056,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
NumExplicitlySpecified,
- Specialization, Info);
+ Specialization, Info, &OriginalCallArgs);
}
/// \brief Deduce template arguments when taking the address of a function
@@ -2969,9 +3233,6 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
P, A, Info, Deduced, TDF))
return Result;
- // FIXME: we need to check that the deduced A is the same as A,
- // modulo the various allowed differences.
-
// Finish template argument deduction.
LocalInstantiationScope InstScope(*this);
FunctionDecl *Spec = 0;
@@ -3099,8 +3360,19 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *Init,
QualType DeducedType = Deduced[0].getAsType();
if (DeducedType.isNull())
return false;
-
+
Result = SubstituteAutoTransform(*this, DeducedType).TransformType(Type);
+
+ // Check that the deduced argument type is compatible with the original
+ // argument type per C++ [temp.deduct.call]p4.
+ if (Result &&
+ CheckOriginalCallArgDeduction(*this,
+ Sema::OriginalCallArg(FuncParam,0,InitType),
+ Result->getType())) {
+ Result = 0;
+ return false;
+ }
+
return true;
}
@@ -3227,8 +3499,6 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
case TPOC_Other:
// - In other contexts (14.6.6.2) the function template's function type
// is used.
- // FIXME: Don't we actually want to perform the adjustments on the parameter
- // types?
if (DeduceTemplateArguments(S, TemplateParams, FD2->getType(),
FD1->getType(), Info, Deduced, TDF_None,
/*PartialOrdering=*/true, RefParamComparisons))
@@ -4010,3 +4280,23 @@ Sema::MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
::MarkUsedTemplateParameters(*this, Function->getParamDecl(I)->getType(),
true, TemplateParams->getDepth(), Deduced);
}
+
+bool hasDeducibleTemplateParameters(Sema &S,
+ FunctionTemplateDecl *FunctionTemplate,
+ QualType T) {
+ if (!T->isDependentType())
+ return false;
+
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ llvm::SmallVector<bool, 4> Deduced;
+ Deduced.resize(TemplateParams->size());
+ ::MarkUsedTemplateParameters(S, T, true, TemplateParams->getDepth(),
+ Deduced);
+
+ for (unsigned I = 0, N = Deduced.size(); I != N; ++I)
+ if (Deduced[I])
+ return true;
+
+ return false;
+}
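CheckOriginalCallArgDeduction above replaces the long-standing FIXME by enforcing C++ [temp.deduct.call]p4: after deduction, the original argument type must reach the deduced A exactly or through one of the allowed differences (added cv-qualification behind a reference, a qualification conversion for pointers, or derived-to-base). A small C++ sketch of both sides of that check (the function names are illustrative):

    template <typename T> void f(const T *p);
    template <typename T> void g(const T **p);

    void demo(int *p, int **pp) {
      f(p);   // OK: T = int, deduced A is 'const int *', and 'int *' reaches it
              //     via a qualification conversion, an allowed difference
      g(pp);  // ill-formed: T = int, deduced A is 'const int **', but 'int **'
              //     cannot be converted to 'const int **' by a qualification
              //     conversion, so the new check rejects the call
    }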
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 3c1964175b10..1988f1445814 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -62,8 +62,20 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
if (!Ctx) {
Ctx = D->getDeclContext();
- assert((!D->isTemplateParameter() || !Ctx->isTranslationUnit()) &&
- "Template parameter doesn't have its context yet!");
+ // If we have a template template parameter with translation unit context,
+ // then we're performing substitution into a default template argument of
+ // this template template parameter before we've constructed the template
+ // that will own this template template parameter. In this case, we
+ // use empty template parameter lists for all of the outer templates
+ // to avoid performing any substitutions.
+ if (Ctx->isTranslationUnit()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
+ Result.addOuterTemplateArguments(0, 0);
+ return Result;
+ }
+ }
}
while (!Ctx->isFileContext()) {
@@ -788,6 +800,11 @@ namespace {
getSema().CallsUndergoingInstantiation.pop_back();
return move(Result);
}
+
+ private:
+ ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ const TemplateArgument &arg);
};
}
@@ -795,7 +812,7 @@ bool TemplateInstantiator::AlreadyTransformed(QualType T) {
if (T.isNull())
return true;
- if (T->isDependentType() || T->isVariablyModifiedType())
+ if (T->isInstantiationDependentType() || T->isVariablyModifiedType())
return false;
getSema().MarkDeclarationsReferencedInType(Loc, T);
@@ -980,13 +997,14 @@ TemplateName TemplateInstantiator::TransformTemplateName(CXXScopeSpec &SS,
TemplateName Template = Arg.getAsTemplate();
assert(!Template.isNull() && "Null template template argument");
-
+
// We don't ever want to substitute for a qualified template name, since
// the qualifier is handled separately. So, look through the qualified
// template name to its underlying declaration.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
Template = TemplateName(QTN->getTemplateDecl());
-
+
+ Template = getSema().Context.getSubstTemplateTemplateParm(TTP, Template);
return Template;
}
}
@@ -1065,46 +1083,65 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
}
+ return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
+}
+
+ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
+ NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ const TemplateArgument &arg) {
+ ExprResult result;
+ QualType type;
+
// The template argument itself might be an expression, in which
// case we just return that expression.
- if (Arg.getKind() == TemplateArgument::Expression)
- return SemaRef.Owned(Arg.getAsExpr());
+ if (arg.getKind() == TemplateArgument::Expression) {
+ Expr *argExpr = arg.getAsExpr();
+ result = SemaRef.Owned(argExpr);
+ type = argExpr->getType();
- if (Arg.getKind() == TemplateArgument::Declaration) {
- ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
+ } else if (arg.getKind() == TemplateArgument::Declaration) {
+ ValueDecl *VD = cast<ValueDecl>(arg.getAsDecl());
// Find the instantiation of the template argument. This is
// required for nested templates.
VD = cast_or_null<ValueDecl>(
- getSema().FindInstantiatedDecl(E->getLocation(),
- VD, TemplateArgs));
+ getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
if (!VD)
return ExprError();
// Derive the type we want the substituted decl to have. This had
// better be non-dependent, or these checks will have serious problems.
- QualType TargetType;
- if (NTTP->isExpandedParameterPack())
- TargetType = NTTP->getExpansionType(
- getSema().ArgumentPackSubstitutionIndex);
- else if (NTTP->isParameterPack() &&
- isa<PackExpansionType>(NTTP->getType())) {
- TargetType = SemaRef.SubstType(
- cast<PackExpansionType>(NTTP->getType())->getPattern(),
- TemplateArgs, E->getLocation(),
- NTTP->getDeclName());
- } else
- TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
- E->getLocation(), NTTP->getDeclName());
- assert(!TargetType.isNull() && "type substitution failed for param type");
- assert(!TargetType->isDependentType() && "param type still dependent");
- return SemaRef.BuildExpressionFromDeclTemplateArgument(Arg,
- TargetType,
- E->getLocation());
+ if (parm->isExpandedParameterPack()) {
+ type = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
+ } else if (parm->isParameterPack() &&
+ isa<PackExpansionType>(parm->getType())) {
+ type = SemaRef.SubstType(
+ cast<PackExpansionType>(parm->getType())->getPattern(),
+ TemplateArgs, loc, parm->getDeclName());
+ } else {
+ type = SemaRef.SubstType(parm->getType(), TemplateArgs,
+ loc, parm->getDeclName());
+ }
+ assert(!type.isNull() && "type substitution failed for param type");
+ assert(!type->isDependentType() && "param type still dependent");
+ result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, type, loc);
+
+ if (!result.isInvalid()) type = result.get()->getType();
+ } else {
+ result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);
+
+ // Note that this type can be different from the type of 'result',
+ // e.g. if it's an enum type.
+ type = arg.getIntegralType();
}
+ if (result.isInvalid()) return ExprError();
- return SemaRef.BuildExpressionFromIntegralTemplateArgument(Arg,
- E->getSourceRange().getBegin());
+ Expr *resultExpr = result.take();
+ return SemaRef.Owned(new (SemaRef.Context)
+ SubstNonTypeTemplateParmExpr(type,
+ resultExpr->getValueKind(),
+ loc, parm, resultExpr));
}
ExprResult
@@ -1120,36 +1157,9 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
assert(Index < ArgPack.pack_size() && "Substitution index out-of-range");
const TemplateArgument &Arg = ArgPack.pack_begin()[Index];
- if (Arg.getKind() == TemplateArgument::Expression)
- return SemaRef.Owned(Arg.getAsExpr());
-
- if (Arg.getKind() == TemplateArgument::Declaration) {
- ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
-
- // Find the instantiation of the template argument. This is
- // required for nested templates.
- VD = cast_or_null<ValueDecl>(
- getSema().FindInstantiatedDecl(E->getParameterPackLocation(),
- VD, TemplateArgs));
- if (!VD)
- return ExprError();
-
- QualType T;
- NonTypeTemplateParmDecl *NTTP = E->getParameterPack();
- if (NTTP->isExpandedParameterPack())
- T = NTTP->getExpansionType(getSema().ArgumentPackSubstitutionIndex);
- else if (const PackExpansionType *Expansion
- = dyn_cast<PackExpansionType>(NTTP->getType()))
- T = SemaRef.SubstType(Expansion->getPattern(), TemplateArgs,
- E->getParameterPackLocation(), NTTP->getDeclName());
- else
- T = E->getType();
- return SemaRef.BuildExpressionFromDeclTemplateArgument(Arg, T,
- E->getParameterPackLocation());
- }
-
- return SemaRef.BuildExpressionFromIntegralTemplateArgument(Arg,
- E->getParameterPackLocation());
+ return transformNonTypeTemplateParmRef(E->getParameterPack(),
+ E->getParameterPackLocation(),
+ Arg);
}
ExprResult
@@ -1327,7 +1337,7 @@ TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
"Cannot perform an instantiation without some context on the "
"instantiation stack");
- if (!T->getType()->isDependentType() &&
+ if (!T->getType()->isInstantiationDependentType() &&
!T->getType()->isVariablyModifiedType())
return T;
@@ -1346,7 +1356,7 @@ TypeSourceInfo *Sema::SubstType(TypeLoc TL,
if (TL.getType().isNull())
return 0;
- if (!TL.getType()->isDependentType() &&
+ if (!TL.getType()->isInstantiationDependentType() &&
!TL.getType()->isVariablyModifiedType()) {
// FIXME: Make a copy of the TypeLoc data here, so that we can
// return a new TypeSourceInfo. Inefficient!
@@ -1375,7 +1385,7 @@ QualType Sema::SubstType(QualType T,
// If T is not a dependent type or a variably-modified type, there
// is nothing to do.
- if (!T->isDependentType() && !T->isVariablyModifiedType())
+ if (!T->isInstantiationDependentType() && !T->isVariablyModifiedType())
return T;
TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
@@ -1383,7 +1393,8 @@ QualType Sema::SubstType(QualType T,
}
static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
- if (T->getType()->isDependentType() || T->getType()->isVariablyModifiedType())
+ if (T->getType()->isInstantiationDependentType() ||
+ T->getType()->isVariablyModifiedType())
return true;
TypeLoc TL = T->getTypeLoc().IgnoreParens();
@@ -1397,7 +1408,7 @@ static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
// The parameter's type as written might be dependent even if the
// decayed type was not dependent.
if (TypeSourceInfo *TSInfo = P->getTypeSourceInfo())
- if (TSInfo->getType()->isDependentType())
+ if (TSInfo->getType()->isInstantiationDependentType())
return true;
// TODO: currently we always rebuild expressions. When we
@@ -1798,9 +1809,11 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
ExprResult NewInit = SubstExpr(OldInit, TemplateArgs);
// If the initialization is no longer dependent, check it now.
- if ((OldField->getType()->isDependentType() || OldInit->isTypeDependent())
- && !NewField->getType()->isDependentType()
- && !NewInit.get()->isTypeDependent()) {
+ if ((OldField->getType()->isDependentType() || OldInit->isTypeDependent() ||
+ OldInit->isValueDependent()) &&
+ !NewField->getType()->isDependentType() &&
+ !NewInit.get()->isTypeDependent() &&
+ !NewInit.get()->isValueDependent()) {
// FIXME: handle list-initialization
SourceLocation EqualLoc = NewField->getLocation();
NewInit = PerformCopyInitialization(
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index e78aa2991e74..29385e50b8c1 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -59,7 +59,7 @@ bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
// FIXME: Is this still too simple?
void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
- Decl *Tmpl, Decl *New) {
+ const Decl *Tmpl, Decl *New) {
for (AttrVec::const_iterator i = Tmpl->attr_begin(), e = Tmpl->attr_end();
i != e; ++i) {
const Attr *TmplAttr = *i;
@@ -132,7 +132,7 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
bool IsTypeAlias) {
bool Invalid = false;
TypeSourceInfo *DI = D->getTypeSourceInfo();
- if (DI->getType()->isDependentType() ||
+ if (DI->getType()->isInstantiationDependentType() ||
DI->getType()->isVariablyModifiedType()) {
DI = SemaRef.SubstType(DI, TemplateArgs,
D->getLocation(), D->getDeclName());
@@ -415,8 +415,10 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
!Var->isCXXForRangeDecl())
SemaRef.ActOnUninitializedDecl(Var, false);
- // Diagnose unused local variables.
- if (!Var->isInvalidDecl() && Owner->isFunctionOrMethod() && !Var->isUsed())
+ // Diagnose unused local variables with dependent types, where the diagnostic
+ // will have been deferred.
+ if (!Var->isInvalidDecl() && Owner->isFunctionOrMethod() && !Var->isUsed() &&
+ D->getType()->isDependentType())
SemaRef.DiagnoseUnusedDecl(Var);
return Var;
@@ -433,7 +435,7 @@ Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) {
Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
bool Invalid = false;
TypeSourceInfo *DI = D->getTypeSourceInfo();
- if (DI->getType()->isDependentType() ||
+ if (DI->getType()->isInstantiationDependentType() ||
DI->getType()->isVariablyModifiedType()) {
DI = SemaRef.SubstType(DI, TemplateArgs,
D->getLocation(), D->getDeclName());
@@ -1088,9 +1090,26 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
Function->setLexicalDeclContext(LexicalDC);
// Attach the parameters
- for (unsigned P = 0; P < Params.size(); ++P)
- if (Params[P])
- Params[P]->setOwningFunction(Function);
+ if (isa<FunctionProtoType>(Function->getType().IgnoreParens())) {
+ // Adopt the already-instantiated parameters into our own context.
+ for (unsigned P = 0; P < Params.size(); ++P)
+ if (Params[P])
+ Params[P]->setOwningFunction(Function);
+ } else {
+ // Since we were instantiated via a typedef of a function type, create
+ // new parameters.
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "No function prototype in template instantiation?");
+ for (FunctionProtoType::arg_type_iterator AI = Proto->arg_type_begin(),
+ AE = Proto->arg_type_end(); AI != AE; ++AI) {
+ ParmVarDecl *Param
+ = SemaRef.BuildParmVarDeclForTypedef(Function, Function->getLocation(),
+ *AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ }
Function->setParams(Params.data(), Params.size());
SourceLocation InstantiateAtPOI;
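
Illustrative sketch (assumed, not part of the commit) of how a function can be declared through a typedef of a function type, leaving no written parameter declarators to adopt, which is why ParmVarDecls are now synthesized from the FunctionProtoType:

    typedef void handler_type(int code);   // a typedef of a function type

    template <typename T>
    struct Dispatcher {
      friend handler_type report;          // declares: void report(int code);
    };                                     // no parameter declarators appear here

    // Instantiating Dispatcher<int> has to build the friend's parameters from
    // the prototype's argument types instead of copying written declarators.
    template struct Dispatcher<int>;
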
@@ -2264,7 +2283,12 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
EPI));
}
- SemaRef.InstantiateAttrs(TemplateArgs, Tmpl, New);
+ const FunctionDecl* Definition = Tmpl;
+
+ // Get the definition. Leaves the variable unchanged if undefined.
+ Tmpl->isDefined(Definition);
+
+ SemaRef.InstantiateAttrs(TemplateArgs, Definition, New);
return false;
}
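
A hypothetical C++ example (not from the diff) of what the isDefined() lookup buys: an attribute written only on the template's defining declaration still reaches the instantiation:

    template <typename T> T *allocate(unsigned n);              // first declaration

    template <typename T>
    __attribute__((malloc)) T *allocate(unsigned n) {           // definition carries
      return static_cast<T *>(::operator new(n * sizeof(T)));   // the attribute
    }

    int *p = allocate<int>(8);   // the instantiated declaration is attributed too
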
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 86d3bc170909..daa1523363c0 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -716,17 +716,19 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
- if (DeclarationName CorrectedName = CorrectTypo(R, S, 0, 0, false,
- CTC_NoKeywords)) {
- if (NamedDecl *CorrectedResult = R.getAsSingle<NamedDecl>())
+ if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), S, 0, 0,
+ false, CTC_NoKeywords)) {
+ if (NamedDecl *CorrectedResult = Corrected.getCorrectionDecl())
if (CorrectedResult->isParameterPack()) {
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOptions()));
ParameterPack = CorrectedResult;
Diag(NameLoc, diag::err_sizeof_pack_no_pack_name_suggest)
- << &Name << CorrectedName
- << FixItHint::CreateReplacement(NameLoc,
- CorrectedName.getAsString());
+ << &Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(
+ NameLoc, Corrected.getAsString(getLangOptions()));
Diag(ParameterPack->getLocation(), diag::note_parameter_pack_here)
- << CorrectedName;
+ << CorrectedQuotedStr;
}
}
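
Illustrative C++0x snippet (assumed, not part of this change) of the corrected sizeof... diagnostic, which now quotes the suggestion and attaches a fix-it:

    template <typename ...Args>
    unsigned count(Args ...values) {
      return sizeof...(Argz);   // typo: diagnosed with a suggestion of 'Args'
    }                           // plus a fix-it replacing the misspelled name
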
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index 5fd8afa6acf4..f3e73ec5a723 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -26,36 +26,11 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
-/// \brief Perform adjustment on the parameter type of a function.
-///
-/// This routine adjusts the given parameter type @p T to the actual
-/// parameter type used by semantic analysis (C99 6.7.5.3p[7,8],
-/// C++ [dcl.fct]p3). The adjusted parameter type is returned.
-QualType Sema::adjustParameterType(QualType T) {
- // C99 6.7.5.3p7:
- // A declaration of a parameter as "array of type" shall be
- // adjusted to "qualified pointer to type", where the type
- // qualifiers (if any) are those specified within the [ and ] of
- // the array type derivation.
- if (T->isArrayType())
- return Context.getArrayDecayedType(T);
-
- // C99 6.7.5.3p8:
- // A declaration of a parameter as "function returning type"
- // shall be adjusted to "pointer to function returning type", as
- // in 6.3.2.1.
- if (T->isFunctionType())
- return Context.getPointerType(T);
-
- return T;
-}
-
-
-
/// isOmittedBlockReturnType - Return true if this declarator is missing a
/// return type because this is an omitted return type on a block literal.
static bool isOmittedBlockReturnType(const Declarator &D) {
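
The removed helper survives as ASTContext::getAdjustedParameterType (used in the BuildFunctionType hunk below); for illustration only, the adjustment it performs:

    void f(int a[10]);     // parameter type adjusts to int *a
    void g(void h(int));   // parameter type adjusts to void (*h)(int)
    // C99 6.7.5.3p7-8 / C++ [dcl.fct]p3: arrays decay to pointers and function
    // types become pointers to functions when they appear as parameters.
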
@@ -86,6 +61,11 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
useInstantiationLoc = true;
break;
+ case AttributeList::AT_objc_ownership:
+ diagID = diag::warn_objc_object_attribute_wrong_type;
+ useInstantiationLoc = true;
+ break;
+
default:
// Assume everything else was a function attribute.
diagID = diag::warn_function_attribute_wrong_type;
@@ -110,7 +90,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
// objc_gc applies to Objective-C pointers or, otherwise, to the
// smallest available pointer type (i.e. 'void*' in 'void**').
#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
- case AttributeList::AT_objc_gc
+ case AttributeList::AT_objc_gc: \
+ case AttributeList::AT_objc_ownership
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
@@ -295,11 +276,15 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
static bool handleObjCGCTypeAttr(TypeProcessingState &state,
AttributeList &attr, QualType &type);
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
AttributeList &attr, QualType &type) {
- // Right now, we have exactly one of these attributes: objc_gc.
- assert(attr.getKind() == AttributeList::AT_objc_gc);
- return handleObjCGCTypeAttr(state, attr, type);
+ if (attr.getKind() == AttributeList::AT_objc_gc)
+ return handleObjCGCTypeAttr(state, attr, type);
+ assert(attr.getKind() == AttributeList::AT_objc_ownership);
+ return handleObjCOwnershipTypeAttr(state, attr, type);
}
/// Given that an objc_gc attribute was written somewhere on a
@@ -447,7 +432,12 @@ distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
return true;
}
- return handleFunctionTypeAttr(state, attr, declSpecType);
+ if (handleFunctionTypeAttr(state, attr, declSpecType)) {
+ spliceAttrOutOfList(attr, attrList);
+ return true;
+ }
+
+ return false;
}
/// A function type attribute was written in the decl spec. Try to
@@ -512,6 +502,11 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
break;
+ case AttributeList::AT_ns_returns_retained:
+ if (!state.getSema().getLangOptions().ObjCAutoRefCount)
+ break;
+ // fallthrough
+
FUNCTION_TYPE_ATTRS_CASELIST:
distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType);
break;
@@ -560,6 +555,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*args*/ 0, 0,
/*type quals*/ 0,
/*ref-qualifier*/true, SourceLocation(),
+ /*mutable qualifier*/SourceLocation(),
/*EH*/ EST_None, SourceLocation(), 0, 0, 0, 0,
/*parens*/ loc, loc,
declarator));
@@ -575,10 +571,11 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/// \param D the declarator containing the declaration specifier.
/// \returns The type described by the declaration specifiers. This function
/// never returns null.
-static QualType ConvertDeclSpecToType(Sema &S, TypeProcessingState &state) {
+static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// FIXME: Should move the logic from DeclSpec::Finish to here for validity
// checking.
+ Sema &S = state.getSema();
Declarator &declarator = state.getDeclarator();
const DeclSpec &DS = declarator.getDeclSpec();
SourceLocation DeclLoc = declarator.getIdentifierLoc();
@@ -1017,6 +1014,51 @@ QualType Sema::BuildParenType(QualType T) {
return Context.getParenType(T);
}
+/// Given that we're building a pointer or reference to the given
+static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
+ SourceLocation loc,
+ bool isReference) {
+ // Bail out if retention is unrequired or already specified.
+ if (!type->isObjCLifetimeType() ||
+ type.getObjCLifetime() != Qualifiers::OCL_None)
+ return type;
+
+ Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None;
+
+ // If the object type is const-qualified, we can safely use
+ // __unsafe_unretained. This is safe (because there are no read
+ // barriers), and it'll be safe to coerce anything but __weak* to
+ // the resulting type.
+ if (type.isConstQualified()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // Otherwise, check whether the static type does not require
+ // retaining. This currently only triggers for Class (possibly
+  // protocol-qualified, and arrays thereof).
+ } else if (type->isObjCARCImplicitlyUnretainedType()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // If that failed, give an error and recover using __autoreleasing.
+ } else {
+ // These types can show up in private ivars in system headers, so
+ // we need this to not be an error in those cases. Instead we
+ // want to delay.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(loc,
+ diag::err_arc_indirect_no_ownership, type, isReference));
+ } else {
+ S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
+ }
+ implicitLifetime = Qualifiers::OCL_Autoreleasing;
+ }
+ assert(implicitLifetime && "didn't infer any lifetime!");
+
+ Qualifiers qs;
+ qs.addObjCLifetime(implicitLifetime);
+ return S.Context.getQualifiedType(type, qs);
+}
+
/// \brief Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
@@ -1041,6 +1083,10 @@ QualType Sema::BuildPointerType(QualType T,
assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
+ // In ARC, it is forbidden to build pointers to unqualified pointers.
+ if (getLangOptions().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);
+
// Build the pointer type.
return Context.getPointerType(T);
}
@@ -1094,12 +1140,38 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return QualType();
}
+ // In ARC, it is forbidden to build references to unqualified pointers.
+ if (getLangOptions().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
+
// Handle restrict on references.
if (LValueRef)
return Context.getLValueReferenceType(T, SpelledAsLValue);
return Context.getRValueReferenceType(T);
}
+/// Check whether the specified array size makes the array type a VLA. If so,
+/// return true, if not, return the size of the array in SizeVal.
+static bool isArraySizeVLA(Expr *ArraySize, llvm::APSInt &SizeVal, Sema &S) {
+ // If the size is an ICE, it certainly isn't a VLA.
+ if (ArraySize->isIntegerConstantExpr(SizeVal, S.Context))
+ return false;
+
+ // If we're in a GNU mode (like gnu99, but not c99) accept any evaluatable
+ // value as an extension.
+ Expr::EvalResult Result;
+ if (S.LangOpts.GNUMode && ArraySize->Evaluate(Result, S.Context)) {
+ if (!Result.hasSideEffects() && Result.Val.isInt()) {
+ SizeVal = Result.Val.getInt();
+ S.Diag(ArraySize->getLocStart(), diag::ext_vla_folded_to_constant);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
/// \brief Build an array type.
///
/// \param T The type of each element in the array.
@@ -1200,11 +1272,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
T = Context.getIncompleteArrayType(T, ASM, Quals);
} else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
- } else if (!ArraySize->isIntegerConstantExpr(ConstVal, Context) ||
- (!T->isDependentType() && !T->isIncompleteType() &&
- !T->isConstantSizeType())) {
- // Per C99, a variable array is an array with either a non-constant
- // size or an element type that has a non-constant-size
+ } else if (!T->isDependentType() && !T->isIncompleteType() &&
+ !T->isConstantSizeType()) {
+    // C99: an array with an element type that has a non-constant size is a VLA.
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else if (isArraySizeVLA(ArraySize, ConstVal, *this)) {
+ // C99: an array with a non-ICE size is a VLA. We accept any expression
+ // that we can fold to a non-zero positive value as an extension.
T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
} else {
// C99 6.7.5.2p1: If the expression is a constant expression, it shall
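
A short illustration (assumed C99 / GNU C++ code, not from the diff) of the reordered classification: the element type is inspected first, and only then the size expression:

    void f(int n) {
      int a[n];       // size is not an ICE: variable length array
      int b[10][n];   // constant outer bound, but the element type int[n] has
                      // non-constant size, so b is still a VLA
      int c[10];      // integer constant expression: ordinary constant array
    }
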
@@ -1242,10 +1316,12 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
if (!getLangOptions().C99) {
if (T->isVariableArrayType()) {
// Prohibit the use of non-POD types in VLAs.
+ QualType BaseT = Context.getBaseElementType(T);
if (!T->isDependentType() &&
- !Context.getBaseElementType(T)->isPODType()) {
+ !BaseT.isPODType(Context) &&
+ !BaseT->isObjCLifetimeType()) {
Diag(Loc, diag::err_vla_non_pod)
- << Context.getBaseElementType(T);
+ << BaseT;
return QualType();
}
// Prohibit the use of VLAs during template argument deduction.
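
Illustrative GNU C++ sketch (assumed, not part of the commit) of the err_vla_non_pod path that now reuses the cached base element type:

    struct Widget { Widget(); ~Widget(); };   // non-POD: user-declared ctor/dtor

    void f(int n) {
      Widget w[n];   // error: variable length array of non-POD element type
      int ok[n];     // accepted as a GNU extension in C++
    }
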
@@ -1296,8 +1372,7 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
return QualType();
}
- if (!T->isDependentType())
- return Context.getExtVectorType(T, vectorSize);
+ return Context.getExtVectorType(T, vectorSize);
}
return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
@@ -1347,7 +1422,7 @@ QualType Sema::BuildFunctionType(QualType T,
bool Invalid = false;
for (unsigned Idx = 0; Idx < NumParamTypes; ++Idx) {
- QualType ParamType = adjustParameterType(ParamTypes[Idx]);
+ QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]);
if (ParamType->isVoidType()) {
Diag(Loc, diag::err_param_with_void_type);
Invalid = true;
@@ -1467,6 +1542,109 @@ QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
return QT;
}
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex);
+
+/// Given that this is the declaration of a parameter under ARC,
+/// attempt to infer attributes and such for pointer-to-whatever
+/// types.
+static void inferARCWriteback(TypeProcessingState &state,
+ QualType &declSpecType) {
+ Sema &S = state.getSema();
+ Declarator &declarator = state.getDeclarator();
+
+ // TODO: should we care about decl qualifiers?
+
+ // Check whether the declarator has the expected form. We walk
+ // from the inside out in order to make the block logic work.
+ unsigned outermostPointerIndex = 0;
+ bool isBlockPointer = false;
+ unsigned numPointers = 0;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = i;
+ DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ // Count the number of pointers. Treat references
+ // interchangeably as pointers; if they're mis-ordered, normal
+ // type building will discover that.
+ outermostPointerIndex = chunkIndex;
+ numPointers++;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ // If we have a pointer to block pointer, that's an acceptable
+ // indirect reference; anything else is not an application of
+ // the rules.
+ if (numPointers != 1) return;
+ numPointers++;
+ outermostPointerIndex = chunkIndex;
+ isBlockPointer = true;
+
+ // We don't care about pointer structure in return values here.
+ goto done;
+
+ case DeclaratorChunk::Array: // suppress if written (id[])?
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+ done:
+
+ // If we have *one* pointer, then we want to throw the qualifier on
+ // the declaration-specifiers, which means that it needs to be a
+ // retainable object type.
+ if (numPointers == 1) {
+ // If it's not a retainable object type, the rule doesn't apply.
+ if (!declSpecType->isObjCRetainableType()) return;
+
+ // If it already has lifetime, don't do anything.
+ if (declSpecType.getObjCLifetime()) return;
+
+ // Otherwise, modify the type in-place.
+ Qualifiers qs;
+
+ if (declSpecType->isObjCARCImplicitlyUnretainedType())
+ qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ else
+ qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ declSpecType = S.Context.getQualifiedType(declSpecType, qs);
+
+ // If we have *two* pointers, then we want to throw the qualifier on
+ // the outermost pointer.
+ } else if (numPointers == 2) {
+ // If we don't have a block pointer, we need to check whether the
+ // declaration-specifiers gave us something that will turn into a
+ // retainable object pointer after we slap the first pointer on it.
+ if (!isBlockPointer && !declSpecType->isObjCObjectType())
+ return;
+
+ // Look for an explicit lifetime attribute there.
+ DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex);
+ if (chunk.Kind != DeclaratorChunk::Pointer &&
+ chunk.Kind != DeclaratorChunk::BlockPointer)
+ return;
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_objc_ownership)
+ return;
+
+ transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
+ outermostPointerIndex);
+
+ // Any other number of pointers/references does not trigger the rule.
+ } else return;
+
+ // TODO: mark whether we did this inference?
+}
+
static void DiagnoseIgnoredQualifiers(unsigned Quals,
SourceLocation ConstQualLoc,
SourceLocation VolatileQualLoc,
@@ -1513,47 +1691,28 @@ static void DiagnoseIgnoredQualifiers(unsigned Quals,
<< QualStr << NumQuals << ConstFixIt << VolatileFixIt << RestrictFixIt;
}
-/// GetTypeForDeclarator - Convert the type for the specified
-/// declarator to Type instances.
-///
-/// If OwnedDecl is non-NULL, and this declarator's decl-specifier-seq
-/// owns the declaration of a type (e.g., the definition of a struct
-/// type), then *OwnedDecl will receive the owned declaration.
-///
-/// The result of this call will never be null, but the associated
-/// type may be a null type if there's an unrecoverable error.
-TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
- TagDecl **OwnedDecl,
- bool AutoAllowedInTypeName) {
- // Determine the type of the declarator. Not all forms of declarator
- // have a type.
+static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
+ TypeSourceInfo *&ReturnTypeInfo) {
+ Sema &SemaRef = state.getSema();
+ Declarator &D = state.getDeclarator();
QualType T;
- TypeSourceInfo *ReturnTypeInfo = 0;
-
- TypeProcessingState state(*this, D);
+ ReturnTypeInfo = 0;
- // In C++0x, deallocation functions (normal and array operator delete)
- // are implicitly noexcept.
- bool ImplicitlyNoexcept = false;
+ // The TagDecl owned by the DeclSpec.
+ TagDecl *OwnedTagDecl = 0;
switch (D.getName().getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
case UnqualifiedId::IK_OperatorFunctionId:
- if (getLangOptions().CPlusPlus0x) {
- OverloadedOperatorKind OO = D.getName().OperatorFunctionId.Operator;
- if (OO == OO_Delete || OO == OO_Array_Delete)
- ImplicitlyNoexcept = true;
- }
- // Intentional fall-through.
case UnqualifiedId::IK_Identifier:
case UnqualifiedId::IK_LiteralOperatorId:
case UnqualifiedId::IK_TemplateId:
- T = ConvertDeclSpecToType(*this, state);
+ T = ConvertDeclSpecToType(state);
if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
- TagDecl* Owned = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
// Owned declaration is embedded in declarator.
- Owned->setEmbeddedInDeclarator(true);
- if (OwnedDecl) *OwnedDecl = Owned;
+ OwnedTagDecl->setEmbeddedInDeclarator(true);
}
break;
@@ -1562,14 +1721,14 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
case UnqualifiedId::IK_DestructorName:
// Constructors and destructors don't have return types. Use
// "void" instead.
- T = Context.VoidTy;
+ T = SemaRef.Context.VoidTy;
break;
case UnqualifiedId::IK_ConversionFunctionId:
// The result type of a conversion function is the type that it
// converts to.
- T = GetTypeFromParser(D.getName().ConversionFunctionId,
- &ReturnTypeInfo);
+ T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
+ &ReturnTypeInfo);
break;
}
@@ -1581,7 +1740,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// type (this is checked later) and we can skip this. In other languages
// using auto, we need to check regardless.
if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
- (!getLangOptions().CPlusPlus0x || !D.isFunctionDeclarator())) {
+ (!SemaRef.getLangOptions().CPlusPlus0x || !D.isFunctionDeclarator())) {
int Error = -1;
switch (D.getContext()) {
@@ -1595,7 +1754,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
case Declarator::MemberContext:
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)
break;
- switch (cast<TagDecl>(CurContext)->getTagKind()) {
+ switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
case TTK_Enum: assert(0 && "unhandled tag kind"); break;
case TTK_Struct: Error = 1; /* Struct member */ break;
case TTK_Union: Error = 2; /* Union member */ break;
@@ -1603,6 +1762,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
break;
case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
Error = 4; // Exception declaration
break;
case Declarator::TemplateParamContext:
@@ -1619,13 +1779,13 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
Error = 9; // Type alias
break;
case Declarator::TypeNameContext:
- if (!AutoAllowedInTypeName)
- Error = 11; // Generic
+ Error = 11; // Generic
break;
case Declarator::FileContext:
case Declarator::BlockContext:
case Declarator::ForContext:
case Declarator::ConditionContext:
+ case Declarator::CXXNewContext:
break;
}
@@ -1640,7 +1800,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// contains a trailing return type. That is only legal at the outermost
// level. Check all declarator chunks (outermost first) anyway, to give
// better diagnostics.
- if (getLangOptions().CPlusPlus0x && Error != -1) {
+ if (SemaRef.getLangOptions().CPlusPlus0x && Error != -1) {
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
@@ -1656,15 +1816,86 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
if (Error != -1) {
- Diag(D.getDeclSpec().getTypeSpecTypeLoc(), diag::err_auto_not_allowed)
+ SemaRef.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_auto_not_allowed)
<< Error;
- T = Context.IntTy;
+ T = SemaRef.Context.IntTy;
D.setInvalidType(true);
}
}
-
- if (T.isNull())
- return Context.getNullTypeSourceInfo();
+
+ if (SemaRef.getLangOptions().CPlusPlus &&
+ OwnedTagDecl && OwnedTagDecl->isDefinition()) {
+ // Check the contexts where C++ forbids the declaration of a new class
+ // or enumeration in a type-specifier-seq.
+ switch (D.getContext()) {
+ case Declarator::FileContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::BlockLiteralContext:
+ // C++0x [dcl.type]p3:
+ // A type-specifier-seq shall not define a class or enumeration unless
+ // it appears in the type-id of an alias-declaration (7.1.3) that is not
+ // the declaration of a template-declaration.
+ case Declarator::AliasDeclContext:
+ break;
+ case Declarator::AliasTemplateContext:
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_alias_template)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::TypeNameContext:
+ case Declarator::TemplateParamContext:
+ case Declarator::CXXNewContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::TemplateTypeArgContext:
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_type_specifier)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::PrototypeContext:
+ case Declarator::ObjCPrototypeContext:
+ case Declarator::KNRTypeListContext:
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_param_type)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::ConditionContext:
+ // C++ 6.4p2:
+ // The type-specifier-seq shall not contain typedef and shall not declare
+ // a new class or enumeration.
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_condition);
+ break;
+ }
+ }
+
+ return T;
+}
+
+static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
+ QualType declSpecType,
+ TypeSourceInfo *TInfo) {
+
+ QualType T = declSpecType;
+ Declarator &D = state.getDeclarator();
+ Sema &S = state.getSema();
+ ASTContext &Context = S.Context;
+ const LangOptions &LangOpts = S.getLangOptions();
+
+ bool ImplicitlyNoexcept = false;
+ if (D.getName().getKind() == UnqualifiedId::IK_OperatorFunctionId &&
+ LangOpts.CPlusPlus0x) {
+ OverloadedOperatorKind OO = D.getName().OperatorFunctionId.Operator;
+ /// In C++0x, deallocation functions (normal and array operator delete)
+ /// are implicitly noexcept.
+ if (OO == OO_Delete || OO == OO_Array_Delete)
+ ImplicitlyNoexcept = true;
+ }
// The name we're declaring, if any.
DeclarationName Name;
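
Illustrative C++ examples (assumed) of the contexts these consolidated diagnostics reject, now caught while building the declarator's type rather than in ActOnTypeName:

    template <typename T>
    using Handle = struct Impl { T *ptr; };      // type defined in an alias template

    void process(struct Packet { int id; } p);   // type defined in a parameter type

    void g() {
      if (struct Flag { bool set; } *f = 0) {}   // type defined in a condition
    }
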
@@ -1687,56 +1918,56 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
switch (DeclType.Kind) {
default: assert(0 && "Unknown decltype!");
case DeclaratorChunk::Paren:
- T = BuildParenType(T);
+ T = S.BuildParenType(T);
break;
case DeclaratorChunk::BlockPointer:
// If blocks are disabled, emit an error.
if (!LangOpts.Blocks)
- Diag(DeclType.Loc, diag::err_blocks_disable);
+ S.Diag(DeclType.Loc, diag::err_blocks_disable);
- T = BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
+ T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
if (DeclType.Cls.TypeQuals)
- T = BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
break;
case DeclaratorChunk::Pointer:
// Verify that we're not building a pointer to pointer to function with
// exception specification.
- if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
- Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
- if (getLangOptions().ObjC1 && T->getAs<ObjCObjectType>()) {
+ if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
if (DeclType.Ptr.TypeQuals)
- T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
}
- T = BuildPointerType(T, DeclType.Loc, Name);
+ T = S.BuildPointerType(T, DeclType.Loc, Name);
if (DeclType.Ptr.TypeQuals)
- T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
case DeclaratorChunk::Reference: {
// Verify that we're not building a reference to pointer to function with
// exception specification.
- if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
- Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
- T = BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
+ T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
Qualifiers Quals;
if (DeclType.Ref.HasRestrict)
- T = BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
+ T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
break;
}
case DeclaratorChunk::Array: {
// Verify that we're not building an array of pointers to function with
// exception specification.
- if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
- Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
D.setInvalidType(true);
// Build the type anyway.
}
@@ -1753,13 +1984,13 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// FIXME: This check isn't quite right: it allows star in prototypes
// for function definitions, and disallows some edge cases detailed
// in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
- Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
+ S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
ASM = ArrayType::Normal;
D.setInvalidType(true);
}
- T = BuildArrayType(T, ASM, ArraySize,
- Qualifiers::fromCVRMask(ATI.TypeQuals),
- SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
+ T = S.BuildArrayType(T, ASM, ArraySize,
+ Qualifiers::fromCVRMask(ATI.TypeQuals),
+ SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
break;
}
case DeclaratorChunk::Function: {
@@ -1775,27 +2006,27 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// and not, for instance, a pointer to a function.
if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
!FTI.TrailingReturnType && chunkIndex == 0) {
- Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_auto_missing_trailing_return);
T = Context.IntTy;
D.setInvalidType(true);
} else if (FTI.TrailingReturnType) {
// T must be exactly 'auto' at this point. See CWG issue 681.
if (isa<ParenType>(T)) {
- Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_in_parens)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
} else if (T.hasQualifiers() || !isa<AutoType>(T)) {
- Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
}
- T = GetTypeFromParser(
+ T = S.GetTypeFromParser(
ParsedType::getFromOpaquePtr(FTI.TrailingReturnType),
- &ReturnTypeInfo);
+ &TInfo);
}
}
@@ -1809,7 +2040,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
if (chunkIndex == 0 &&
D.getContext() == Declarator::BlockLiteralContext)
diagID = diag::err_block_returning_array_function;
- Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
+ S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
D.setInvalidType(true);
}
@@ -1818,7 +2049,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// class type in C++.
if (isa<PointerType>(T) && T.getLocalCVRQualifiers() &&
(D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId) &&
- (!getLangOptions().CPlusPlus || !T->isDependentType())) {
+ (!LangOpts.CPlusPlus || !T->isDependentType())) {
assert(chunkIndex + 1 < e && "No DeclaratorChunk for the return type?");
DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1);
assert(ReturnTypeChunk.Kind == DeclaratorChunk::Pointer);
@@ -1829,43 +2060,43 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
- *this);
+ S);
} else if (T.getCVRQualifiers() && D.getDeclSpec().getTypeQualifiers() &&
- (!getLangOptions().CPlusPlus ||
+ (!LangOpts.CPlusPlus ||
(!T->isDependentType() && !T->isRecordType()))) {
DiagnoseIgnoredQualifiers(D.getDeclSpec().getTypeQualifiers(),
D.getDeclSpec().getConstSpecLoc(),
D.getDeclSpec().getVolatileSpecLoc(),
D.getDeclSpec().getRestrictSpecLoc(),
- *this);
+ S);
}
- if (getLangOptions().CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) {
+ if (LangOpts.CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
if (Tag->isDefinition())
- Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
+ S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
<< Context.getTypeDeclType(Tag);
}
// Exception specs are not allowed in typedefs. Complain, but add it
// anyway.
if (IsTypedefName && FTI.getExceptionSpecType())
- Diag(FTI.getExceptionSpecLoc(), diag::err_exception_spec_in_typedef)
+ S.Diag(FTI.getExceptionSpecLoc(), diag::err_exception_spec_in_typedef)
<< (D.getContext() == Declarator::AliasDeclContext ||
D.getContext() == Declarator::AliasTemplateContext);
- if (!FTI.NumArgs && !FTI.isVariadic && !getLangOptions().CPlusPlus) {
+ if (!FTI.NumArgs && !FTI.isVariadic && !LangOpts.CPlusPlus) {
// Simple void foo(), where the incoming T is the result type.
T = Context.getFunctionNoProtoType(T);
} else {
// We allow a zero-parameter variadic function in C if the
// function is marked with the "overloadable" attribute. Scan
// for this attribute now.
- if (!FTI.NumArgs && FTI.isVariadic && !getLangOptions().CPlusPlus) {
+ if (!FTI.NumArgs && FTI.isVariadic && !LangOpts.CPlusPlus) {
bool Overloadable = false;
for (const AttributeList *Attrs = D.getAttributes();
Attrs; Attrs = Attrs->getNext()) {
@@ -1876,13 +2107,13 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
if (!Overloadable)
- Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
+ S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
}
if (FTI.NumArgs && FTI.ArgInfo[0].Param == 0) {
// C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
// definition.
- Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
+ S.Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
D.setInvalidType(true);
break;
}
@@ -1899,13 +2130,18 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.reserve(FTI.NumArgs);
+ llvm::SmallVector<bool, 16> ConsumedArguments;
+ ConsumedArguments.reserve(FTI.NumArgs);
+ bool HasAnyConsumedArguments = false;
+
for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
QualType ArgTy = Param->getType();
assert(!ArgTy.isNull() && "Couldn't parse type?");
// Adjust the parameter type.
- assert((ArgTy == adjustParameterType(ArgTy)) && "Unadjusted type?");
+ assert((ArgTy == Context.getAdjustedParameterType(ArgTy)) &&
+ "Unadjusted type?");
// Look for 'void'. void is allowed only as a single argument to a
// function with no other parameters (C99 6.7.5.3p10). We record
@@ -1915,19 +2151,19 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// is an incomplete type (C99 6.2.5p19) and function decls cannot
// have arguments of incomplete type.
if (FTI.NumArgs != 1 || FTI.isVariadic) {
- Diag(DeclType.Loc, diag::err_void_only_param);
+ S.Diag(DeclType.Loc, diag::err_void_only_param);
ArgTy = Context.IntTy;
Param->setType(ArgTy);
} else if (FTI.ArgInfo[i].Ident) {
// Reject, but continue to parse 'int(void abc)'.
- Diag(FTI.ArgInfo[i].IdentLoc,
+ S.Diag(FTI.ArgInfo[i].IdentLoc,
diag::err_param_with_void_type);
ArgTy = Context.IntTy;
Param->setType(ArgTy);
} else {
// Reject, but continue to parse 'float(const void)'.
if (ArgTy.hasQualifiers())
- Diag(DeclType.Loc, diag::err_void_param_qualified);
+ S.Diag(DeclType.Loc, diag::err_void_param_qualified);
// Do not add 'void' to the ArgTys list.
break;
@@ -1944,19 +2180,28 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
}
+ if (LangOpts.ObjCAutoRefCount) {
+ bool Consumed = Param->hasAttr<NSConsumedAttr>();
+ ConsumedArguments.push_back(Consumed);
+ HasAnyConsumedArguments |= Consumed;
+ }
+
ArgTys.push_back(ArgTy);
}
+ if (HasAnyConsumedArguments)
+ EPI.ConsumedArguments = ConsumedArguments.data();
+
llvm::SmallVector<QualType, 4> Exceptions;
EPI.ExceptionSpecType = FTI.getExceptionSpecType();
if (FTI.getExceptionSpecType() == EST_Dynamic) {
Exceptions.reserve(FTI.NumExceptions);
for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
// FIXME: Preserve type source info.
- QualType ET = GetTypeFromParser(FTI.Exceptions[ei].Ty);
+ QualType ET = S.GetTypeFromParser(FTI.Exceptions[ei].Ty);
// Check that the type is valid for an exception spec, and
// drop it if not.
- if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
+ if (!S.CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
Exceptions.push_back(ET);
}
EPI.NumExceptions = Exceptions.size();
@@ -1973,7 +2218,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
if (!NoexceptExpr->isValueDependent() &&
!NoexceptExpr->isIntegerConstantExpr(Dummy, Context, &ErrLoc,
/*evaluated*/false))
- Diag(ErrLoc, diag::err_noexcept_needs_constant_expression)
+ S.Diag(ErrLoc, diag::err_noexcept_needs_constant_expression)
<< NoexceptExpr->getSourceRange();
else
EPI.NoexceptExpr = NoexceptExpr;
@@ -1996,8 +2241,8 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
if (SS.isInvalid()) {
// Avoid emitting extra errors if we already errored on the scope.
D.setInvalidType(true);
- } else if (isDependentScopeSpecifier(SS) ||
- dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS))) {
+ } else if (S.isDependentScopeSpecifier(SS) ||
+ dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
NestedNameSpecifier *NNS
= static_cast<NestedNameSpecifier*>(SS.getScopeRep());
NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
@@ -2026,7 +2271,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
break;
}
} else {
- Diag(DeclType.Mem.Scope().getBeginLoc(),
+ S.Diag(DeclType.Mem.Scope().getBeginLoc(),
diag::err_illegal_decl_mempointer_in_nonclass)
<< (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
<< DeclType.Mem.Scope().getRange();
@@ -2034,12 +2279,12 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
if (!ClsType.isNull())
- T = BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier());
+ T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier());
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
} else if (DeclType.Mem.TypeQuals) {
- T = BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
}
break;
}
@@ -2054,7 +2299,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
processTypeAttrs(state, T, false, attrs);
}
- if (getLangOptions().CPlusPlus && T->isFunctionType()) {
+ if (LangOpts.CPlusPlus && T->isFunctionType()) {
const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
@@ -2071,7 +2316,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
FreeFunction = (D.getContext() != Declarator::MemberContext ||
D.getDeclSpec().isFriendSpecified());
} else {
- DeclContext *DC = computeDeclContext(D.getCXXScopeSpec());
+ DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec());
FreeFunction = (DC && !DC->isRecord());
}
@@ -2109,16 +2354,16 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
break;
}
- Diag(D.getIdentifierLoc(),
+ S.Diag(D.getIdentifierLoc(),
diag::ext_qualified_function_type_template_arg)
<< Quals;
} else {
if (FnTy->getTypeQuals() != 0) {
if (D.isFunctionDeclarator())
- Diag(D.getIdentifierLoc(),
+ S.Diag(D.getIdentifierLoc(),
diag::err_invalid_qualified_function_type);
else
- Diag(D.getIdentifierLoc(),
+ S.Diag(D.getIdentifierLoc(),
diag::err_invalid_qualified_typedef_function_type_use)
<< FreeFunction;
}
@@ -2135,11 +2380,11 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
}
- Diag(Loc, diag::err_invalid_ref_qualifier_function_type)
+ S.Diag(Loc, diag::err_invalid_ref_qualifier_function_type)
<< (FnTy->getRefQualifier() == RQ_LValue)
<< FixItHint::CreateRemoval(Loc);
} else {
- Diag(D.getIdentifierLoc(),
+ S.Diag(D.getIdentifierLoc(),
diag::err_invalid_ref_qualifier_typedef_function_type_use)
<< FreeFunction
<< (FnTy->getRefQualifier() == RQ_LValue);
@@ -2192,7 +2437,7 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// We represent function parameter packs as function parameters whose
// type is a pack expansion.
if (!T->containsUnexpandedParameterPack()) {
- Diag(D.getEllipsisLoc(),
+ S.Diag(D.getEllipsisLoc(),
diag::err_function_parameter_pack_without_parameter_packs)
<< T << D.getSourceRange();
D.setEllipsisLoc(SourceLocation());
@@ -2212,14 +2457,15 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// it expands those parameter packs.
if (T->containsUnexpandedParameterPack())
T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
- else if (!getLangOptions().CPlusPlus0x)
- Diag(D.getEllipsisLoc(), diag::ext_variadic_templates);
+ else if (!LangOpts.CPlusPlus0x)
+ S.Diag(D.getEllipsisLoc(), diag::ext_variadic_templates);
break;
case Declarator::FileContext:
case Declarator::KNRTypeListContext:
case Declarator::ObjCPrototypeContext: // FIXME: special diagnostic here?
case Declarator::TypeNameContext:
+ case Declarator::CXXNewContext:
case Declarator::AliasDeclContext:
case Declarator::AliasTemplateContext:
case Declarator::MemberContext:
@@ -2227,11 +2473,12 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
case Declarator::ForContext:
case Declarator::ConditionContext:
case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
case Declarator::BlockLiteralContext:
case Declarator::TemplateTypeArgContext:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
- Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter);
+ S.Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter);
D.setEllipsisLoc(SourceLocation());
break;
}
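
Minimal C++0x sketch (not part of the diff) of a function parameter pack, which is represented as a single parameter whose type is a pack expansion:

    template <typename ...Ts>
    void forward_all(Ts &&...args);   // one parameter; its type is the pack
                                      // expansion Ts&&..., expanded per argument

    // If the pattern type named no parameter packs, the ellipsis would instead
    // be rejected with err_function_parameter_pack_without_parameter_packs.
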
@@ -2241,7 +2488,138 @@ TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
return Context.getNullTypeSourceInfo();
else if (D.isInvalidType())
return Context.getTrivialTypeSourceInfo(T);
- return GetTypeSourceInfoForDeclarator(D, T, ReturnTypeInfo);
+
+ return S.GetTypeSourceInfoForDeclarator(D, T, TInfo);
+}
+
+/// GetTypeForDeclarator - Convert the type for the specified
+/// declarator to Type instances.
+///
+/// The result of this call will never be null, but the associated
+/// type may be a null type if there's an unrecoverable error.
+TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
+ // Determine the type of the declarator. Not all forms of declarator
+ // have a type.
+
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = 0;
+ QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+ if (T.isNull())
+ return Context.getNullTypeSourceInfo();
+
+ if (D.isPrototypeContext() && getLangOptions().ObjCAutoRefCount)
+ inferARCWriteback(state, T);
+
+ return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
+}
+
+static void transferARCOwnershipToDeclSpec(Sema &S,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ if (declSpecTy->isObjCRetainableType() &&
+ declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) {
+ Qualifiers qs;
+ qs.addObjCLifetime(ownership);
+ declSpecTy = S.Context.getQualifiedType(declSpecTy, qs);
+ }
+}
+
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ // Look for an explicit lifetime attribute.
+ DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_objc_ownership)
+ return;
+
+ const char *attrStr = 0;
+ switch (ownership) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); break;
+ case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break;
+ case Qualifiers::OCL_Strong: attrStr = "strong"; break;
+ case Qualifiers::OCL_Weak: attrStr = "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break;
+ }
+
+ // If there wasn't one, add one (with an invalid source location
+ // so that we don't make an AttributedType for it).
+ AttributeList *attr = D.getAttributePool()
+ .create(&S.Context.Idents.get("objc_ownership"), SourceLocation(),
+ /*scope*/ 0, SourceLocation(),
+ &S.Context.Idents.get(attrStr), SourceLocation(),
+ /*args*/ 0, 0,
+ /*declspec*/ false, /*C++0x*/ false);
+ spliceAttrIntoList(*attr, chunk.getAttrListRef());
+
+ // TODO: mark whether we did this inference?
+}
+
+static void transferARCOwnership(TypeProcessingState &state,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ int inner = -1;
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ inner = i;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ return transferARCOwnershipToDeclaratorChunk(state, ownership, i);
+
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+
+ if (inner == -1)
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+
+ DeclaratorChunk &chunk = D.getTypeObject(inner);
+ if (chunk.Kind == DeclaratorChunk::Pointer) {
+ if (declSpecTy->isObjCRetainableType())
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ if (declSpecTy->isObjCObjectType())
+ return transferARCOwnershipToDeclaratorChunk(state, ownership, inner);
+ } else {
+ assert(chunk.Kind == DeclaratorChunk::Array ||
+ chunk.Kind == DeclaratorChunk::Reference);
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ }
+}
+
+TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = 0;
+ QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+ if (declSpecTy.isNull())
+ return Context.getNullTypeSourceInfo();
+
+ if (getLangOptions().ObjCAutoRefCount) {
+ Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
+ if (ownership != Qualifiers::OCL_None)
+ transferARCOwnership(state, declSpecTy, ownership);
+ }
+
+ return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
}
/// Map an AttributedType::Kind to an AttributeList::Kind.
@@ -2259,6 +2637,8 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
return AttributeList::AT_neon_polyvector_type;
case AttributedType::attr_objc_gc:
return AttributeList::AT_objc_gc;
+ case AttributedType::attr_objc_ownership:
+ return AttributeList::AT_objc_ownership;
case AttributedType::attr_noreturn:
return AttributeList::AT_noreturn;
case AttributedType::attr_cdecl:
@@ -2489,6 +2869,9 @@ namespace {
llvm_unreachable("qualified type locs not expected here!");
}
+ void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ fillAttributedTypeLoc(TL, Chunk.getAttrs());
+ }
void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
assert(Chunk.Kind == DeclaratorChunk::BlockPointer);
TL.setCaretLoc(Chunk.Loc);
@@ -2657,8 +3040,7 @@ TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
// the parser.
assert(D.getIdentifier() == 0 && "Type name should have no identifier!");
- TagDecl *OwnedTag = 0;
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
@@ -2666,26 +3048,11 @@ TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
if (getLangOptions().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
-
- // C++0x [dcl.type]p3:
- // A type-specifier-seq shall not define a class or enumeration unless
- // it appears in the type-id of an alias-declaration (7.1.3) that is not
- // the declaration of a template-declaration.
- if (OwnedTag && OwnedTag->isDefinition()) {
- if (D.getContext() == Declarator::AliasTemplateContext)
- Diag(OwnedTag->getLocation(), diag::err_type_defined_in_alias_template)
- << Context.getTypeDeclType(OwnedTag);
- else if (D.getContext() != Declarator::AliasDeclContext)
- Diag(OwnedTag->getLocation(), diag::err_type_defined_in_type_specifier)
- << Context.getTypeDeclType(OwnedTag);
- }
}
return CreateParsedType(T, TInfo);
}
-
-
//===----------------------------------------------------------------------===//
// Type Attribute Processing
//===----------------------------------------------------------------------===//
@@ -2744,6 +3111,99 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
}
+/// handleObjCOwnershipTypeAttr - Process an objc_ownership
+/// attribute on the specified type.
+///
+/// Returns 'true' if the attribute was handled.
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ if (!type->isObjCRetainableType() && !type->isDependentType())
+ return false;
+
+ Sema &S = state.getSema();
+
+ if (type.getQualifiers().getObjCLifetime()) {
+ S.Diag(attr.getLoc(), diag::err_attr_objc_ownership_redundant)
+ << type;
+ return true;
+ }
+
+ if (!attr.getParameterName()) {
+ S.Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "objc_ownership" << 1;
+ attr.setInvalid();
+ return true;
+ }
+
+ Qualifiers::ObjCLifetime lifetime;
+ if (attr.getParameterName()->isStr("none"))
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ else if (attr.getParameterName()->isStr("strong"))
+ lifetime = Qualifiers::OCL_Strong;
+ else if (attr.getParameterName()->isStr("weak"))
+ lifetime = Qualifiers::OCL_Weak;
+ else if (attr.getParameterName()->isStr("autoreleasing"))
+ lifetime = Qualifiers::OCL_Autoreleasing;
+ else {
+ S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "objc_ownership" << attr.getParameterName();
+ attr.setInvalid();
+ return true;
+ }
+
+ // Consume lifetime attributes without further comment outside of
+ // ARC mode.
+ if (!S.getLangOptions().ObjCAutoRefCount)
+ return true;
+
+ Qualifiers qs;
+ qs.setObjCLifetime(lifetime);
+ QualType origType = type;
+ type = S.Context.getQualifiedType(type, qs);
+
+ // If we have a valid source location for the attribute, use an
+ // AttributedType instead.
+ if (attr.getLoc().isValid())
+ type = S.Context.getAttributedType(AttributedType::attr_objc_ownership,
+ origType, type);
+
+ // Forbid __weak if the runtime doesn't support it.
+ if (lifetime == Qualifiers::OCL_Weak &&
+ !S.getLangOptions().ObjCRuntimeHasWeak) {
+
+ // Actually, delay this until we know what we're parsing.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(attr.getLoc(),
+ diag::err_arc_weak_no_runtime, type, /*ignored*/ 0));
+ } else {
+ S.Diag(attr.getLoc(), diag::err_arc_weak_no_runtime);
+ }
+
+ attr.setInvalid();
+ return true;
+ }
+
+ // Forbid __weak for class objects marked as
+ // objc_arc_weak_reference_unavailable
+ if (lifetime == Qualifiers::OCL_Weak) {
+ QualType T = type;
+ while (const PointerType *ptr = T->getAs<PointerType>())
+ T = ptr->getPointeeType();
+ if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
+ if (Class->isArcWeakrefUnavailable()) {
+ S.Diag(attr.getLoc(), diag::err_arc_unsupported_weak_class);
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ diag::note_class_declared);
+ }
+ }
+ }
+
+ return true;
+}
+
/// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type
/// attribute on the specified type. Returns true to indicate that
/// the attribute was handled, false to indicate that the type does
@@ -2954,6 +3414,23 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state,
return true;
}
+ // ns_returns_retained is not always a type attribute, but if we got
+ // here, we're treating it as one right now.
+ if (attr.getKind() == AttributeList::AT_ns_returns_retained) {
+ assert(S.getLangOptions().ObjCAutoRefCount &&
+ "ns_returns_retained treated as type attribute in non-ARC");
+ if (attr.getNumArgs()) return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ FunctionType::ExtInfo EI
+ = unwrapped.get()->getExtInfo().withProducesResult(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
if (attr.getKind() == AttributeList::AT_regparm) {
unsigned value;
if (S.CheckRegparmAttr(attr, value))
@@ -3127,6 +3604,40 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
VectorType::GenericVector);
}
+/// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on
+/// a type.
+static void HandleExtVectorTypeAttr(QualType &CurType,
+ const AttributeList &Attr,
+ Sema &S) {
+ Expr *sizeExpr;
+
+ // Special case where the argument is a template id.
+ if (Attr.getParameterName()) {
+ CXXScopeSpec SS;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
+
+ ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, id, false,
+ false);
+ if (Size.isInvalid())
+ return;
+
+ sizeExpr = Size.get();
+ } else {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ sizeExpr = Attr.getArg(0);
+ }
+
+ // Create the vector type.
+ QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+}
+
/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
/// "neon_polyvector_type" attributes are used to create vector types that
/// are mangled according to ARM's ABI. Otherwise, these types are identical
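
Illustrative uses (assumed, not from the diff) of the ext_vector_type attribute now handled as a type attribute, including a size naming a template parameter, the kind of identifier argument the getParameterName() path above deals with:

    typedef float float4 __attribute__((ext_vector_type(4)));

    template <typename T, unsigned N>
    struct vec {
      typedef T type __attribute__((ext_vector_type(N)));   // dependent size
    };

    float4 add(float4 a, float4 b) { return a + b; }         // elementwise
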
@@ -3217,6 +3728,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case AttributeList::AT_vector_size:
HandleVectorSizeAttr(type, attr, state.getSema());
break;
+ case AttributeList::AT_ext_vector_type:
+ if (state.getDeclarator().getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef)
+ HandleExtVectorTypeAttr(type, attr, state.getSema());
+ break;
case AttributeList::AT_neon_vector_type:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
VectorType::NeonVector, "neon_vector_type");
@@ -3226,11 +3742,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
VectorType::NeonPolyVector,
"neon_polyvector_type");
break;
-
case AttributeList::AT_opencl_image_access:
HandleOpenCLImageAccessAttribute(type, attr, state.getSema());
break;
+ case AttributeList::AT_ns_returns_retained:
+ if (!state.getSema().getLangOptions().ObjCAutoRefCount)
+ break;
+ // fallthrough into the function attrs
+
FUNCTION_TYPE_ATTRS_CASELIST:
// Never process function type attributes as part of the
// declaration-specifiers.
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index ff2e46a9026b..fa87217821aa 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -1192,6 +1192,16 @@ public:
Body);
}
+ /// \brief Build a new Objective-C @autoreleasepool statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAutoreleasePoolStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
+ }
+
+
/// \brief Build a new Objective-C fast enumeration statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -1537,9 +1547,9 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildInitList(SourceLocation LBraceLoc,
- MultiExprArg Inits,
- SourceLocation RBraceLoc,
- QualType ResultTy) {
+ MultiExprArg Inits,
+ SourceLocation RBraceLoc,
+ QualType ResultTy) {
ExprResult Result
= SemaRef.ActOnInitList(LBraceLoc, move(Inits), RBraceLoc);
if (Result.isInvalid() || ResultTy->isDependentType())
@@ -1856,8 +1866,9 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXThrowExpr(SourceLocation ThrowLoc, Expr *Sub) {
- return getSema().ActOnCXXThrow(ThrowLoc, Sub);
+ ExprResult RebuildCXXThrowExpr(SourceLocation ThrowLoc, Expr *Sub,
+ bool IsThrownVariableInScope) {
+ return getSema().BuildCXXThrow(ThrowLoc, Sub, IsThrownVariableInScope);
}
/// \brief Build a new C++ default-argument expression.
@@ -2466,6 +2477,10 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
Outputs.push_back(Out.get());
continue;
}
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ if (ArgChanged) *ArgChanged = true;
// The transform has determined that we should perform an elementwise
// expansion of the pattern. Do so.
@@ -2482,8 +2497,6 @@ bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
return true;
}
- if (ArgChanged)
- *ArgChanged = true;
Outputs.push_back(Out.get());
}
@@ -3162,6 +3175,38 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
if (Result->isFunctionType() || Result->isReferenceType())
return Result;
+ // Suppress Objective-C lifetime qualifiers if they don't make sense for the
+ // resulting type.
+ if (Quals.hasObjCLifetime()) {
+ if (!Result->isObjCLifetimeType() && !Result->isDependentType())
+ Quals.removeObjCLifetime();
+ else if (Result.getObjCLifetime()) {
+ // Objective-C ARC:
+ // A lifetime qualifier applied to a substituted template parameter
+ // overrides the lifetime qualifier from the template argument.
+ if (const SubstTemplateTypeParmType *SubstTypeParam
+ = dyn_cast<SubstTemplateTypeParmType>(Result)) {
+ QualType Replacement = SubstTypeParam->getReplacementType();
+ Qualifiers Qs = Replacement.getQualifiers();
+ Qs.removeObjCLifetime();
+ Replacement
+ = SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(),
+ Qs);
+ Result = SemaRef.Context.getSubstTemplateTypeParmType(
+ SubstTypeParam->getReplacedParameter(),
+ Replacement);
+ TLB.TypeWasModifiedSafely(Result);
+ } else {
+ // Otherwise, complain about the addition of a qualifier to an
+ // already-qualified type.
+ SourceRange R = TLB.getTemporaryTypeLoc(Result).getSourceRange();
+ SemaRef.Diag(R.getBegin(), diag::err_attr_objc_ownership_redundant)
+ << Result << R;
+
+ Quals.removeObjCLifetime();
+ }
+ }
+ }
if (!Quals.empty()) {
Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals);
TLB.push<QualifiedTypeLoc>(Result);
@@ -3333,7 +3378,11 @@ QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
if (Result.isNull())
return QualType();
}
-
+
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // pointing to.
+ TLB.TypeWasModifiedSafely(Result->getPointeeType());
+
PointerTypeLoc NewT = TLB.push<PointerTypeLoc>(Result);
NewT.setSigilLoc(TL.getSigilLoc());
return Result;
@@ -3387,6 +3436,11 @@ TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
return QualType();
}
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // referring to.
+ TLB.TypeWasModifiedSafely(
+ Result->getAs<ReferenceType>()->getPointeeTypeAsWritten());
+
// r-value references can be rebuilt as l-value references.
ReferenceTypeLoc NewTL;
if (isa<LValueReferenceType>(Result))
@@ -5435,6 +5489,25 @@ TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
template<typename Derived>
StmtResult
+TreeTransform<Derived>::TransformObjCAutoreleasePoolStmt(
+ ObjCAutoreleasePoolStmt *S) {
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getSubStmt());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Body.get() == S->getSubStmt())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAutoreleasePoolStmt(
+ S->getAtLoc(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
TreeTransform<Derived>::TransformObjCForCollectionStmt(
ObjCForCollectionStmt *S) {
// Transform the element statement.
@@ -6721,7 +6794,8 @@ TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) {
SubExpr.get() == E->getSubExpr())
return SemaRef.Owned(E);
- return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get());
+ return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get(),
+ E->isThrownVariableInScope());
}
template<typename Derived>
@@ -6867,9 +6941,13 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
AllocType,
AllocTypeInfo,
ArraySize.get(),
- /*FIXME:*/E->getLocStart(),
+ /*FIXME:*/E->hasInitializer()
+ ? E->getLocStart()
+ : SourceLocation(),
move_arg(ConstructorArgs),
- E->getLocEnd());
+ /*FIXME:*/E->hasInitializer()
+ ? E->getLocEnd()
+ : SourceLocation());
}
template<typename Derived>
@@ -7587,6 +7665,21 @@ TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr(
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
+ MaterializeTemporaryExpr *E) {
+ return getDerived().TransformExpr(E->GetTemporaryExpr());
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
return SemaRef.Owned(E);
}
@@ -7609,6 +7702,43 @@ TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) {
}
template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ ExprResult result = getDerived().TransformExpr(E->getSubExpr());
+ if (result.isInvalid()) return ExprError();
+ Expr *subExpr = result.take();
+
+ if (!getDerived().AlwaysRebuild() &&
+ subExpr == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return SemaRef.Owned(new(SemaRef.Context)
+ ObjCIndirectCopyRestoreExpr(subExpr, E->getType(), E->shouldCopy()));
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ TypeSourceInfo *TSInfo
+ = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!TSInfo)
+ return ExprError();
+
+ ExprResult Result = getDerived().TransformExpr(E->getSubExpr());
+ if (Result.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TSInfo == E->getTypeInfoAsWritten() &&
+ Result.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(),
+ E->getBridgeKeywordLoc(), TSInfo,
+ Result.get());
+}
+
+template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
// Transform arguments.
@@ -8227,11 +8357,24 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
return SemaRef.CreateOverloadedUnaryOp(OpLoc, Opc, Functions, First);
}
- if (Op == OO_Subscript)
- return SemaRef.CreateOverloadedArraySubscriptExpr(Callee->getLocStart(),
- OpLoc,
- First,
- Second);
+ if (Op == OO_Subscript) {
+ SourceLocation LBrace;
+ SourceLocation RBrace;
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
+ DeclarationNameLoc &NameLoc = DRE->getNameInfo().getInfo();
+ LBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.BeginOpNameLoc);
+ RBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.EndOpNameLoc);
+ } else {
+ LBrace = Callee->getLocStart();
+ RBrace = OpLoc;
+ }
+
+ return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
+ First, Second);
+ }
// Create the overloaded operator invocation for binary operators.
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
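Context for the RebuildCXXThrowExpr change above: the extra IsThrownVariableInScope flag appears to record whether the throw names a local variable still in scope at the throw point, which is the information the copy-elision/move rules for throw expressions need. A minimal illustrative sketch of the source construct, under that assumption (not code from this import; fail and msg are illustrative names):

    #include <string>

    void fail(const char *what) {
      std::string msg = std::string("operation failed: ") + what;
      // 'msg' is a local variable thrown directly; C++11 permits the copy into
      // the exception object to be performed as a move in this situation.
      throw msg;
    }
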
diff --git a/lib/Sema/TypeLocBuilder.h b/lib/Sema/TypeLocBuilder.h
index 3570737d11bc..7a5e43e25dc9 100644
--- a/lib/Sema/TypeLocBuilder.h
+++ b/lib/Sema/TypeLocBuilder.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_SEMA_TYPELOCBUILDER_H
#include "clang/AST/TypeLoc.h"
-#include "llvm/ADT/SmallVector.h"
#include "clang/AST/ASTContext.h"
namespace clang {
@@ -87,6 +86,14 @@ class TypeLocBuilder {
Index = Capacity;
}
+ /// \brief Tell the TypeLocBuilder that the type it is storing has been
+ /// modified in some safe way that doesn't affect type-location information.
+ void TypeWasModifiedSafely(QualType T) {
+#ifndef NDEBUG
+ LastTy = T;
+#endif
+ }
+
/// Pushes space for a new TypeLoc of the given type. Invalidates
/// any TypeLocs previously retrieved from this builder.
template <class TyLocType> TyLocType push(QualType T) {
@@ -139,7 +146,7 @@ private:
Index -= LocalSize;
- return getTypeLoc(T);
+ return getTemporaryTypeLoc(T);
}
/// Grow to the given capacity.
@@ -171,15 +178,17 @@ private:
reserve(Size);
Index -= Size;
- return getTypeLoc(T);
+ return getTemporaryTypeLoc(T);
}
-
- // This is private because, when we kill off TypeSourceInfo in favor
- // of TypeLoc, we'll want an interface that creates a TypeLoc given
- // an ASTContext, and we don't want people to think they can just
- // use this as an equivalent.
- TypeLoc getTypeLoc(QualType T) {
+public:
+ /// \brief Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
+ /// object.
+ ///
+ /// The resulting \c TypeLoc should only be used so long as the
+ /// \c TypeLocBuilder is active and has not had more type information
+ /// pushed into it.
+ TypeLoc getTemporaryTypeLoc(QualType T) {
#ifndef NDEBUG
assert(LastTy == T && "type doesn't match last type pushed!");
#endif
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 8fb20d22b7de..a4ed5f4da48c 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -147,9 +147,11 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(OpenCL, diag::warn_pch_opencl);
PARSE_LANGOPT_IMPORTANT(CUDA, diag::warn_pch_cuda);
PARSE_LANGOPT_BENIGN(CatchUndefined);
+ PARSE_LANGOPT_BENIGN(DefaultFPContract);
PARSE_LANGOPT_IMPORTANT(ElideConstructors, diag::warn_pch_elide_constructors);
PARSE_LANGOPT_BENIGN(SpellChecking);
- PARSE_LANGOPT_BENIGN(DefaultFPContract);
+ PARSE_LANGOPT_IMPORTANT(ObjCAutoRefCount, diag::warn_pch_auto_ref_count);
+ PARSE_LANGOPT_BENIGN(ObjCInferRelatedReturnType);
#undef PARSE_LANGOPT_IMPORTANT
#undef PARSE_LANGOPT_BENIGN
@@ -655,7 +657,8 @@ public:
// and associate it with the persistent ID.
IdentifierInfo *II = KnownII;
if (!II)
- II = &Reader.getIdentifierTable().getOwn(k.first, k.first + k.second);
+ II = &Reader.getIdentifierTable().getOwn(llvm::StringRef(k.first,
+ k.second));
Reader.SetIdentifierInfo(ID, II);
II->setIsFromAST();
return II;
@@ -682,7 +685,8 @@ public:
// the new IdentifierInfo.
IdentifierInfo *II = KnownII;
if (!II)
- II = &Reader.getIdentifierTable().getOwn(k.first, k.first + k.second);
+ II = &Reader.getIdentifierTable().getOwn(llvm::StringRef(k.first,
+ k.second));
Reader.SetIdentifierInfo(ID, II);
// Set or check the various bits in the IdentifierInfo structure.
@@ -999,8 +1003,7 @@ bool ASTReader::ParseLineTable(PerFileData &F,
std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
Idx += FilenameLen;
MaybeAddSystemRootToFilename(Filename);
- FileIDs[I] = LineTable.getLineTableFilenameID(Filename.c_str(),
- Filename.size());
+ FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
}
// Parse the line entries
@@ -1192,7 +1195,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(PerFileData &F) {
case SM_SLOC_FILE_ENTRY:
case SM_SLOC_BUFFER_ENTRY:
- case SM_SLOC_INSTANTIATION_ENTRY:
+ case SM_SLOC_EXPANSION_ENTRY:
// Once we hit one of the source location entries, we're done.
return Success;
}
@@ -1362,7 +1365,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(unsigned ID) {
break;
}
- case SM_SLOC_INSTANTIATION_ENTRY: {
+ case SM_SLOC_EXPANSION_ENTRY: {
SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
SourceMgr.createInstantiationLoc(SpellingLoc,
ReadSourceLocation(*F, Record[2]),
@@ -1553,17 +1556,17 @@ PreprocessedEntity *ASTReader::LoadPreprocessedEntity(PerFileData &F) {
(PreprocessorDetailRecordTypes)F.PreprocessorDetailCursor.ReadRecord(
Code, Record, BlobStart, BlobLen);
switch (RecType) {
- case PPD_MACRO_INSTANTIATION: {
+ case PPD_MACRO_EXPANSION: {
if (PreprocessedEntity *PE = PPRec.getPreprocessedEntity(Record[0]))
return PE;
- MacroInstantiation *MI
- = new (PPRec) MacroInstantiation(DecodeIdentifierInfo(Record[3]),
+ MacroExpansion *ME =
+ new (PPRec) MacroExpansion(DecodeIdentifierInfo(Record[3]),
SourceRange(ReadSourceLocation(F, Record[1]),
ReadSourceLocation(F, Record[2])),
- getMacroDefinition(Record[4]));
- PPRec.SetPreallocatedEntity(Record[0], MI);
- return MI;
+ getMacroDefinition(Record[4]));
+ PPRec.SetPreallocatedEntity(Record[0], ME);
+ return ME;
}
case PPD_MACRO_DEFINITION: {
@@ -2366,6 +2369,15 @@ ASTReader::ReadASTBlock(PerFileData &F) {
TentativeDefinitions.insert(TentativeDefinitions.end(),
Record.begin(), Record.end());
break;
+
+ case KNOWN_NAMESPACES:
+ // Optimization for the first block.
+ if (KnownNamespaces.empty())
+ KnownNamespaces.swap(Record);
+ else
+ KnownNamespaces.insert(KnownNamespaces.end(),
+ Record.begin(), Record.end());
+ break;
}
First = false;
}
@@ -2980,6 +2992,7 @@ bool ASTReader::ParseLanguageOptions(
PARSE_LANGOPT(ElideConstructors);
PARSE_LANGOPT(SpellChecking);
PARSE_LANGOPT(MRTD);
+ PARSE_LANGOPT(ObjCAutoRefCount);
#undef PARSE_LANGOPT
return Listener->ReadLanguageOptions(LangOpts);
@@ -3225,12 +3238,13 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
}
case TYPE_FUNCTION_NO_PROTO: {
- if (Record.size() != 5) {
+ if (Record.size() != 6) {
Error("incorrect encoding of no-proto function type");
return QualType();
}
QualType ResultType = GetType(Record[0]);
- FunctionType::ExtInfo Info(Record[1], Record[2], Record[3], (CallingConv)Record[4]);
+ FunctionType::ExtInfo Info(Record[1], Record[2], Record[3],
+ (CallingConv)Record[4], Record[5]);
return Context->getFunctionNoProtoType(ResultType, Info);
}
@@ -3241,9 +3255,10 @@ QualType ASTReader::ReadTypeRecord(unsigned Index) {
EPI.ExtInfo = FunctionType::ExtInfo(/*noreturn*/ Record[1],
/*hasregparm*/ Record[2],
/*regparm*/ Record[3],
- static_cast<CallingConv>(Record[4]));
+ static_cast<CallingConv>(Record[4]),
+ /*produces*/ Record[5]);
- unsigned Idx = 5;
+ unsigned Idx = 6;
unsigned NumParams = Record[Idx++];
llvm::SmallVector<QualType, 16> ParamTypes;
for (unsigned I = 0; I != NumParams; ++I)
@@ -3993,12 +4008,9 @@ Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
llvm_unreachable("Broken chain");
}
-bool ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
+ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
llvm::SmallVectorImpl<Decl*> &Decls) {
- assert(DC->hasExternalLexicalStorage() &&
- "DeclContext has no lexical decls in storage");
-
// There might be lexical decls in multiple parts of the chain, for the TU
// at least.
// DeclContextOffsets might reallocate as we load additional decls below,
@@ -4023,7 +4035,7 @@ bool ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
}
++NumLexicalDeclContextsRead;
- return false;
+ return ELR_Success;
}
DeclContext::lookup_result
@@ -4446,6 +4458,17 @@ ASTReader::ReadMethodPool(Selector Sel) {
return std::pair<ObjCMethodList, ObjCMethodList>();
}
+void ASTReader::ReadKnownNamespaces(
+ llvm::SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+ Namespaces.clear();
+
+ for (unsigned I = 0, N = KnownNamespaces.size(); I != N; ++I) {
+ if (NamespaceDecl *Namespace
+ = dyn_cast_or_null<NamespaceDecl>(GetDecl(KnownNamespaces[I])))
+ Namespaces.push_back(Namespace);
+ }
+}
+
void ASTReader::LoadSelector(Selector Sel) {
// It would be complicated to avoid reading the methods anyway. So don't.
ReadMethodPool(Sel);
@@ -4540,7 +4563,7 @@ IdentifierInfo *ASTReader::DecodeIdentifierInfo(unsigned ID) {
unsigned StrLen = (((unsigned) StrLenPtr[0])
| (((unsigned) StrLenPtr[1]) << 8)) - 1;
IdentifiersLoaded[ID]
- = &PP->getIdentifierTable().get(Str, StrLen);
+ = &PP->getIdentifierTable().get(llvm::StringRef(Str, StrLen));
if (DeserializationListener)
DeserializationListener->IdentifierRead(ID + 1, IdentifiersLoaded[ID]);
}
@@ -4716,6 +4739,14 @@ ASTReader::ReadTemplateName(PerFileData &F, const RecordData &Record,
return Context->getDependentTemplateName(NNS,
(OverloadedOperatorKind)Record[Idx++]);
}
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ TemplateTemplateParmDecl *param
+ = cast_or_null<TemplateTemplateParmDecl>(GetDecl(Record[Idx++]));
+ if (!param) return TemplateName();
+ TemplateName replacement = ReadTemplateName(F, Record, Idx);
+ return Context->getSubstTemplateTemplateParm(param, replacement);
+ }
case TemplateName::SubstTemplateTemplateParmPack: {
TemplateTemplateParmDecl *Param
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index fab2069bc5f4..24ab544dcd1c 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -710,6 +710,7 @@ void ASTDeclReader::VisitVarDecl(VarDecl *VD) {
VD->VarDeclBits.ExceptionVar = Record[Idx++];
VD->VarDeclBits.NRVOVariable = Record[Idx++];
VD->VarDeclBits.CXXForRangeDecl = Record[Idx++];
+ VD->VarDeclBits.ARCPseudoStrong = Record[Idx++];
if (Record[Idx++])
VD->setInit(Reader.ReadExpr(F));
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index f3f67a76c4d4..14927b9e3bc8 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -60,136 +60,16 @@ namespace clang {
/// \brief The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields = NumStmtFields + 6;
+ static const unsigned NumExprFields = NumStmtFields + 7;
/// \brief Read and initialize a ExplicitTemplateArgumentList structure.
void ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList,
unsigned NumTemplateArgs);
void VisitStmt(Stmt *S);
- void VisitNullStmt(NullStmt *S);
- void VisitCompoundStmt(CompoundStmt *S);
- void VisitSwitchCase(SwitchCase *S);
- void VisitCaseStmt(CaseStmt *S);
- void VisitDefaultStmt(DefaultStmt *S);
- void VisitLabelStmt(LabelStmt *S);
- void VisitIfStmt(IfStmt *S);
- void VisitSwitchStmt(SwitchStmt *S);
- void VisitWhileStmt(WhileStmt *S);
- void VisitDoStmt(DoStmt *S);
- void VisitForStmt(ForStmt *S);
- void VisitGotoStmt(GotoStmt *S);
- void VisitIndirectGotoStmt(IndirectGotoStmt *S);
- void VisitContinueStmt(ContinueStmt *S);
- void VisitBreakStmt(BreakStmt *S);
- void VisitReturnStmt(ReturnStmt *S);
- void VisitDeclStmt(DeclStmt *S);
- void VisitAsmStmt(AsmStmt *S);
- void VisitExpr(Expr *E);
- void VisitPredefinedExpr(PredefinedExpr *E);
- void VisitDeclRefExpr(DeclRefExpr *E);
- void VisitIntegerLiteral(IntegerLiteral *E);
- void VisitFloatingLiteral(FloatingLiteral *E);
- void VisitImaginaryLiteral(ImaginaryLiteral *E);
- void VisitStringLiteral(StringLiteral *E);
- void VisitCharacterLiteral(CharacterLiteral *E);
- void VisitParenExpr(ParenExpr *E);
- void VisitParenListExpr(ParenListExpr *E);
- void VisitUnaryOperator(UnaryOperator *E);
- void VisitOffsetOfExpr(OffsetOfExpr *E);
- void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
- void VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- void VisitCallExpr(CallExpr *E);
- void VisitMemberExpr(MemberExpr *E);
- void VisitCastExpr(CastExpr *E);
- void VisitBinaryOperator(BinaryOperator *E);
- void VisitCompoundAssignOperator(CompoundAssignOperator *E);
- void VisitConditionalOperator(ConditionalOperator *E);
- void VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
- void VisitImplicitCastExpr(ImplicitCastExpr *E);
- void VisitExplicitCastExpr(ExplicitCastExpr *E);
- void VisitCStyleCastExpr(CStyleCastExpr *E);
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- void VisitExtVectorElementExpr(ExtVectorElementExpr *E);
- void VisitInitListExpr(InitListExpr *E);
- void VisitDesignatedInitExpr(DesignatedInitExpr *E);
- void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
- void VisitVAArgExpr(VAArgExpr *E);
- void VisitAddrLabelExpr(AddrLabelExpr *E);
- void VisitStmtExpr(StmtExpr *E);
- void VisitChooseExpr(ChooseExpr *E);
- void VisitGNUNullExpr(GNUNullExpr *E);
- void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
- void VisitBlockExpr(BlockExpr *E);
- void VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
- void VisitGenericSelectionExpr(GenericSelectionExpr *E);
- void VisitObjCStringLiteral(ObjCStringLiteral *E);
- void VisitObjCEncodeExpr(ObjCEncodeExpr *E);
- void VisitObjCSelectorExpr(ObjCSelectorExpr *E);
- void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
- void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
- void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCMessageExpr(ObjCMessageExpr *E);
- void VisitObjCIsaExpr(ObjCIsaExpr *E);
-
- void VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
- void VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
- void VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
- void VisitObjCAtTryStmt(ObjCAtTryStmt *);
- void VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
- void VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
-
- // C++ Statements
- void VisitCXXCatchStmt(CXXCatchStmt *S);
- void VisitCXXTryStmt(CXXTryStmt *S);
- void VisitCXXForRangeStmt(CXXForRangeStmt *);
-
- void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E);
- void VisitCXXConstructExpr(CXXConstructExpr *E);
- void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
- void VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
- void VisitCXXStaticCastExpr(CXXStaticCastExpr *E);
- void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E);
- void VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E);
- void VisitCXXConstCastExpr(CXXConstCastExpr *E);
- void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E);
- void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
- void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
- void VisitCXXTypeidExpr(CXXTypeidExpr *E);
- void VisitCXXUuidofExpr(CXXUuidofExpr *E);
- void VisitCXXThisExpr(CXXThisExpr *E);
- void VisitCXXThrowExpr(CXXThrowExpr *E);
- void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
- void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
-
- void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
- void VisitCXXNewExpr(CXXNewExpr *E);
- void VisitCXXDeleteExpr(CXXDeleteExpr *E);
- void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
-
- void VisitExprWithCleanups(ExprWithCleanups *E);
-
- void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
- void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
- void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
-
- void VisitOverloadExpr(OverloadExpr *E);
- void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
- void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
-
- void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
- void VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E);
- void VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
- void VisitExpressionTraitExpr(ExpressionTraitExpr *E);
- void VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
- void VisitPackExpansionExpr(PackExpansionExpr *E);
- void VisitSizeOfPackExpr(SizeOfPackExpr *E);
- void VisitSubstNonTypeTemplateParmPackExpr(
- SubstNonTypeTemplateParmPackExpr *E);
- void VisitOpaqueValueExpr(OpaqueValueExpr *E);
-
- // CUDA Expressions
- void VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E);
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
};
}
@@ -410,6 +290,7 @@ void ASTStmtReader::VisitExpr(Expr *E) {
E->setType(Reader.GetType(Record[Idx++]));
E->setTypeDependent(Record[Idx++]);
E->setValueDependent(Record[Idx++]);
+ E->setInstantiationDependent(Record[Idx++]);
E->ExprBits.ContainsUnexpandedParameterPack = Record[Idx++];
E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
E->setObjectKind(static_cast<ExprObjectKind>(Record[Idx++]));
@@ -589,6 +470,10 @@ void ASTStmtReader::VisitCallExpr(CallExpr *E) {
E->setArg(I, Reader.ReadSubExpr());
}
+void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ VisitCallExpr(E);
+}
+
void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
// Don't call VisitExpr, this is fully initialized at creation.
assert(E->getStmtClass() == Stmt::MemberExprClass &&
@@ -602,6 +487,20 @@ void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
E->setArrow(Record[Idx++]);
}
+void ASTStmtReader::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ E->Operand = Reader.ReadSubExpr();
+ E->setShouldCopy(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->BridgeKeywordLoc = ReadSourceLocation(Record, Idx);
+ E->Kind = Record[Idx++];
+}
+
void ASTStmtReader::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
unsigned NumBaseSpecs = Record[Idx++];
@@ -929,6 +828,7 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
assert(Record[Idx] == E->getNumArgs());
++Idx;
+ E->setDelegateInitCall(Record[Idx++]);
ObjCMessageExpr::ReceiverKind Kind
= static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
switch (Kind) {
@@ -987,6 +887,12 @@ void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
S->setAtFinallyLoc(ReadSourceLocation(Record, Idx));
}
+void ASTStmtReader::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setAtLoc(ReadSourceLocation(Record, Idx));
+}
+
void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
VisitStmt(S);
assert(Record[Idx] == S->getNumCatchStmts());
@@ -1125,18 +1031,6 @@ void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
// typeid(42+2)
E->setExprOperand(Reader.ReadSubExpr());
}
-void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
- VisitExpr(E);
- E->setSourceRange(ReadSourceRange(Record, Idx));
- if (E->isTypeOperand()) { // __uuidof(ComType)
- E->setTypeOperandSourceInfo(
- GetTypeSourceInfo(Record, Idx));
- return;
- }
-
- // __uuidof(expr)
- E->setExprOperand(Reader.ReadSubExpr());
-}
void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
@@ -1146,8 +1040,9 @@ void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
- E->setThrowLoc(ReadSourceLocation(Record, Idx));
- E->setSubExpr(Reader.ReadSubExpr());
+ E->ThrowLoc = ReadSourceLocation(Record, Idx);
+ E->Op = Reader.ReadSubExpr();
+ E->IsThrownVariableInScope = Record[Idx++];
}
void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
@@ -1388,6 +1283,15 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
E->Pack = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
}
+void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ E->Param
+ = cast_or_null<NonTypeTemplateParmDecl>(Reader.GetDecl(Record[Idx++]));
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+ E->Replacement = Reader.ReadSubExpr();
+}
+
void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E) {
VisitExpr(E);
@@ -1402,6 +1306,11 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
E->NameLoc = ReadSourceLocation(Record, Idx);
}
+void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ E->Temporary = Reader.ReadSubExpr();
+}
+
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
Idx++; // skip ID
@@ -1409,6 +1318,43 @@ void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
}
//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // __uuidof(ComType)
+ E->setTypeOperandSourceInfo(
+ GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // __uuidof(expr)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHExceptStmt::FILTER_EXPR] = Reader.ReadSubStmt();
+ S->Children[SEHExceptStmt::BLOCK] = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Block = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ S->IsCXXTry = Record[Idx++];
+ S->TryLoc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHTryStmt::TRY] = Reader.ReadSubStmt();
+ S->Children[SEHTryStmt::HANDLER] = Reader.ReadSubStmt();
+}
+
+//===----------------------------------------------------------------------===//
// CUDA Expressions and Statements
//===----------------------------------------------------------------------===//
@@ -1417,6 +1363,20 @@ void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
E->setConfig(cast<CallExpr>(Reader.ReadSubExpr()));
}
+//===----------------------------------------------------------------------===//
+// OpenCL Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
+ VisitExpr(E);
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SrcExpr = Reader.ReadSubExpr();
+}
+
+//===----------------------------------------------------------------------===//
+// ASTReader Implementation
+//===----------------------------------------------------------------------===//
+
Stmt *ASTReader::ReadStmt(PerFileData &F) {
switch (ReadingKind) {
case Read_Decl:
@@ -1784,6 +1744,12 @@ Stmt *ASTReader::ReadStmtFromStream(PerFileData &F) {
case EXPR_OBJC_ISA:
S = new (Context) ObjCIsaExpr(Empty);
break;
+ case EXPR_OBJC_INDIRECT_COPY_RESTORE:
+ S = new (Context) ObjCIndirectCopyRestoreExpr(Empty);
+ break;
+ case EXPR_OBJC_BRIDGED_CAST:
+ S = new (Context) ObjCBridgedCastExpr(Empty);
+ break;
case STMT_OBJC_FOR_COLLECTION:
S = new (Context) ObjCForCollectionStmt(Empty);
break;
@@ -1804,7 +1770,18 @@ Stmt *ASTReader::ReadStmtFromStream(PerFileData &F) {
case STMT_OBJC_AT_THROW:
S = new (Context) ObjCAtThrowStmt(Empty);
break;
-
+ case STMT_OBJC_AUTORELEASE_POOL:
+ S = new (Context) ObjCAutoreleasePoolStmt(Empty);
+ break;
+ case STMT_SEH_EXCEPT:
+ S = new (Context) SEHExceptStmt(Empty);
+ break;
+ case STMT_SEH_FINALLY:
+ S = new (Context) SEHFinallyStmt(Empty);
+ break;
+ case STMT_SEH_TRY:
+ S = new (Context) SEHTryStmt(Empty);
+ break;
case STMT_CXX_CATCH:
S = new (Context) CXXCatchStmt(Empty);
break;
@@ -1978,10 +1955,18 @@ Stmt *ASTReader::ReadStmtFromStream(PerFileData &F) {
S = new (Context) SizeOfPackExpr(Empty);
break;
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
+ S = new (Context) SubstNonTypeTemplateParmExpr(Empty);
+ break;
+
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
break;
+ case EXPR_MATERIALIZE_TEMPORARY:
+ S = new (Context) MaterializeTemporaryExpr(Empty);
+ break;
+
case EXPR_OPAQUE_VALUE: {
unsigned key = Record[ASTStmtReader::NumExprFields];
OpaqueValueExpr *&expr = OpaqueValueExprs[key];
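For reference, the STMT_SEH_* cases added above deserialize Microsoft structured exception handling statements. An illustrative sketch of the construct they represent, assuming a Windows target with Microsoft extensions enabled (not code from this import; guarded_read is an illustrative name):

    #include <windows.h>

    int guarded_read(const int *p) {
      __try {
        return *p;                            // may raise an access violation
      } __except (EXCEPTION_EXECUTE_HANDLER) {
        return -1;                            // SEH handler takes over
      }
    }
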
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index ba9032e0d33e..39f2fbddf80e 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -170,6 +170,7 @@ void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
Record.push_back(C.getRegParm());
// FIXME: need to stabilize encoding of calling convention...
Record.push_back(C.getCC());
+ Record.push_back(C.getProducesResult());
}
void ASTTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
@@ -780,13 +781,15 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(FP_PRAGMA_OPTIONS);
RECORD(OPENCL_EXTENSIONS);
RECORD(DELEGATING_CTORS);
+ RECORD(FILE_SOURCE_LOCATION_OFFSETS);
+ RECORD(KNOWN_NAMESPACES);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
RECORD(SM_SLOC_FILE_ENTRY);
RECORD(SM_SLOC_BUFFER_ENTRY);
RECORD(SM_SLOC_BUFFER_BLOB);
- RECORD(SM_SLOC_INSTANTIATION_ENTRY);
+ RECORD(SM_SLOC_EXPANSION_ENTRY);
RECORD(SM_LINE_TABLE);
// Preprocessor Block.
@@ -893,7 +896,7 @@ void ASTWriter::WriteBlockInfoBlock() {
AddStmtsExprs(Stream, Record);
BLOCK(PREPROCESSOR_DETAIL_BLOCK);
- RECORD(PPD_MACRO_INSTANTIATION);
+ RECORD(PPD_MACRO_EXPANSION);
RECORD(PPD_MACRO_DEFINITION);
RECORD(PPD_INCLUSION_DIRECTIVE);
@@ -1109,6 +1112,8 @@ void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.ElideConstructors);
Record.push_back(LangOpts.SpellChecking);
Record.push_back(LangOpts.MRTD);
+ Record.push_back(LangOpts.ObjCAutoRefCount);
+ Record.push_back(LangOpts.ObjCInferRelatedReturnType);
Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
}
@@ -1169,8 +1174,8 @@ void ASTWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
for (MemorizeStatCalls::iterator Stat = StatCalls.begin(),
StatEnd = StatCalls.end();
Stat != StatEnd; ++Stat, ++NumStatEntries) {
- const char *Filename = Stat->first();
- Generator.insert(Filename, Stat->second);
+ llvm::StringRef Filename = Stat->first();
+ Generator.insert(Filename.data(), Stat->second);
}
// Create the on-disk hash table in a buffer.
@@ -1245,12 +1250,12 @@ static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
return Stream.EmitAbbrev(Abbrev);
}
-/// \brief Create an abbreviation for the SLocEntry that refers to an
-/// buffer.
-static unsigned CreateSLocInstantiationAbbrev(llvm::BitstreamWriter &Stream) {
+/// \brief Create an abbreviation for the SLocEntry that refers to a macro
+/// expansion.
+static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
using namespace llvm;
BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
- Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_INSTANTIATION_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
@@ -1409,7 +1414,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
unsigned SLocBufferAbbrv = CreateSLocBufferAbbrev(Stream);
unsigned SLocBufferBlobAbbrv = CreateSLocBufferBlobAbbrev(Stream);
- unsigned SLocInstantiationAbbrv = CreateSLocInstantiationAbbrev(Stream);
+ unsigned SLocExpansionAbbrv = CreateSLocExpansionAbbrev(Stream);
// Write the line table.
if (SourceMgr.hasLineTable()) {
@@ -1474,7 +1479,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
} else
Code = SM_SLOC_BUFFER_ENTRY;
} else
- Code = SM_SLOC_INSTANTIATION_ENTRY;
+ Code = SM_SLOC_EXPANSION_ENTRY;
Record.clear();
Record.push_back(Code);
@@ -1534,7 +1539,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
PreloadSLocs.push_back(BaseSLocID + SLocEntryOffsets.size());
}
} else {
- // The source location entry is an instantiation.
+ // The source location entry is a macro expansion.
const SrcMgr::InstantiationInfo &Inst = SLoc->getInstantiation();
Record.push_back(Inst.getSpellingLoc().getRawEncoding());
Record.push_back(Inst.getInstantiationLocStart().getRawEncoding());
@@ -1545,7 +1550,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
if (I + 1 != N)
NextOffset = SourceMgr.getSLocEntry(I + 1).getOffset();
Record.push_back(NextOffset - SLoc->getOffset() - 1);
- Stream.EmitRecordWithAbbrev(SLocInstantiationAbbrv, Record);
+ Stream.EmitRecordWithAbbrev(SLocExpansionAbbrv, Record);
}
}
@@ -1795,13 +1800,13 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
SerializationListener->SerializedPreprocessedEntity(*E,
Stream.GetCurrentBitNo());
- if (MacroInstantiation *MI = dyn_cast<MacroInstantiation>(*E)) {
+ if (MacroExpansion *ME = dyn_cast<MacroExpansion>(*E)) {
Record.push_back(IndexBase + NumPreprocessingRecords++);
- AddSourceLocation(MI->getSourceRange().getBegin(), Record);
- AddSourceLocation(MI->getSourceRange().getEnd(), Record);
- AddIdentifierRef(MI->getName(), Record);
- Record.push_back(getMacroDefinitionID(MI->getDefinition()));
- Stream.EmitRecord(PPD_MACRO_INSTANTIATION, Record);
+ AddSourceLocation(ME->getSourceRange().getBegin(), Record);
+ AddSourceLocation(ME->getSourceRange().getEnd(), Record);
+ AddIdentifierRef(ME->getName(), Record);
+ Record.push_back(getMacroDefinitionID(ME->getDefinition()));
+ Stream.EmitRecord(PPD_MACRO_EXPANSION, Record);
continue;
}
@@ -2842,6 +2847,16 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
AddDeclRef(Context.getcudaConfigureCallDecl(), CUDASpecialDeclRefs);
}
+ // Build a record containing all of the known namespaces.
+ RecordData KnownNamespaces;
+ for (llvm::DenseMap<NamespaceDecl*, bool>::iterator
+ I = SemaRef.KnownNamespaces.begin(),
+ IEnd = SemaRef.KnownNamespaces.end();
+ I != IEnd; ++I) {
+ if (!I->second)
+ AddDeclRef(I->first, KnownNamespaces);
+ }
+
// Write the remaining AST contents.
RecordData Record;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
@@ -2951,6 +2966,10 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (!DelegatingCtorDecls.empty())
Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
+ // Write the known namespaces.
+ if (!KnownNamespaces.empty())
+ Stream.EmitRecord(KNOWN_NAMESPACES, KnownNamespaces);
+
// Some simple statistics
Record.clear();
Record.push_back(NumStatements);
@@ -3675,6 +3694,14 @@ void ASTWriter::AddTemplateName(TemplateName Name, RecordDataImpl &Record) {
Record.push_back(DepT->getOperator());
break;
}
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ AddDeclRef(subst->getParameter(), Record);
+ AddTemplateName(subst->getReplacement(), Record);
+ break;
+ }
case TemplateName::SubstTemplateTemplateParmPack: {
SubstTemplateTemplateParmPackStorage *SubstPack
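The new SubstTemplateTemplateParm case above serializes the substitution of a template template parameter. An illustrative sketch of the construct being recorded (not code from this import; MyVec and Wrapper are illustrative names):

    template <class T>
    struct MyVec { T *data; unsigned size; };

    // 'Container' is a template template parameter; instantiating Wrapper<MyVec>
    // substitutes MyVec for it, which is the substitution recorded here.
    template <template <class> class Container>
    struct Wrapper {
      Container<int> payload;
    };

    Wrapper<MyVec> w;
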
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 7c24088c65ba..2b8349495751 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -645,6 +645,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
+ Record.push_back(D->isARCPseudoStrong());
Record.push_back(D->getInit() ? 1 : 0);
if (D->getInit())
Writer.AddStmt(D->getInit());
@@ -670,7 +671,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
D->RedeclLink.getNext() == D &&
!D->hasCXXDirectInitializer() &&
D->getInit() == 0 &&
- !ParmVarDecl::classofKind(D->getKind()) &&
+ !isa<ParmVarDecl>(D) &&
!SpecInfo)
AbbrevToUse = Writer.getDeclVarAbbrev();
@@ -695,6 +696,8 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
Writer.AddStmt(D->getUninstantiatedDefaultArg());
Code = serialization::DECL_PARM_VAR;
+ assert(!D->isARCPseudoStrong()); // can be true of ImplicitParamDecl
+
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
// we dynamically check for the properties that we optimize for, but don't
// know are true of all PARM_VAR_DECLs.
@@ -1426,6 +1429,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(0)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
// ParmVarDecl
@@ -1498,6 +1502,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo
// Type Source Info
@@ -1512,8 +1517,9 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
//Stmt
//Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsTypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
@@ -1531,8 +1537,9 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
//Stmt
//Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsTypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
@@ -1548,8 +1555,9 @@ void ASTWriter::WriteDeclsBlockAbbrevs() {
//Stmt
//Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsTypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
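The new InstantiationDependent bit in the abbreviations above tracks expressions that mention a template parameter without being type- or value-dependent. A minimal illustrative sketch of such an expression (not code from this import; the function name is illustrative):

    #include <cstddef>

    // sizeof(sizeof(T)) has the same type and value for every T, so it is
    // neither type- nor value-dependent, yet it still mentions T and must be
    // re-checked on each instantiation (T could be an incomplete type).
    template <class T>
    std::size_t instantiation_dependent_example() {
      return sizeof(sizeof(T));
    }
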
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 00e240494645..1d73ed447b4f 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -39,134 +39,9 @@ namespace clang {
AddExplicitTemplateArgumentList(const ExplicitTemplateArgumentList &Args);
void VisitStmt(Stmt *S);
- void VisitNullStmt(NullStmt *S);
- void VisitCompoundStmt(CompoundStmt *S);
- void VisitSwitchCase(SwitchCase *S);
- void VisitCaseStmt(CaseStmt *S);
- void VisitDefaultStmt(DefaultStmt *S);
- void VisitLabelStmt(LabelStmt *S);
- void VisitIfStmt(IfStmt *S);
- void VisitSwitchStmt(SwitchStmt *S);
- void VisitWhileStmt(WhileStmt *S);
- void VisitDoStmt(DoStmt *S);
- void VisitForStmt(ForStmt *S);
- void VisitGotoStmt(GotoStmt *S);
- void VisitIndirectGotoStmt(IndirectGotoStmt *S);
- void VisitContinueStmt(ContinueStmt *S);
- void VisitBreakStmt(BreakStmt *S);
- void VisitReturnStmt(ReturnStmt *S);
- void VisitDeclStmt(DeclStmt *S);
- void VisitAsmStmt(AsmStmt *S);
- void VisitExpr(Expr *E);
- void VisitPredefinedExpr(PredefinedExpr *E);
- void VisitDeclRefExpr(DeclRefExpr *E);
- void VisitIntegerLiteral(IntegerLiteral *E);
- void VisitFloatingLiteral(FloatingLiteral *E);
- void VisitImaginaryLiteral(ImaginaryLiteral *E);
- void VisitStringLiteral(StringLiteral *E);
- void VisitCharacterLiteral(CharacterLiteral *E);
- void VisitParenExpr(ParenExpr *E);
- void VisitParenListExpr(ParenListExpr *E);
- void VisitUnaryOperator(UnaryOperator *E);
- void VisitOffsetOfExpr(OffsetOfExpr *E);
- void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
- void VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- void VisitCallExpr(CallExpr *E);
- void VisitMemberExpr(MemberExpr *E);
- void VisitCastExpr(CastExpr *E);
- void VisitBinaryOperator(BinaryOperator *E);
- void VisitCompoundAssignOperator(CompoundAssignOperator *E);
- void VisitConditionalOperator(ConditionalOperator *E);
- void VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
- void VisitImplicitCastExpr(ImplicitCastExpr *E);
- void VisitExplicitCastExpr(ExplicitCastExpr *E);
- void VisitCStyleCastExpr(CStyleCastExpr *E);
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- void VisitExtVectorElementExpr(ExtVectorElementExpr *E);
- void VisitInitListExpr(InitListExpr *E);
- void VisitDesignatedInitExpr(DesignatedInitExpr *E);
- void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
- void VisitVAArgExpr(VAArgExpr *E);
- void VisitAddrLabelExpr(AddrLabelExpr *E);
- void VisitStmtExpr(StmtExpr *E);
- void VisitChooseExpr(ChooseExpr *E);
- void VisitGNUNullExpr(GNUNullExpr *E);
- void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
- void VisitBlockExpr(BlockExpr *E);
- void VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
- void VisitGenericSelectionExpr(GenericSelectionExpr *E);
-
- // Objective-C Expressions
- void VisitObjCStringLiteral(ObjCStringLiteral *E);
- void VisitObjCEncodeExpr(ObjCEncodeExpr *E);
- void VisitObjCSelectorExpr(ObjCSelectorExpr *E);
- void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
- void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
- void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCMessageExpr(ObjCMessageExpr *E);
- void VisitObjCIsaExpr(ObjCIsaExpr *E);
-
- // Objective-C Statements
- void VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
- void VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
- void VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
- void VisitObjCAtTryStmt(ObjCAtTryStmt *);
- void VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
- void VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
-
- // C++ Statements
- void VisitCXXCatchStmt(CXXCatchStmt *S);
- void VisitCXXTryStmt(CXXTryStmt *S);
- void VisitCXXForRangeStmt(CXXForRangeStmt *);
-
- void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E);
- void VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
- void VisitCXXConstructExpr(CXXConstructExpr *E);
- void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
- void VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
- void VisitCXXStaticCastExpr(CXXStaticCastExpr *E);
- void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E);
- void VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E);
- void VisitCXXConstCastExpr(CXXConstCastExpr *E);
- void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E);
- void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
- void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
- void VisitCXXTypeidExpr(CXXTypeidExpr *E);
- void VisitCXXUuidofExpr(CXXUuidofExpr *E);
- void VisitCXXThisExpr(CXXThisExpr *E);
- void VisitCXXThrowExpr(CXXThrowExpr *E);
- void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
- void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
-
- void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
- void VisitCXXNewExpr(CXXNewExpr *E);
- void VisitCXXDeleteExpr(CXXDeleteExpr *E);
- void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
-
- void VisitExprWithCleanups(ExprWithCleanups *E);
- void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
- void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
- void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
-
- void VisitOverloadExpr(OverloadExpr *E);
- void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
- void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
-
- void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
- void VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E);
- void VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
- void VisitExpressionTraitExpr(ExpressionTraitExpr *E);
- void VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
- void VisitPackExpansionExpr(PackExpansionExpr *E);
- void VisitSizeOfPackExpr(SizeOfPackExpr *E);
- void VisitSubstNonTypeTemplateParmPackExpr(
- SubstNonTypeTemplateParmPackExpr *E);
- void VisitOpaqueValueExpr(OpaqueValueExpr *E);
-
- // CUDA Expressions
- void VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E);
-
- void VisitAsTypeExpr(AsTypeExpr *E);
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
};
}
@@ -371,6 +246,7 @@ void ASTStmtWriter::VisitExpr(Expr *E) {
Writer.AddTypeRef(E->getType(), Record);
Record.push_back(E->isTypeDependent());
Record.push_back(E->isValueDependent());
+ Record.push_back(E->isInstantiationDependent());
Record.push_back(E->containsUnexpandedParameterPack());
Record.push_back(E->getValueKind());
Record.push_back(E->getObjectKind());
@@ -606,6 +482,22 @@ void ASTStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) {
Code = serialization::EXPR_OBJC_ISA;
}
+void ASTStmtWriter::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->shouldCopy());
+ Code = serialization::EXPR_OBJC_INDIRECT_COPY_RESTORE;
+}
+
+void ASTStmtWriter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getBridgeKeywordLoc(), Record);
+ Record.push_back(E->getBridgeKind()); // FIXME: Stable encoding
+ Code = serialization::EXPR_OBJC_BRIDGED_CAST;
+}
+
void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
Record.push_back(E->path_size());
@@ -914,6 +806,7 @@ void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
+ Record.push_back(E->isDelegateInitCall());
Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
@@ -973,6 +866,12 @@ void ASTStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
Code = serialization::STMT_OBJC_FINALLY;
}
+void ASTStmtWriter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getAtLoc(), Record);
+ Code = serialization::STMT_OBJC_AUTORELEASE_POOL;
+}
+
void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
Record.push_back(S->getNumCatchStmts());
Record.push_back(S->getFinallyStmt() != 0);
@@ -1123,18 +1022,6 @@ void ASTStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
}
}
-void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
- VisitExpr(E);
- Writer.AddSourceRange(E->getSourceRange(), Record);
- if (E->isTypeOperand()) {
- Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
- Code = serialization::EXPR_CXX_UUIDOF_TYPE;
- } else {
- Writer.AddStmt(E->getExprOperand());
- Code = serialization::EXPR_CXX_UUIDOF_EXPR;
- }
-}
-
void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getLocation(), Record);
@@ -1146,6 +1033,7 @@ void ASTStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getThrowLoc(), Record);
Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->isThrownVariableInScope());
Code = serialization::EXPR_CXX_THROW;
}
@@ -1408,15 +1296,30 @@ void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
Code = serialization::EXPR_SIZEOF_PACK;
}
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getParameter(), Record);
+ Writer.AddSourceLocation(E->getNameLoc(), Record);
+ Writer.AddStmt(E->getReplacement());
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM;
+}
+
void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E) {
VisitExpr(E);
- Writer.AddDeclRef(E->Param, Record);
+ Writer.AddDeclRef(E->getParameterPack(), Record);
Writer.AddTemplateArgument(E->getArgumentPack(), Record);
- Writer.AddSourceLocation(E->NameLoc, Record);
+ Writer.AddSourceLocation(E->getParameterPackLocation(), Record);
Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
}
+void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->Temporary);
+ Code = serialization::EXPR_MATERIALIZE_TEMPORARY;
+}
+
void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
Record.push_back(Writer.getOpaqueValueID(E));
@@ -1439,11 +1342,52 @@ void ASTStmtWriter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
//===----------------------------------------------------------------------===//
void ASTStmtWriter::VisitAsTypeExpr(AsTypeExpr *E) {
VisitExpr(E);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
Writer.AddStmt(E->getSrcExpr());
Code = serialization::EXPR_ASTYPE;
}
//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_UUIDOF_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_UUIDOF_EXPR;
+ }
+}
+
+void ASTStmtWriter::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getExceptLoc(), Record);
+ Writer.AddStmt(S->getFilterExpr());
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_EXCEPT;
+}
+
+void ASTStmtWriter::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getFinallyLoc(), Record);
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_FINALLY;
+}
+
+void ASTStmtWriter::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getIsCXXTry());
+ Writer.AddSourceLocation(S->getTryLoc(), Record);
+ Writer.AddStmt(S->getTryBlock());
+ Writer.AddStmt(S->getHandler());
+ Code = serialization::STMT_SEH_TRY;
+}
+
+//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/Serialization/ChainedIncludesSource.cpp b/lib/Serialization/ChainedIncludesSource.cpp
index da5be957a530..3b7cd23b92ae 100644
--- a/lib/Serialization/ChainedIncludesSource.cpp
+++ b/lib/Serialization/ChainedIncludesSource.cpp
@@ -185,7 +185,8 @@ ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
void ChainedIncludesSource::MaterializeVisibleDecls(const DeclContext *DC) {
return getFinalReader().MaterializeVisibleDecls(DC);
}
-bool ChainedIncludesSource::FindExternalLexicalDecls(const DeclContext *DC,
+ExternalLoadResult
+ChainedIncludesSource::FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
llvm::SmallVectorImpl<Decl*> &Result) {
return getFinalReader().FindExternalLexicalDecls(DC, isKindWeWant, Result);
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 235b400eb9e3..9fc8163ab8b9 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -606,7 +606,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
continue;
// Ignore CF references, which can be toll-free bridged.
- if (cocoa::isCFObjectRef(ArgTy))
+ if (coreFoundation::isCFObjectRef(ArgTy))
continue;
// Generate only one error node to use for all bug reports.
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index f2f5c1ed44e9..c5dac5d21626 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -30,8 +30,11 @@ class CStringChecker : public Checker< eval::Call,
check::DeadSymbols,
check::RegionChanges
> {
- mutable llvm::OwningPtr<BugType> BT_Null, BT_Bounds, BT_BoundsWrite,
- BT_Overlap, BT_NotCString;
+ mutable llvm::OwningPtr<BugType> BT_Null, BT_Bounds,
+ BT_Overlap, BT_NotCString,
+ BT_AdditionOverflow;
+ mutable const char *CurrentFunctionDescription;
+
public:
static void *getTag() { static int tag; return &tag; }
@@ -91,9 +94,11 @@ public:
const MemRegion *MR, SVal strLength);
static SVal getCStringLengthForRegion(CheckerContext &C,
const GRState *&state,
- const Expr *Ex, const MemRegion *MR);
+ const Expr *Ex, const MemRegion *MR,
+ bool hypothetical);
SVal getCStringLength(CheckerContext &C, const GRState *&state,
- const Expr *Ex, SVal Buf) const;
+ const Expr *Ex, SVal Buf,
+ bool hypothetical = false) const;
const StringLiteral *getCStringLiteral(CheckerContext &C,
const GRState *&state,
@@ -112,17 +117,29 @@ public:
const Expr *S, SVal l) const;
const GRState *CheckLocation(CheckerContext &C, const GRState *state,
const Expr *S, SVal l,
- bool IsDestination = false) const;
+ const char *message = NULL) const;
const GRState *CheckBufferAccess(CheckerContext &C, const GRState *state,
const Expr *Size,
const Expr *FirstBuf,
- const Expr *SecondBuf = NULL,
- bool FirstIsDestination = false) const;
+ const Expr *SecondBuf,
+ const char *firstMessage = NULL,
+ const char *secondMessage = NULL,
+ bool WarnAboutSize = false) const;
+ const GRState *CheckBufferAccess(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *Buf,
+ const char *message = NULL,
+ bool WarnAboutSize = false) const {
+ // This is a convenience override.
+ return CheckBufferAccess(C, state, Size, Buf, NULL, message, NULL,
+ WarnAboutSize);
+ }
const GRState *CheckOverlap(CheckerContext &C, const GRState *state,
const Expr *Size, const Expr *First,
const Expr *Second) const;
void emitOverlapBug(CheckerContext &C, const GRState *state,
const Stmt *First, const Stmt *Second) const;
+ const GRState *checkAdditionOverflow(CheckerContext &C, const GRState *state,
+ NonLoc left, NonLoc right) const;
};
class CStringLength {
@@ -176,10 +193,14 @@ const GRState *CStringChecker::checkNonNull(CheckerContext &C,
BT_Null.reset(new BuiltinBug("API",
"Null pointer argument in call to byte string function"));
+ llvm::SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Null pointer argument in call to " << CurrentFunctionDescription;
+
// Generate a report for this bug.
BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
- EnhancedBugReport *report = new EnhancedBugReport(*BT,
- BT->getDescription(), N);
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
report->addRange(S->getSourceRange());
report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, S);
@@ -196,7 +217,7 @@ const GRState *CStringChecker::checkNonNull(CheckerContext &C,
const GRState *CStringChecker::CheckLocation(CheckerContext &C,
const GRState *state,
const Expr *S, SVal l,
- bool IsDestination) const {
+ const char *warningMsg) const {
// If a previous check has failed, propagate the failure.
if (!state)
return NULL;
@@ -216,7 +237,8 @@ const GRState *CStringChecker::CheckLocation(CheckerContext &C,
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
SValBuilder &svalBuilder = C.getSValBuilder();
- SVal Extent = svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+ SVal Extent =
+ svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent);
// Get the index of the accessed element.
@@ -229,28 +251,32 @@ const GRState *CStringChecker::CheckLocation(CheckerContext &C,
if (!N)
return NULL;
- BuiltinBug *BT;
- if (IsDestination) {
- if (!BT_BoundsWrite) {
- BT_BoundsWrite.reset(new BuiltinBug("Out-of-bound array access",
- "Byte string function overflows destination buffer"));
- }
- BT = static_cast<BuiltinBug*>(BT_BoundsWrite.get());
+ if (!BT_Bounds) {
+ BT_Bounds.reset(new BuiltinBug("Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element"));
+ }
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
+
+ // Generate a report for this bug.
+ RangedBugReport *report;
+ if (warningMsg) {
+ report = new RangedBugReport(*BT, warningMsg, N);
} else {
- if (!BT_Bounds) {
- BT_Bounds.reset(new BuiltinBug("Out-of-bound array access",
- "Byte string function accesses out-of-bound array element"));
- }
- BT = static_cast<BuiltinBug*>(BT_Bounds.get());
+ assert(CurrentFunctionDescription);
+ assert(CurrentFunctionDescription[0] != '\0');
+
+ llvm::SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << (char)toupper(CurrentFunctionDescription[0])
+ << &CurrentFunctionDescription[1]
+ << " accesses out-of-bound array element";
+ report = new RangedBugReport(*BT, os.str(), N);
}
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
- // Generate a report for this bug.
- RangedBugReport *report = new RangedBugReport(*BT, BT->getDescription(), N);
-
report->addRange(S->getSourceRange());
C.EmitReport(report);
return NULL;
@@ -266,13 +292,15 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
const Expr *Size,
const Expr *FirstBuf,
const Expr *SecondBuf,
- bool FirstIsDestination) const {
+ const char *firstMessage,
+ const char *secondMessage,
+ bool WarnAboutSize) const {
// If a previous check has failed, propagate the failure.
if (!state)
return NULL;
SValBuilder &svalBuilder = C.getSValBuilder();
- ASTContext &Ctx = C.getASTContext();
+ ASTContext &Ctx = svalBuilder.getContext();
QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
@@ -284,6 +312,8 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
return NULL;
// Get the access length and make sure it is known.
+ // FIXME: This assumes the caller has already checked that the access length
+ // is positive. And that it's unsigned.
SVal LengthVal = state->getSVal(Size);
NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
if (!Length)
@@ -297,9 +327,11 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
// Check that the first buffer is sufficiently long.
SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
+
SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
LastOffset, PtrTy);
- state = CheckLocation(C, state, FirstBuf, BufEnd, FirstIsDestination);
+ state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
// If the buffer isn't large enough, abort.
if (!state)
@@ -315,9 +347,11 @@ const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
+
SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
LastOffset, PtrTy);
- state = CheckLocation(C, state, SecondBuf, BufEnd);
+ state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
}
}
@@ -368,8 +402,7 @@ const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
state = stateFalse;
// Which value comes first?
- ASTContext &Ctx = svalBuilder.getContext();
- QualType cmpTy = Ctx.IntTy;
+ QualType cmpTy = svalBuilder.getConditionType();
SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
*firstLoc, *secondLoc, cmpTy);
DefinedOrUnknownSVal *reverseTest = dyn_cast<DefinedOrUnknownSVal>(&reverse);
@@ -402,8 +435,10 @@ const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
// Convert the first buffer's start address to char*.
// Bail out if the cast fails.
+ ASTContext &Ctx = svalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy, First->getType());
+ SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
+ First->getType());
Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart);
if (!FirstStartLoc)
return state;
@@ -454,12 +489,78 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, const GRState *state,
C.EmitReport(report);
}
+const GRState *CStringChecker::checkAdditionOverflow(CheckerContext &C,
+ const GRState *state,
+ NonLoc left,
+ NonLoc right) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
+ NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
+
+ SVal maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
+ sizeTy);
+
+ if (maxMinusRight.isUnknownOrUndef()) {
+ // Try switching the operands. (The order of these two assignments is
+ // important!)
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
+ sizeTy);
+ left = right;
+ }
+
+ if (NonLoc *maxMinusRightNL = dyn_cast<NonLoc>(&maxMinusRight)) {
+ QualType cmpTy = svalBuilder.getConditionType();
+ // If left > max - right, we have an overflow.
+ SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
+ *maxMinusRightNL, cmpTy);
+
+ const GRState *stateOverflow, *stateOkay;
+ llvm::tie(stateOverflow, stateOkay) =
+ state->assume(cast<DefinedOrUnknownSVal>(willOverflow));
+
+ if (stateOverflow && !stateOkay) {
+ // We have an overflow. Emit a bug report.
+ ExplodedNode *N = C.generateSink(stateOverflow);
+ if (!N)
+ return NULL;
+
+ if (!BT_AdditionOverflow)
+ BT_AdditionOverflow.reset(new BuiltinBug("API",
+ "Sum of expressions causes overflow"));
+
+ // This isn't a great error message, but this should never occur in real
+ // code anyway -- you'd have to create a buffer longer than a size_t can
+ // represent, which is sort of a contradiction.
+ const char *warning =
+ "This expression will create a string whose length is too big to "
+ "be represented as a size_t";
+
+ // Generate a report for this bug.
+ BugReport *report = new BugReport(*BT_AdditionOverflow, warning, N);
+ C.EmitReport(report);
+
+ return NULL;
+ }
+
+ // From now on, assume an overflow didn't occur.
+ assert(stateOkay);
+ state = stateOkay;
+ }
+
+ return state;
+}
+
const GRState *CStringChecker::setCStringLength(const GRState *state,
const MemRegion *MR,
SVal strLength) {
assert(!strLength.isUndef() && "Attempt to set an undefined string length");
- if (strLength.isUnknown())
- return state;
MR = MR->StripCasts();
@@ -474,7 +575,8 @@ const GRState *CStringChecker::setCStringLength(const GRState *state,
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
- return state->set<CStringLength>(MR, strLength);
+ // These are the types we can currently track string lengths for.
+ break;
case MemRegion::ElementRegionKind:
// FIXME: Handle element regions by upper-bounding the parent region's
@@ -488,16 +590,24 @@ const GRState *CStringChecker::setCStringLength(const GRState *state,
// warning for things like strcpy((char[]){'a', 0}, "b");
return state;
}
+
+ if (strLength.isUnknown())
+ return state->remove<CStringLength>(MR);
+
+ return state->set<CStringLength>(MR, strLength);
}
SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
const GRState *&state,
const Expr *Ex,
- const MemRegion *MR) {
- // If there's a recorded length, go ahead and return it.
- const SVal *Recorded = state->get<CStringLength>(MR);
- if (Recorded)
- return *Recorded;
+ const MemRegion *MR,
+ bool hypothetical) {
+ if (!hypothetical) {
+ // If there's a recorded length, go ahead and return it.
+ const SVal *Recorded = state->get<CStringLength>(MR);
+ if (Recorded)
+ return *Recorded;
+ }
// Otherwise, get a new symbol and update the state.
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
@@ -505,12 +615,16 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
QualType sizeTy = svalBuilder.getContext().getSizeType();
SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
MR, Ex, sizeTy, Count);
- state = state->set<CStringLength>(MR, strLength);
+
+ if (!hypothetical)
+ state = state->set<CStringLength>(MR, strLength);
+
return strLength;
}
SVal CStringChecker::getCStringLength(CheckerContext &C, const GRState *&state,
- const Expr *Ex, SVal Buf) const {
+ const Expr *Ex, SVal Buf,
+ bool hypothetical) const {
const MemRegion *MR = Buf.getAsRegion();
if (!MR) {
// If we can't get a region, see if it's something we /know/ isn't a
@@ -524,8 +638,9 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const GRState *&state,
llvm::SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
- os << "Argument to byte string function is the address of the label '"
- << Label->getLabel()->getName()
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription
+ << " is the address of the label '" << Label->getLabel()->getName()
<< "', which is not a null-terminated string";
// Generate a report for this bug.
@@ -561,7 +676,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const GRState *&state,
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
- return getCStringLengthForRegion(C, state, Ex, MR);
+ return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
case MemRegion::CompoundLiteralRegionKind:
// FIXME: Can we track this? Is it necessary?
return UnknownVal();
@@ -581,7 +696,8 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, const GRState *&state,
llvm::SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
- os << "Argument to byte string function is ";
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription << " is ";
if (SummarizeRegion(os, C.getASTContext(), MR))
os << ", which is not a null-terminated string";
@@ -700,12 +816,15 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
const Expr *Size, const Expr *Dest,
const Expr *Source, bool Restricted,
bool IsMempcpy) const {
+ CurrentFunctionDescription = "memory copy function";
+
// See if the size argument is zero.
SVal sizeVal = state->getSVal(Size);
QualType sizeTy = Size->getType();
const GRState *stateZeroSize, *stateNonZeroSize;
- llvm::tie(stateZeroSize, stateNonZeroSize) = assumeZero(C, state, sizeVal, sizeTy);
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
// Get the value of the Dest.
SVal destVal = state->getSVal(Dest);
@@ -737,8 +856,10 @@ void CStringChecker::evalCopyCommon(CheckerContext &C,
return;
// Ensure the accesses are valid and that the buffers do not overlap.
+ const char * const writeWarning =
+ "Memory copy function overflows destination buffer";
state = CheckBufferAccess(C, state, Size, Dest, Source,
- /* FirstIsDst = */ true);
+ writeWarning, /* sourceWarning = */ NULL);
if (Restricted)
state = CheckOverlap(C, state, Size, Dest, Source);
@@ -824,6 +945,8 @@ void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// int memcmp(const void *s1, const void *s2, size_t n);
+ CurrentFunctionDescription = "memory comparison function";
+
const Expr *Left = CE->getArg(0);
const Expr *Right = CE->getArg(1);
const Expr *Size = CE->getArg(2);
@@ -861,7 +984,7 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
const GRState *StSameBuf, *StNotSameBuf;
llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
- // If the two arguments might be the same buffer, we know the result is zero,
+ // If the two arguments might be the same buffer, we know the result is 0,
// and we only need to check one size.
if (StSameBuf) {
state = StSameBuf;
@@ -902,58 +1025,126 @@ void CStringChecker::evalstrnLength(CheckerContext &C,
void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
bool IsStrnlen) const {
+ CurrentFunctionDescription = "string length function";
const GRState *state = C.getState();
+
+ if (IsStrnlen) {
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr);
+
+ const GRState *stateZeroSize, *stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, maxlenVal, maxlenExpr->getType());
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check the string itself.
+ if (stateZeroSize) {
+ SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
+ stateZeroSize = stateZeroSize->BindExpr(CE, zero);
+ C.addTransition(stateZeroSize);
+ }
+
+ // If the size is GUARANTEED to be zero, we're done!
+ if (!stateNonZeroSize)
+ return;
+
+ // Otherwise, record the assumption that the size is nonzero.
+ state = stateNonZeroSize;
+ }
+
+ // Check that the string argument is non-null.
const Expr *Arg = CE->getArg(0);
SVal ArgVal = state->getSVal(Arg);
- // Check that the argument is non-null.
state = checkNonNull(C, state, Arg, ArgVal);
- if (state) {
- SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+ if (!state)
+ return;
- // If the argument isn't a valid C string, there's no valid state to
- // transition to.
- if (strLength.isUndef())
- return;
+ SVal strLength = getCStringLength(C, state, Arg, ArgVal);
- // If the check is for strnlen() then bind the return value to no more than
- // the maxlen value.
- if (IsStrnlen) {
- const Expr *maxlenExpr = CE->getArg(1);
- SVal maxlenVal = state->getSVal(maxlenExpr);
-
- NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
- NonLoc *maxlenValNL = dyn_cast<NonLoc>(&maxlenVal);
+ // If the argument isn't a valid C string, there's no valid state to
+ // transition to.
+ if (strLength.isUndef())
+ return;
- QualType cmpTy = C.getSValBuilder().getContext().IntTy;
- const GRState *stateTrue, *stateFalse;
-
- // Check if the strLength is greater than or equal to the maxlen
- llvm::tie(stateTrue, stateFalse) =
+ DefinedOrUnknownSVal result = UnknownVal();
+
+ // If the check is for strnlen() then bind the return value to no more than
+ // the maxlen value.
+ if (IsStrnlen) {
+ QualType cmpTy = C.getSValBuilder().getConditionType();
+
+ // It's a little unfortunate to be getting this again,
+ // but it's not that expensive...
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr);
+
+ NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
+ NonLoc *maxlenValNL = dyn_cast<NonLoc>(&maxlenVal);
+
+ if (strLengthNL && maxlenValNL) {
+ const GRState *stateStringTooLong, *stateStringNotTooLong;
+
+ // Check if the strLength is greater than the maxlen.
+ llvm::tie(stateStringTooLong, stateStringNotTooLong) =
state->assume(cast<DefinedOrUnknownSVal>
- (C.getSValBuilder().evalBinOpNN(state, BO_GE,
- *strLengthNL, *maxlenValNL,
+ (C.getSValBuilder().evalBinOpNN(state, BO_GT,
+ *strLengthNL,
+ *maxlenValNL,
cmpTy)));
- // If the strLength is greater than or equal to the maxlen, set strLength
- // to maxlen
- if (stateTrue && !stateFalse) {
- strLength = maxlenVal;
+ if (stateStringTooLong && !stateStringNotTooLong) {
+ // If the string is longer than maxlen, return maxlen.
+ result = *maxlenValNL;
+ } else if (stateStringNotTooLong && !stateStringTooLong) {
+ // If the string is shorter than maxlen, return its length.
+ result = *strLengthNL;
}
}
- // If getCStringLength couldn't figure out the length, conjure a return
- // value, so it can be used in constraints, at least.
- if (strLength.isUnknown()) {
+ if (result.isUnknown()) {
+ // If we don't have enough information for a comparison, there's
+ // no guarantee the full string length will actually be returned.
+ // All we know is the return value is the min of the string length
+ // and the limit. This is better than nothing.
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- strLength = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
+ NonLoc *resultNL = cast<NonLoc>(&result);
+
+ if (strLengthNL) {
+ state = state->assume(cast<DefinedOrUnknownSVal>
+ (C.getSValBuilder().evalBinOpNN(state, BO_LE,
+ *resultNL,
+ *strLengthNL,
+ cmpTy)), true);
+ }
+
+ if (maxlenValNL) {
+ state = state->assume(cast<DefinedOrUnknownSVal>
+ (C.getSValBuilder().evalBinOpNN(state, BO_LE,
+ *resultNL,
+ *maxlenValNL,
+ cmpTy)), true);
+ }
}
- // Bind the return value.
- state = state->BindExpr(CE, strLength);
- C.addTransition(state);
+ } else {
+ // This is a plain strlen(), not strnlen().
+ result = cast<DefinedOrUnknownSVal>(strLength);
+
+ // If we don't know the length of the string, conjure a return
+ // value, so it can be used in constraints, at least.
+ if (result.isUnknown()) {
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, Count);
+ }
}
+
+ // Bind the return value.
+ assert(!result.isUnknown() && "Should have conjured a value by now");
+ state = state->BindExpr(CE, result);
+ C.addTransition(state);
}
void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
@@ -999,6 +1190,7 @@ void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
bool returnEnd, bool isBounded,
bool isAppending) const {
+ CurrentFunctionDescription = "string copy function";
const GRState *state = C.getState();
// Check that the destination is non-null.
@@ -1023,76 +1215,240 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (strLength.isUndef())
return;
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType cmpTy = svalBuilder.getConditionType();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+
+ // These two values allow checking two kinds of errors:
+ // - actual overflows caused by a source that doesn't fit in the destination
+ // - potential overflows caused by a bound that could exceed the destination
+ SVal amountCopied = UnknownVal();
+ SVal maxLastElementIndex = UnknownVal();
+ const char *boundWarning = NULL;
+
// If the function is strncpy, strncat, etc... it is bounded.
if (isBounded) {
// Get the max number of characters to copy.
const Expr *lenExpr = CE->getArg(2);
SVal lenVal = state->getSVal(lenExpr);
- // Cast the length to a NonLoc SVal. If it is not a NonLoc then give up.
- NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
- if (!strLengthNL)
- return;
+ // Protect against misdeclared strncpy().
+ lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
- // Cast the max length to a NonLoc SVal. If it is not a NonLoc then give up.
+ NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
NonLoc *lenValNL = dyn_cast<NonLoc>(&lenVal);
- if (!lenValNL)
- return;
- QualType cmpTy = C.getSValBuilder().getContext().IntTy;
- const GRState *stateTrue, *stateFalse;
-
- // Check if the max number to copy is less than the length of the src.
- llvm::tie(stateTrue, stateFalse) =
- state->assume(cast<DefinedOrUnknownSVal>
- (C.getSValBuilder().evalBinOpNN(state, BO_GT,
- *strLengthNL, *lenValNL,
- cmpTy)));
-
- if (stateTrue) {
- // Max number to copy is less than the length of the src, so the actual
- // strLength copied is the max number arg.
- strLength = lenVal;
- }
+ // If we know both values, we might be able to figure out how much
+ // we're copying.
+ if (strLengthNL && lenValNL) {
+ const GRState *stateSourceTooLong, *stateSourceNotTooLong;
+
+ // Check if the max number to copy is less than the length of the src.
+ // If the bound is equal to the source length, strncpy won't null-
+ // terminate the result!
+ llvm::tie(stateSourceTooLong, stateSourceNotTooLong) =
+ state->assume(cast<DefinedOrUnknownSVal>
+ (svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL,
+ *lenValNL, cmpTy)));
+
+ if (stateSourceTooLong && !stateSourceNotTooLong) {
+ // Max number to copy is less than the length of the src, so the actual
+ // strLength copied is the max number arg.
+ state = stateSourceTooLong;
+ amountCopied = lenVal;
+
+ } else if (!stateSourceTooLong && stateSourceNotTooLong) {
+ // The source buffer entirely fits in the bound.
+ state = stateSourceNotTooLong;
+ amountCopied = strLength;
+ }
+ }
+
+ // We still want to know if the bound is known to be too large.
+ if (lenValNL) {
+ if (isAppending) {
+ // For strncat, the check is strlen(dst) + lenVal < sizeof(dst)
+
+ // Get the string length of the destination. If the destination is
+ // memory that can't have a string length, we shouldn't be copying
+ // into it anyway.
+ SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ if (dstStrLength.isUndef())
+ return;
+
+ if (NonLoc *dstStrLengthNL = dyn_cast<NonLoc>(&dstStrLength)) {
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Add,
+ *lenValNL,
+ *dstStrLengthNL,
+ sizeTy);
+ boundWarning = "Size argument is greater than the free space in the "
+ "destination buffer";
+ }
+
+ } else {
+ // For strncpy, this is just checking that lenVal <= sizeof(dst)
+ // (Yes, strncpy and strncat differ in how they treat termination.
+ // strncat ALWAYS terminates, but strncpy doesn't.)
+ NonLoc one = cast<NonLoc>(svalBuilder.makeIntVal(1, sizeTy));
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
+ one, sizeTy);
+ boundWarning = "Size argument is greater than the length of the "
+ "destination buffer";
+ }
+ }
+
+ // If we couldn't pin down the copy length, at least bound it.
+ // FIXME: We should actually run this code path for append as well, but
+ // right now it creates problems with constraints (since we can end up
+ // trying to pass constraints from symbol to symbol).
+ if (amountCopied.isUnknown() && !isAppending) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ amountCopied = getCStringLength(C, state, lenExpr, srcVal, true);
+ assert(!amountCopied.isUndef());
+
+ if (NonLoc *amountCopiedNL = dyn_cast<NonLoc>(&amountCopied)) {
+ if (lenValNL) {
+ // amountCopied <= lenVal
+ SVal copiedLessThanBound = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *lenValNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(copiedLessThanBound),
+ true);
+ if (!state)
+ return;
+ }
+
+ if (strLengthNL) {
+ // amountCopied <= strlen(source)
+ SVal copiedLessThanSrc = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *strLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(copiedLessThanSrc),
+ true);
+ if (!state)
+ return;
+ }
+ }
+ }
+
+ } else {
+ // The function isn't bounded. The amount copied should match the length
+ // of the source buffer.
+ amountCopied = strLength;
}
+ assert(state);
+
+ // This represents the number of characters copied into the destination
+ // buffer. (It may not actually be the strlen if the destination buffer
+ // is not terminated.)
+ SVal finalStrLength = UnknownVal();
+
// If this is an appending function (strcat, strncat...) then set the
// string length to strlen(src) + strlen(dst) since the buffer will
// ultimately contain both.
if (isAppending) {
- // Get the string length of the destination, or give up.
+ // Get the string length of the destination. If the destination is memory
+ // that can't have a string length, we shouldn't be copying into it anyway.
SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
if (dstStrLength.isUndef())
return;
- NonLoc *srcStrLengthNL = dyn_cast<NonLoc>(&strLength);
+ NonLoc *srcStrLengthNL = dyn_cast<NonLoc>(&amountCopied);
NonLoc *dstStrLengthNL = dyn_cast<NonLoc>(&dstStrLength);
- // If src or dst cast to NonLoc is NULL, give up.
- if ((!srcStrLengthNL) || (!dstStrLengthNL))
- return;
+ // If we know both string lengths, we might know the final string length.
+ if (srcStrLengthNL && dstStrLengthNL) {
+ // Make sure the two lengths together don't overflow a size_t.
+ state = checkAdditionOverflow(C, state, *srcStrLengthNL, *dstStrLengthNL);
+ if (!state)
+ return;
+
+ finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
+ *dstStrLengthNL, sizeTy);
+ }
- QualType addTy = C.getSValBuilder().getContext().getSizeType();
+ // If we couldn't get a single value for the final string length,
+ // we can at least bound it by the individual lengths.
+ if (finalStrLength.isUnknown()) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ finalStrLength = getCStringLength(C, state, CE, DstVal, true);
+ assert(!finalStrLength.isUndef());
+
+ if (NonLoc *finalStrLengthNL = dyn_cast<NonLoc>(&finalStrLength)) {
+ if (srcStrLengthNL) {
+ // finalStrLength >= srcStrLength
+ SVal sourceInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *srcStrLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(sourceInResult),
+ true);
+ if (!state)
+ return;
+ }
+
+ if (dstStrLengthNL) {
+ // finalStrLength >= dstStrLength
+ SVal destInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *dstStrLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(destInResult),
+ true);
+ if (!state)
+ return;
+ }
+ }
+ }
- strLength = C.getSValBuilder().evalBinOpNN(state, BO_Add,
- *srcStrLengthNL, *dstStrLengthNL,
- addTy);
+ } else {
+ // Otherwise, this is a copy-over function (strcpy, strncpy, ...), and
+ // the final string length will match the input string length.
+ finalStrLength = amountCopied;
}
+ // The final result of the function will either be a pointer past the last
+ // copied element, or a pointer to the start of the destination buffer.
SVal Result = (returnEnd ? UnknownVal() : DstVal);
+ assert(state);
+
// If the destination is a MemRegion, try to check for a buffer overflow and
// record the new string length.
if (loc::MemRegionVal *dstRegVal = dyn_cast<loc::MemRegionVal>(&DstVal)) {
- // If the length is known, we can check for an overflow.
- if (NonLoc *knownStrLength = dyn_cast<NonLoc>(&strLength)) {
- SVal lastElement =
- C.getSValBuilder().evalBinOpLN(state, BO_Add, *dstRegVal,
- *knownStrLength, Dst->getType());
+ QualType ptrTy = Dst->getType();
+
+ // If we have an exact value on a bounded copy, use that to check for
+ // overflows, rather than our estimate about how much is actually copied.
+ if (boundWarning) {
+ if (NonLoc *maxLastNL = dyn_cast<NonLoc>(&maxLastElementIndex)) {
+ SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *maxLastNL, ptrTy);
+ state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
+ boundWarning);
+ if (!state)
+ return;
+ }
+ }
- state = CheckLocation(C, state, Dst, lastElement, /* IsDst = */ true);
- if (!state)
- return;
+ // Then, if the final length is known...
+ if (NonLoc *knownStrLength = dyn_cast<NonLoc>(&finalStrLength)) {
+ SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *knownStrLength, ptrTy);
+
+ // ...and we haven't checked the bound, we'll check the actual copy.
+ if (!boundWarning) {
+ const char * const warningMsg =
+ "String copy function overflows destination buffer";
+ state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+ if (!state)
+ return;
+ }
// If this is a stpcpy-style copy, the last element is the return value.
if (returnEnd)
@@ -1107,16 +1463,25 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// string, but that's still an improvement over blank invalidation.
state = InvalidateBuffer(C, state, Dst, *dstRegVal);
- // Set the C string length of the destination.
- state = setCStringLength(state, dstRegVal->getRegion(), strLength);
+ // Set the C string length of the destination, if we know it.
+ if (isBounded && !isAppending) {
+ // strncpy is annoying in that it doesn't guarantee to null-terminate
+ // the result string. If the original string didn't fit entirely inside
+ // the bound (including the null-terminator), we don't know how long the
+ // result is.
+ if (amountCopied != strLength)
+ finalStrLength = UnknownVal();
+ }
+ state = setCStringLength(state, dstRegVal->getRegion(), finalStrLength);
}
+ assert(state);
+
// If this is a stpcpy-style copy, but we were unable to check for a buffer
// overflow, we still need a result. Conjure a return value.
if (returnEnd && Result.isUnknown()) {
- SValBuilder &svalBuilder = C.getSValBuilder();
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
- strLength = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
+ Result = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
}
// Set the return value.
@@ -1125,29 +1490,30 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
}
void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
- //int strcmp(const char *restrict s1, const char *restrict s2);
+ //int strcmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
}
void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
- //int strncmp(const char *restrict s1, const char *restrict s2, size_t n);
+ //int strncmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
const CallExpr *CE) const {
- //int strcasecmp(const char *restrict s1, const char *restrict s2);
+ //int strcasecmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
const CallExpr *CE) const {
- //int strncasecmp(const char *restrict s1, const char *restrict s2, size_t n);
+ //int strncasecmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
}
void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
bool isBounded, bool ignoreCase) const {
+ CurrentFunctionDescription = "string comparison function";
const GRState *state = C.getState();
// Check that the first string is non-null
@@ -1174,52 +1540,96 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (s2Length.isUndef())
return;
- // Get the string literal of the first string.
- const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
- if (!s1StrLiteral)
- return;
- llvm::StringRef s1StrRef = s1StrLiteral->getString();
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(s1Val);
+ DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(s2Val);
+
+ // See if they are the same.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+ const GRState *StSameBuf, *StNotSameBuf;
+ llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is 0,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ StSameBuf = StSameBuf->BindExpr(CE, svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(StSameBuf);
+
+ // If the two arguments are GUARANTEED to be the same, we're done!
+ if (!StNotSameBuf)
+ return;
+ }
+
+ assert(StNotSameBuf);
+ state = StNotSameBuf;
+
+ // At this point we can go about comparing the two buffers.
+ // For now, we only do this if they're both known string literals.
- // Get the string literal of the second string.
+ // Attempt to extract string literals from both expressions.
+ const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
- if (!s2StrLiteral)
- return;
- llvm::StringRef s2StrRef = s2StrLiteral->getString();
+ bool canComputeResult = false;
+
+ if (s1StrLiteral && s2StrLiteral) {
+ llvm::StringRef s1StrRef = s1StrLiteral->getString();
+ llvm::StringRef s2StrRef = s2StrLiteral->getString();
+
+ if (isBounded) {
+ // Get the max number of characters to compare.
+ const Expr *lenExpr = CE->getArg(2);
+ SVal lenVal = state->getSVal(lenExpr);
+
+ // If the length is known, we can get the right substrings.
+ if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
+ // Create substrings of each to compare the prefix.
+ s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
+ s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+ canComputeResult = true;
+ }
+ } else {
+ // This is a normal, unbounded strcmp.
+ canComputeResult = true;
+ }
- int result;
- if (isBounded) {
- // Get the max number of characters to compare.
- const Expr *lenExpr = CE->getArg(2);
- SVal lenVal = state->getSVal(lenExpr);
+ if (canComputeResult) {
+ // Real strcmp stops at null characters.
+ size_t s1Term = s1StrRef.find('\0');
+ if (s1Term != llvm::StringRef::npos)
+ s1StrRef = s1StrRef.substr(0, s1Term);
- // Dynamically cast the length to a ConcreteInt. If it is not a ConcreteInt
- // then give up, otherwise get the value and use it as the bounds.
- nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&lenVal);
- if (!CI)
- return;
- llvm::APSInt lenInt(CI->getValue());
+ size_t s2Term = s2StrRef.find('\0');
+ if (s2Term != llvm::StringRef::npos)
+ s2StrRef = s2StrRef.substr(0, s2Term);
+
+ // Use StringRef's comparison methods to compute the actual result.
+ int result;
- // Create substrings of each to compare the prefix.
- s1StrRef = s1StrRef.substr(0, (size_t)lenInt.getLimitedValue());
- s2StrRef = s2StrRef.substr(0, (size_t)lenInt.getLimitedValue());
+ if (ignoreCase) {
+ // Compare string 1 to string 2 the same way strcasecmp() does.
+ result = s1StrRef.compare_lower(s2StrRef);
+ } else {
+ // Compare string 1 to string 2 the same way strcmp() does.
+ result = s1StrRef.compare(s2StrRef);
+ }
+
+ // Build the SVal of the comparison and bind the return value.
+ SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
+ state = state->BindExpr(CE, resultVal);
+ }
}
- if (ignoreCase) {
- // Compare string 1 to string 2 the same way strcasecmp() does.
- result = s1StrRef.compare_lower(s2StrRef);
- } else {
- // Compare string 1 to string 2 the same way strcmp() does.
- result = s1StrRef.compare(s2StrRef);
+ if (!canComputeResult) {
+ // Conjure a symbolic value. It's the best we can do.
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, Count);
+ state = state->BindExpr(CE, resultVal);
}
-
- // Build the SVal of the comparison to bind the return value.
- SValBuilder &svalBuilder = C.getSValBuilder();
- QualType intTy = svalBuilder.getContext().IntTy;
- SVal resultVal = svalBuilder.makeIntVal(result, intTy);
- // Bind the return value of the expression.
- // Set the return value.
- state = state->BindExpr(CE, resultVal);
+ // Record this as a possible path.
C.addTransition(state);
}
@@ -1251,7 +1661,7 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
.Cases("memcmp", "bcmp", &CStringChecker::evalMemcmp)
.Cases("memmove", "__memmove_chk", &CStringChecker::evalMemmove)
.Cases("strcpy", "__strcpy_chk", &CStringChecker::evalStrcpy)
- //.Cases("strncpy", "__strncpy_chk", &CStringChecker::evalStrncpy)
+ .Cases("strncpy", "__strncpy_chk", &CStringChecker::evalStrncpy)
.Cases("stpcpy", "__stpcpy_chk", &CStringChecker::evalStpcpy)
.Cases("strcat", "__strcat_chk", &CStringChecker::evalStrcat)
.Cases("strncat", "__strncat_chk", &CStringChecker::evalStrncat)
@@ -1268,6 +1678,10 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
if (!evalFunction)
return false;
+ // Make sure each function sets its own description.
+ // (But don't bother in a release build.)
+ assert(!(CurrentFunctionDescription = NULL));
+
// Check and evaluate the call.
(this->*evalFunction)(C, CE);
return true;
@@ -1373,8 +1787,10 @@ void CStringChecker::checkLiveSymbols(const GRState *state,
for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end();
I != E; ++I) {
SVal Len = I.getData();
- if (SymbolRef Sym = Len.getAsSymbol())
- SR.markInUse(Sym);
+
+ for (SVal::symbol_iterator si = Len.symbol_begin(), se = Len.symbol_end();
+ si != se; ++si)
+ SR.markInUse(*si);
}
}
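
The largest new piece in the CStringChecker changes above is checkAdditionOverflow, which guards the strcat-style length math. Stripped of the symbolic machinery, the test is the classic unsigned-overflow check: instead of computing left + right and risking wraparound, compare left against SIZE_MAX - right. A minimal concrete sketch of the same guard, assuming plain size_t operands rather than symbolic SVals:

#include <cstddef>
#include <limits>

// Returns true if a + b would exceed SIZE_MAX, without ever performing the
// wrapping addition itself -- the same test checkAdditionOverflow encodes
// symbolically as "left > maxVal - right".
bool additionOverflows(std::size_t a, std::size_t b) {
  const std::size_t maxVal = std::numeric_limits<std::size_t>::max();
  return a > maxVal - b;
}

// Example: the combined length a strcat-style call would produce. If this
// returns false, the concatenated string cannot be represented as a size_t,
// which is the condition the checker reports.
bool strcatLengthRepresentable(std::size_t dstLen, std::size_t srcLen) {
  return !additionOverflows(dstLen, srcLen);
}
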
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index dfe0a0e6f50a..6c3dfacec331 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -282,7 +282,7 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
return triple.getVendor() == llvm::Triple::Apple &&
- (triple.getDarwinMajorNumber() >= 9 ||
+ (!triple.isMacOSXVersionLT(10,5) ||
triple.getArch() == llvm::Triple::arm ||
triple.getArch() == llvm::Triple::thumb);
}
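
The one-line change above draws effectively the same cutoff in different terms: Darwin kernel major 9 shipped as Mac OS X 10.5, so getDarwinMajorNumber() >= 9 and !isMacOSXVersionLT(10, 5) express the same minimum release, the latter in OS-version form. A small sketch of that comparison with plain version pairs; the helper below is hypothetical and not the llvm::Triple API.

#include <utility>

// Darwin kernel 9 corresponds to Mac OS X 10.5 (Leopard).
using OSXVersion = std::pair<int, int>;   // {major, minor}

bool isVersionLT(OSXVersion v, int major, int minor) {
  return v < OSXVersion(major, minor);    // lexicographic comparison
}

// Old predicate, in Darwin terms: darwinMajor >= 9.
// New predicate, in OS X terms:   !(version < 10.5).
bool atLeastLeopard(OSXVersion osxVersion) {
  return !isVersionLT(osxVersion, 10, 5);
}
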
diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td
index 1a71fc4074ba..2c196b597b7d 100644
--- a/lib/StaticAnalyzer/Checkers/Checkers.td
+++ b/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -168,10 +168,6 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
let ParentPackage = CplusplusExperimental in {
-def CStringChecker : Checker<"CString">,
- HelpText<"Check calls to functions in <string.h>">,
- DescFile<"CStringChecker.cpp">;
-
def IteratorsChecker : Checker<"Iterators">,
HelpText<"Check improper uses of STL vector iterators">,
DescFile<"IteratorsChecker.cpp">;
@@ -244,6 +240,10 @@ def ChrootChecker : Checker<"Chroot">,
HelpText<"Check improper use of chroot">,
DescFile<"ChrootChecker.cpp">;
+def CStringChecker : Checker<"CString">,
+ HelpText<"Check calls to functions in <string.h>">,
+ DescFile<"CStringChecker.cpp">;
+
def MallocChecker : Checker<"Malloc">,
HelpText<"Check for potential memory leaks, double free, and use-after-free problems">,
DescFile<"MallocChecker.cpp">;
diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index bc1d823dde03..ec2a88a55681 100644
--- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -254,13 +254,13 @@ public:
return;
if (Expr* E = V->getInit()) {
+ while (ExprWithCleanups *exprClean = dyn_cast<ExprWithCleanups>(E))
+ E = exprClean->getSubExpr();
+
// Don't warn on C++ objects (yet) until we can show that their
// constructors/destructors don't have side effects.
if (isa<CXXConstructExpr>(E))
return;
-
- if (isa<ExprWithCleanups>(E))
- return;
// A dead initialization is a variable that is dead after it
// is initialized. We don't flag warnings for those variables
diff --git a/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp b/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
index e4e5f54770b3..de6da4f8c377 100644
--- a/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
@@ -237,8 +237,11 @@ const GRState *IteratorsChecker::invalidateIterators(const GRState *state,
const GRState *IteratorsChecker::handleAssign(const GRState *state,
const Expr *lexp, const Expr *rexp, const LocationContext *LC) const {
// Skip the cast if present.
- if (isa<ImplicitCastExpr>(lexp))
- lexp = dyn_cast<ImplicitCastExpr>(lexp)->getSubExpr();
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(lexp))
+ lexp = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(lexp))
+ lexp = ICE->getSubExpr();
SVal sv = state->getSVal(lexp);
const MemRegion *MR = sv.getAsRegion();
if (!MR)
@@ -260,8 +263,11 @@ const GRState *IteratorsChecker::handleAssign(const GRState *state,
const MemRegion *MR, const Expr *rexp, const LocationContext *LC) const {
// Assume unknown until we find something definite.
state = state->set<IteratorState>(MR, RefState::getUnknown());
- if (isa<ImplicitCastExpr>(rexp))
- rexp = dyn_cast<ImplicitCastExpr>(rexp)->getSubExpr();
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(rexp))
+ rexp = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(rexp))
+ rexp = ICE->getSubExpr();
// Need to handle three cases: MemberCall, copy, copy with addition.
if (const CallExpr *CE = dyn_cast<CallExpr>(rexp)) {
// Handle MemberCall.
@@ -347,8 +353,10 @@ const DeclRefExpr *IteratorsChecker::getDeclRefExpr(const Expr *E) const {
E = CE->getArg(0);
}
}
- if (isa<ImplicitCastExpr>(E))
- E = dyn_cast<ImplicitCastExpr>(E)->getSubExpr();
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
// If it isn't one of our types, don't do anything.
if (getTemplateKind(E->getType()) != VectorIteratorKind)
return NULL;
@@ -520,8 +528,11 @@ void IteratorsChecker::checkPreStmt(const DeclStmt *DS,
if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitEx)) {
if (CE->getNumArgs() == 1) {
const Expr *E = CE->getArg(0);
- if (isa<ImplicitCastExpr>(E))
- InitEx = dyn_cast<ImplicitCastExpr>(E)->getSubExpr();
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ InitEx = ICE->getSubExpr();
state = handleAssign(state, MR, InitEx,
C.getPredecessor()->getLocationContext());
}
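
Each IteratorsChecker hunk above repeats the same two-step unwrap -- look through a MaterializeTemporaryExpr, then through an ImplicitCastExpr -- before inspecting the expression. The sequence factors naturally into a helper; the sketch below is hypothetical and not part of the patch, but it uses only the calls already visible in the diff.

#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
using namespace clang;

// Hypothetical helper: peel the two wrapper nodes the checker now looks
// through before examining an iterator expression.
static const Expr *lookThroughTemporariesAndCasts(const Expr *E) {
  if (const MaterializeTemporaryExpr *M =
        llvm::dyn_cast<MaterializeTemporaryExpr>(E))
    E = M->GetTemporaryExpr();
  if (const ImplicitCastExpr *ICE = llvm::dyn_cast<ImplicitCastExpr>(E))
    E = ICE->getSubExpr();
  return E;
}
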
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 12ce86613ddc..f8d076b54e3d 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -31,22 +31,17 @@ using namespace ento;
namespace {
class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- enum SubChecks {
- DispatchOnce = 0,
- DispatchOnceF,
- NumChecks
- };
-
- mutable BugType *BTypes[NumChecks];
+ mutable llvm::OwningPtr<BugType> BT_dispatchOnce;
public:
- MacOSXAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
- ~MacOSXAPIChecker() {
- for (unsigned i=0; i != NumChecks; ++i)
- delete BTypes[i];
- }
-
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ const IdentifierInfo *FI) const;
+
+ typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *,
+ const IdentifierInfo *) const;
};
} //end anonymous namespace
@@ -54,16 +49,8 @@ public:
// dispatch_once and dispatch_once_f
//===----------------------------------------------------------------------===//
-static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
- BugType *&BT, const IdentifierInfo *FI) {
-
- if (!BT) {
- llvm::SmallString<128> S;
- llvm::raw_svector_ostream os(S);
- os << "Improper use of '" << FI->getName() << '\'';
- BT = new BugType(os.str(), "Mac OS X API");
- }
-
+void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ const IdentifierInfo *FI) const {
if (CE->getNumArgs() < 1)
return;
@@ -78,6 +65,10 @@ static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (!N)
return;
+ if (!BT_dispatchOnce)
+ BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
+ "Mac OS X API"));
+
llvm::SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to '" << FI->getName() << "' uses";
@@ -90,7 +81,7 @@ static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
os << " Perhaps you intended to declare the variable as 'static'?";
- EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
+ RangedBugReport *report = new RangedBugReport(*BT_dispatchOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.EmitReport(report);
}
@@ -99,47 +90,29 @@ static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
// Central dispatch function.
//===----------------------------------------------------------------------===//
-typedef void (*SubChecker)(CheckerContext &C, const CallExpr *CE, BugType *&BT,
- const IdentifierInfo *FI);
-namespace {
- class SubCheck {
- SubChecker SC;
- BugType **BT;
- public:
- SubCheck(SubChecker sc, BugType *& bt) : SC(sc), BT(&bt) {}
- SubCheck() : SC(NULL), BT(NULL) {}
-
- void run(CheckerContext &C, const CallExpr *CE,
- const IdentifierInfo *FI) const {
- if (SC)
- SC(C, CE, *BT, FI);
- }
- };
-} // end anonymous namespace
-
void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
- // FIXME: Mostly copy and paste from UnixAPIChecker. Should refactor.
+ // FIXME: This sort of logic is common to several checkers, including
+ // UnixAPIChecker, PthreadLockChecker, and CStringChecker. Should refactor.
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
- const FunctionTextRegion *Fn =
- dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+ const FunctionDecl *Fn = state->getSVal(Callee).getAsFunctionDecl();
if (!Fn)
return;
- const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ const IdentifierInfo *FI = Fn->getIdentifier();
if (!FI)
return;
- const SubCheck &SC =
- llvm::StringSwitch<SubCheck>(FI->getName())
- .Case("dispatch_once", SubCheck(CheckDispatchOnce, BTypes[DispatchOnce]))
- .Case("dispatch_once_f", SubCheck(CheckDispatchOnce,
- BTypes[DispatchOnceF]))
- .Default(SubCheck());
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(FI->getName())
+ .Cases("dispatch_once", "dispatch_once_f",
+ &MacOSXAPIChecker::CheckDispatchOnce)
+ .Default(NULL);
- SC.run(C, CE, FI);
+ if (SC)
+ (this->*SC)(C, CE, FI);
}
//===----------------------------------------------------------------------===//
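
The MacOSXAPIChecker rewrite above drops the SubCheck wrapper class in favor of a pointer-to-member-function selected by callee name. The dispatch idiom itself is plain C++; the standalone sketch below reproduces it with hypothetical checker and callback names, and with a simple comparison chain standing in for llvm::StringSwitch.

#include <iostream>
#include <string>

class APIChecker {
  void checkDispatchOnce(const std::string &callee) const {
    std::cout << "checking " << callee << " for non-static predicate storage\n";
  }

  // Giving every sub-check the same signature lets them share one table.
  typedef void (APIChecker::*SubChecker)(const std::string &) const;

public:
  void checkCall(const std::string &callee) const {
    // StringSwitch in the real code; a plain comparison chain here.
    SubChecker SC = 0;
    if (callee == "dispatch_once" || callee == "dispatch_once_f")
      SC = &APIChecker::checkDispatchOnce;

    if (SC)
      (this->*SC)(callee);   // invoke through the member pointer
  }
};

int main() {
  APIChecker checker;
  checker.checkCall("dispatch_once");   // dispatched to checkDispatchOnce
  checker.checkCall("printf");          // no sub-check registered, ignored
}
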
diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index 07de8706df24..73ce359edc3e 100644
--- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -121,6 +121,11 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
return;
if (R->hasStackStorage()) {
+ // Automatic reference counting automatically copies blocks.
+ if (C.getASTContext().getLangOptions().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(R))
+ return;
+
EmitStackError(C, R, RetE);
return;
}
@@ -135,12 +140,13 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
// a memory region in the stack space.
class CallBack : public StoreManager::BindingsHandler {
private:
+ ExprEngine &Eng;
const StackFrameContext *CurSFC;
public:
llvm::SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
- CallBack(const LocationContext *LCtx)
- : CurSFC(LCtx->getCurrentStackFrame()) {}
+ CallBack(ExprEngine &Eng, const LocationContext *LCtx)
+ : Eng(Eng), CurSFC(LCtx->getCurrentStackFrame()) {}
bool HandleBinding(StoreManager &SMgr, Store store,
const MemRegion *region, SVal val) {
@@ -151,7 +157,13 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
const MemRegion *vR = val.getAsRegion();
if (!vR)
return true;
-
+
+ // Under automated retain release, it is okay to assign a block
+ // directly to a global variable.
+ if (Eng.getContext().getLangOptions().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(vR))
+ return true;
+
if (const StackSpaceRegion *SSR =
dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
// If the global variable holds a location in the current stack frame,
@@ -164,7 +176,7 @@ void StackAddrEscapeChecker::checkEndPath(EndOfFunctionNodeBuilder &B,
}
};
- CallBack cb(B.getPredecessor()->getLocationContext());
+ CallBack cb(Eng, B.getPredecessor()->getLocationContext());
state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
if (cb.V.empty())
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 48d7c367a96d..0ecc391f3e8f 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -28,26 +28,18 @@ using llvm::Optional;
namespace {
class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- enum SubChecks {
- OpenFn = 0,
- PthreadOnceFn = 1,
- MallocZero = 2,
- NumChecks
- };
-
- mutable BugType *BTypes[NumChecks];
-
-public:
+ mutable llvm::OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
mutable Optional<uint64_t> Val_O_CREAT;
public:
- UnixAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
- ~UnixAPIChecker() {
- for (unsigned i=0; i != NumChecks; ++i)
- delete BTypes[i];
- }
-
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
+ void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
+
+ typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *) const;
};
} //end anonymous namespace
@@ -55,23 +47,23 @@ public:
// Utility functions.
//===----------------------------------------------------------------------===//
-static inline void LazyInitialize(BugType *&BT, const char *name) {
+static inline void LazyInitialize(llvm::OwningPtr<BugType> &BT,
+ const char *name) {
if (BT)
return;
- BT = new BugType(name, "Unix API");
+ BT.reset(new BugType(name, "Unix API"));
}
//===----------------------------------------------------------------------===//
// "open" (man 2 open)
//===----------------------------------------------------------------------===//
-static void CheckOpen(CheckerContext &C, const UnixAPIChecker &UC,
- const CallExpr *CE, BugType *&BT) {
+void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
- if (!UC.Val_O_CREAT.hasValue()) {
+ if (!Val_O_CREAT.hasValue()) {
if (C.getASTContext().Target.getTriple().getVendor() == llvm::Triple::Apple)
- UC.Val_O_CREAT = 0x0200;
+ Val_O_CREAT = 0x0200;
else {
// FIXME: We need a more general way of getting the O_CREAT value.
// We could possibly grovel through the preprocessor state, but
@@ -80,8 +72,6 @@ static void CheckOpen(CheckerContext &C, const UnixAPIChecker &UC,
}
}
- LazyInitialize(BT, "Improper use of 'open'");
-
// Look at the 'oflags' argument for the O_CREAT flag.
const GRState *state = C.getState();
@@ -101,7 +91,7 @@ static void CheckOpen(CheckerContext &C, const UnixAPIChecker &UC,
}
NonLoc oflags = cast<NonLoc>(V);
NonLoc ocreateFlag =
- cast<NonLoc>(C.getSValBuilder().makeIntVal(UC.Val_O_CREAT.getValue(),
+ cast<NonLoc>(C.getSValBuilder().makeIntVal(Val_O_CREAT.getValue(),
oflagsEx->getType()));
SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
oflags, ocreateFlag,
@@ -124,8 +114,10 @@ static void CheckOpen(CheckerContext &C, const UnixAPIChecker &UC,
if (!N)
return;
- EnhancedBugReport *report =
- new EnhancedBugReport(*BT,
+ LazyInitialize(BT_open, "Improper use of 'open'");
+
+ RangedBugReport *report =
+ new RangedBugReport(*BT_open,
"Call to 'open' requires a third argument when "
"the 'O_CREAT' flag is set", N);
report->addRange(oflagsEx->getSourceRange());
@@ -137,14 +129,12 @@ static void CheckOpen(CheckerContext &C, const UnixAPIChecker &UC,
// pthread_once
//===----------------------------------------------------------------------===//
-static void CheckPthreadOnce(CheckerContext &C, const UnixAPIChecker &,
- const CallExpr *CE, BugType *&BT) {
+void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
+ const CallExpr *CE) const {
// This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
// They can possibly be refactored.
- LazyInitialize(BT, "Improper use of 'pthread_once'");
-
if (CE->getNumArgs() < 1)
return;
@@ -171,7 +161,9 @@ static void CheckPthreadOnce(CheckerContext &C, const UnixAPIChecker &,
if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
os << " Perhaps you intended to declare the variable as 'static'?";
- EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
+ LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
+
+ RangedBugReport *report = new RangedBugReport(*BT_pthreadOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.EmitReport(report);
}
@@ -182,8 +174,8 @@ static void CheckPthreadOnce(CheckerContext &C, const UnixAPIChecker &,
// FIXME: Eventually this should be rolled into the MallocChecker, but this
// check is more basic and is valuable for widespread use.
-static void CheckMallocZero(CheckerContext &C, const UnixAPIChecker &UC,
- const CallExpr *CE, BugType *&BT) {
+void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
// Sanity check that malloc takes one argument.
if (CE->getNumArgs() != 1)
@@ -208,11 +200,11 @@ static void CheckMallocZero(CheckerContext &C, const UnixAPIChecker &UC,
// FIXME: Add reference to CERT advisory, and/or C99 standard in bug
// output.
- LazyInitialize(BT, "Undefined allocation of 0 bytes");
+ LazyInitialize(BT_mallocZero, "Undefined allocation of 0 bytes");
EnhancedBugReport *report =
- new EnhancedBugReport(*BT, "Call to 'malloc' has an allocation size"
- " of 0 bytes", N);
+ new EnhancedBugReport(*BT_mallocZero, "Call to 'malloc' has an allocation"
+ " size of 0 bytes", N);
report->addRange(CE->getArg(0)->getSourceRange());
report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
CE->getArg(0));
@@ -230,51 +222,29 @@ static void CheckMallocZero(CheckerContext &C, const UnixAPIChecker &UC,
// Central dispatch function.
//===----------------------------------------------------------------------===//
-typedef void (*SubChecker)(CheckerContext &C, const UnixAPIChecker &UC,
- const CallExpr *CE, BugType *&BT);
-namespace {
- class SubCheck {
- SubChecker SC;
- const UnixAPIChecker *UC;
- BugType **BT;
- public:
- SubCheck(SubChecker sc, const UnixAPIChecker *uc, BugType *& bt)
- : SC(sc), UC(uc), BT(&bt) {}
- SubCheck() : SC(NULL), UC(NULL), BT(NULL) {}
-
- void run(CheckerContext &C, const CallExpr *CE) const {
- if (SC)
- SC(C, *UC, CE, *BT);
- }
- };
-} // end anonymous namespace
-
void UnixAPIChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
// Get the callee. All the functions we care about are C functions
// with simple identifiers.
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
- const FunctionTextRegion *Fn =
- dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+ const FunctionDecl *Fn = state->getSVal(Callee).getAsFunctionDecl();
if (!Fn)
return;
- const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ const IdentifierInfo *FI = Fn->getIdentifier();
if (!FI)
return;
- const SubCheck &SC =
- llvm::StringSwitch<SubCheck>(FI->getName())
- .Case("open",
- SubCheck(CheckOpen, this, BTypes[OpenFn]))
- .Case("pthread_once",
- SubCheck(CheckPthreadOnce, this, BTypes[PthreadOnceFn]))
- .Case("malloc",
- SubCheck(CheckMallocZero, this, BTypes[MallocZero]))
- .Default(SubCheck());
-
- SC.run(C, CE);
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(FI->getName())
+ .Case("open", &UnixAPIChecker::CheckOpen)
+ .Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
+ .Case("malloc", &UnixAPIChecker::CheckMallocZero)
+ .Default(NULL);
+
+ if (SC)
+ (this->*SC)(C, CE);
}
//===----------------------------------------------------------------------===//
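A standalone sketch (separate from the patch) of the dispatch pattern the hunk above adopts: the per-call helpers become const member functions, selected through a pointer-to-member-function keyed on the callee name. Plain string comparisons stand in for llvm::StringSwitch, and the toy Checker class is invented for illustration only.

#include <iostream>
#include <string>

class Checker {
  typedef void (Checker::*SubChecker)(const std::string &Name) const;

  void checkOpen(const std::string &Name) const {
    std::cout << "running the 'open' check on " << Name << "\n";
  }
  void checkMallocZero(const std::string &Name) const {
    std::cout << "running the malloc(0) check on " << Name << "\n";
  }

public:
  void checkCall(const std::string &Name) const {
    SubChecker SC = 0;
    if (Name == "open")        SC = &Checker::checkOpen;
    else if (Name == "malloc") SC = &Checker::checkMallocZero;
    // Unknown callees fall through with SC == 0, like .Default(NULL) above.

    if (SC)
      (this->*SC)(Name);       // same call form as (this->*SC)(C, CE)
  }
};

int main() {
  Checker C;
  C.checkCall("malloc");
  C.checkCall("fopen");        // silently ignored
  return 0;
}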
diff --git a/lib/StaticAnalyzer/Core/CFRefCount.cpp b/lib/StaticAnalyzer/Core/CFRefCount.cpp
index 0512e2f08d04..bf5302920819 100644
--- a/lib/StaticAnalyzer/Core/CFRefCount.cpp
+++ b/lib/StaticAnalyzer/Core/CFRefCount.cpp
@@ -126,6 +126,7 @@ public:
/// ArgEffect is used to summarize a function/method call's effect on a
/// particular argument.
enum ArgEffect { Autorelease, Dealloc, DecRef, DecRefMsg, DoNothing,
+ DecRefBridgedTransfered,
DoNothingByRef, IncRefMsg, IncRef, MakeCollectable, MayEscape,
NewAutoreleasePool, SelfOwn, StopTracking };
@@ -148,7 +149,8 @@ namespace {
class RetEffect {
public:
enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
- NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
+ NotOwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol,
+ ReceiverAlias,
OwnedWhenTrackedReceiver };
enum ObjKind { CF, ObjC, AnyObj };
@@ -195,7 +197,9 @@ public:
static RetEffect MakeGCNotOwned() {
return RetEffect(GCNotOwnedSymbol, ObjC);
}
-
+ static RetEffect MakeARCNotOwned() {
+ return RetEffect(ARCNotOwnedSymbol, ObjC);
+ }
static RetEffect MakeNoRet() {
return RetEffect(NoRet);
}
@@ -636,6 +640,9 @@ class RetainSummaryManager {
/// GCEnabled - Records whether or not the analyzed code runs in GC mode.
const bool GCEnabled;
+ /// Records whether or not the analyzed code runs in ARC mode.
+ const bool ARCEnabled;
+
/// FuncSummaries - A map from FunctionDecls to summaries.
FuncSummariesTy FuncSummaries;
@@ -788,14 +795,20 @@ private:
public:
- RetainSummaryManager(ASTContext& ctx, bool gcenabled)
+ RetainSummaryManager(ASTContext& ctx, bool gcenabled, bool usesARC)
: Ctx(ctx),
CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
- GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
- ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned()
- : RetEffect::MakeOwned(RetEffect::ObjC, true)),
- ObjCInitRetE(gcenabled ? RetEffect::MakeGCNotOwned()
- : RetEffect::MakeOwnedWhenTrackedReceiver()),
+ GCEnabled(gcenabled),
+ ARCEnabled(usesARC),
+ AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
+ ObjCAllocRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeARCNotOwned()
+ : RetEffect::MakeOwned(RetEffect::ObjC, true))),
+ ObjCInitRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeARCNotOwned()
+ : RetEffect::MakeOwnedWhenTrackedReceiver())),
DefaultSummary(AF.getEmptyMap() /* per-argument effects (none) */,
RetEffect::MakeNoRet() /* return effect */,
MayEscape, /* default argument effect */
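A standalone sketch (separate from the patch) of the precedence encoded in the ObjCAllocRetE / ObjCInitRetE initializers above: GC mode is checked first, then ARC, and only plain manual retain/release yields an owned (+1) return. The enum and helper here are invented for illustration.

#include <cassert>

enum AllocRetKind { OwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol };

static AllocRetKind allocReturnEffect(bool GCEnabled, bool ARCEnabled) {
  if (GCEnabled)
    return GCNotOwnedSymbol;   // under GC the collector owns the result
  if (ARCEnabled)
    return ARCNotOwnedSymbol;  // under ARC the compiler balances the +1
  return OwnedSymbol;          // manual retain/release: caller owns a +1
}

int main() {
  assert(allocReturnEffect(true,  true)  == GCNotOwnedSymbol);
  assert(allocReturnEffect(false, true)  == ARCNotOwnedSymbol);
  assert(allocReturnEffect(false, false) == OwnedSymbol);
  return 0;
}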
@@ -871,6 +884,10 @@ public:
bool isGCEnabled() const { return GCEnabled; }
+ bool isARCEnabled() const { return ARCEnabled; }
+
+ bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
+
RetainSummary *copySummary(RetainSummary *OldSumm) {
RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(*OldSumm);
@@ -1118,8 +1135,7 @@ RetainSummary* RetainSummaryManager::getSummary(const FunctionDecl* FD) {
RetainSummary*
RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl* FD,
StringRef FName) {
- if (FName.find("Create") != StringRef::npos ||
- FName.find("Copy") != StringRef::npos)
+ if (coreFoundation::followsCreateRule(FName))
return getCFSummaryCreateRule(FD);
return getCFSummaryGetRule(FD);
@@ -1189,7 +1205,8 @@ RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
assert(ScratchArgs.isEmpty());
// 'init' methods conceptually return a newly allocated object and claim
// the receiver.
- if (cocoa::isCocoaObjectRef(RetTy) || cocoa::isCFObjectRef(RetTy))
+ if (cocoa::isCocoaObjectRef(RetTy) ||
+ coreFoundation::isCFObjectRef(RetTy))
return getPersistentSummary(ObjCInitRetE, DecRefMsg);
return getDefaultSummary();
@@ -1332,15 +1349,15 @@ RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
if (cocoa::isCocoaObjectRef(RetTy)) {
// EXPERIMENTAL: assume the Cocoa conventions for all objects returned
// by instance methods.
- RetEffect E = cocoa::followsFundamentalRule(S)
+ RetEffect E = cocoa::followsFundamentalRule(S, MD)
? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
return getPersistentSummary(E, ReceiverEff, MayEscape);
}
// Look for methods that return an owned core foundation object.
- if (cocoa::isCFObjectRef(RetTy)) {
- RetEffect E = cocoa::followsFundamentalRule(S)
+ if (coreFoundation::isCFObjectRef(RetTy)) {
+ RetEffect E = cocoa::followsFundamentalRule(S, MD)
? RetEffect::MakeOwned(RetEffect::CF, true)
: RetEffect::MakeNotOwned(RetEffect::CF);
@@ -1411,7 +1428,7 @@ RetainSummaryManager::getInstanceMethodSummary(Selector S,
assert(ScratchArgs.isEmpty());
// "initXXX": pass-through for receiver.
- if (cocoa::deriveNamingConvention(S) == cocoa::InitRule)
+ if (cocoa::deriveNamingConvention(S, MD) == cocoa::InitRule)
Summ = getInitMethodSummary(RetTy);
else
Summ = getCommonMethodSummary(MD, S, RetTy);
@@ -1654,7 +1671,6 @@ public:
const char* nl, const char* sep);
};
-private:
typedef llvm::DenseMap<const ExplodedNode*, const RetainSummary*>
SummaryLogTy;
@@ -1691,7 +1707,7 @@ private:
public:
CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts)
- : Summaries(Ctx, gcenabled),
+ : Summaries(Ctx, gcenabled, (bool)lopts.ObjCAutoRefCount),
LOpts(lopts), useAfterRelease(0), releaseNotOwned(0),
deallocGC(0), deallocNotOwned(0),
leakWithinFunction(0), leakAtReturn(0), overAutorelease(0),
@@ -1706,6 +1722,8 @@ public:
}
bool isGCEnabled() const { return Summaries.isGCEnabled(); }
+ bool isARCorGCEnabled() const { return Summaries.isARCorGCEnabled(); }
+
const LangOptions& getLangOptions() const { return LOpts; }
const RetainSummary *getSummaryOfNode(const ExplodedNode *N) const {
@@ -1907,7 +1925,7 @@ namespace {
CFRefBug(tf, "Method should return an owned object") {}
const char *getDescription() const {
- return "Object with +0 retain counts returned to caller where a +1 "
+ return "Object with a +0 retain count returned to caller where a +1 "
"(owning) retain count is expected";
}
};
@@ -2094,7 +2112,7 @@ PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode* N,
if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) {
assert(CurrV.getObjKind() == RetEffect::CF);
- os << " "
+ os << ". "
"Core Foundation objects are not automatically garbage collected.";
}
}
@@ -2416,14 +2434,14 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
- // ojbects. Only "copy", "alloc", "retain" and "new" transfer ownership
+ // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
// to the caller for NS objects.
const Decl *D = &EndN->getCodeDecl();
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
os << " is returned from a method whose name ('"
<< MD->getSelector().getAsString()
<< "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
- " This violates the naming convention rules "
+ " This violates the naming convention rules"
" given in the Memory Management Guide for Cocoa";
}
else {
@@ -2431,7 +2449,7 @@ CFRefLeakReport::getEndPath(BugReporterContext& BRC,
os << " is return from a function whose name ('"
<< FD->getNameAsString()
<< "') does not contain 'Copy' or 'Create'. This violates the naming"
- " convention rules given the Memory Management Guide for Core "
+ " convention rules given the Memory Management Guide for Core"
" Foundation";
}
}
@@ -2777,6 +2795,7 @@ void CFRefCount::evalSummary(ExplodedNodeSet& Dst,
}
case RetEffect::GCNotOwnedSymbol:
+ case RetEffect::ARCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
unsigned Count = Builder.getCurrentBlockCount();
SValBuilder &svalBuilder = Eng.getSValBuilder();
@@ -3103,8 +3122,8 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
// In GC mode [... release] and [... retain] do nothing.
switch (E) {
default: break;
- case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
- case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
+ case IncRefMsg: E = isARCorGCEnabled() ? DoNothing : IncRef; break;
+ case DecRefMsg: E = isARCorGCEnabled() ? DoNothing : DecRef; break;
case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
NewAutoreleasePool; break;
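A standalone sketch (separate from the patch) of the effect lowering this hunk changes: explicit -retain/-release messages become no-ops whenever either GC or ARC manages the object, while -makeCollectable still only matters under GC. The enum is a reduced stand-in for the checker's ArgEffect.

#include <cassert>

enum Effect { DoNothing, IncRef, DecRef, IncRefMsg, DecRefMsg, MakeCollectable };

static Effect lower(Effect E, bool GCEnabled, bool ARCEnabled) {
  bool ARCorGC = GCEnabled || ARCEnabled;
  switch (E) {
  case IncRefMsg:       return ARCorGC ? DoNothing : IncRef;
  case DecRefMsg:       return ARCorGC ? DoNothing : DecRef;
  case MakeCollectable: return GCEnabled ? DecRef : DoNothing;
  default:              return E;
  }
}

int main() {
  assert(lower(DecRefMsg, false, true)  == DoNothing);    // ARC: release is a no-op
  assert(lower(DecRefMsg, false, false) == DecRef);       // MRR: release decrements
  assert(lower(MakeCollectable, true, false) == DecRef);  // GC: hand off to collector
  return 0;
}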
@@ -3118,9 +3137,13 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
}
switch (E) {
- default:
- assert (false && "Unhandled CFRef transition.");
-
+ case DecRefMsg:
+ case IncRefMsg:
+ case MakeCollectable:
+ assert(false &&
+ "DecRefMsg/IncRefMsg/MakeCollectable already transformed");
+ return state;
+
case Dealloc:
// Any use of -dealloc in GC is *bad*.
if (isGCEnabled()) {
@@ -3193,6 +3216,7 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
V = V ^ RefVal::NotOwned;
// Fall-through.
case DecRef:
+ case DecRefBridgedTransfered:
switch (V.getKind()) {
default:
// case 'RefVal::Released' handled above.
@@ -3200,7 +3224,9 @@ const GRState * CFRefCount::Update(const GRState * state, SymbolRef sym,
case RefVal::Owned:
assert(V.getCount() > 0);
- if (V.getCount() == 1) V = V ^ RefVal::Released;
+ if (V.getCount() == 1)
+ V = V ^ (E == DecRefBridgedTransfered ?
+ RefVal::NotOwned : RefVal::Released);
V = V - 1;
break;
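A standalone sketch (separate from the patch) of the transition added above: decrementing a +1 owned object normally marks it released, but a __bridge_transfer decrement hands the reference to ARC, so the object ends up not-owned at +0 instead. The RefVal struct here is a toy stand-in for the checker's RefVal.

#include <cassert>

enum Kind   { Owned, NotOwned, Released };
enum Effect { DecRef, DecRefBridgedTransfered };

struct RefVal {
  Kind K;
  unsigned Count;
};

static RefVal decrementOwned(RefVal V, Effect E) {
  assert(V.K == Owned && V.Count > 0);
  if (V.Count == 1)
    V.K = (E == DecRefBridgedTransfered) ? NotOwned : Released;
  --V.Count;
  return V;
}

int main() {
  RefVal ManualRelease  = { Owned, 1 };
  RefVal BridgeTransfer = { Owned, 1 };
  RefVal A = decrementOwned(ManualRelease,  DecRef);
  RefVal B = decrementOwned(BridgeTransfer, DecRefBridgedTransfered);
  assert(A.K == Released && A.Count == 0);   // plain release: object is gone
  assert(B.K == NotOwned && B.Count == 0);   // transfer: ARC now owns it
  return 0;
}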
@@ -3280,15 +3306,10 @@ CFRefCount::HandleAutoreleaseCounts(const GRState * state,
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
- os << "Object over-autoreleased: object was sent -autorelease";
+ os << "Object over-autoreleased: object was sent -autorelease ";
if (V.getAutoreleaseCount() > 1)
- os << V.getAutoreleaseCount() << " times";
- os << " but the object has ";
- if (V.getCount() == 0)
- os << "zero (locally visible)";
- else
- os << "+" << V.getCount();
- os << " retain counts";
+ os << V.getAutoreleaseCount() << " times ";
+ os << "but the object has a +" << V.getCount() << " retain count";
CFRefReport *report =
new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
@@ -3468,7 +3489,9 @@ void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
namespace {
class RetainReleaseChecker
- : public Checker< check::PostStmt<BlockExpr>, check::RegionChanges > {
+ : public Checker< check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>,
+ check::RegionChanges > {
public:
bool wantsRegionUpdate;
@@ -3476,6 +3499,9 @@ public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+
+ void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
+
const GRState *checkRegionChanges(const GRState *state,
const StoreManager::InvalidatedSymbols *invalidated,
const MemRegion * const *begin,
@@ -3545,6 +3571,48 @@ void RetainReleaseChecker::checkPostStmt(const BlockExpr *BE,
C.addTransition(state);
}
+void RetainReleaseChecker::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
+ if (!BE)
+ return;
+
+ ArgEffect AE = IncRef;
+
+ switch (BE->getBridgeKind()) {
+ case clang::OBC_Bridge:
+ // Do nothing.
+ return;
+ case clang::OBC_BridgeRetained:
+ AE = IncRef;
+ break;
+ case clang::OBC_BridgeTransfer:
+ AE = DecRefBridgedTransfered;
+ break;
+ }
+
+ const GRState *state = C.getState();
+ SymbolRef Sym = state->getSVal(CE).getAsLocSymbol();
+ if (!Sym)
+ return;
+ const RefVal* T = state->get<RefBindings>(Sym);
+ if (!T)
+ return;
+
+ // This is gross. Once the checker and CFRefCount are unified,
+ // this will go away.
+ CFRefCount &cf = static_cast<CFRefCount&>(C.getEngine().getTF());
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = cf.Update(state, Sym, *T, AE, hasErr);
+
+ if (hasErr) {
+
+ return;
+ }
+
+ C.generateNode(state);
+}
+
//===----------------------------------------------------------------------===//
// Transfer function creation for external clients.
//===----------------------------------------------------------------------===//
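A standalone sketch (separate from the patch) of the bridge-kind mapping used by checkPostStmt above: a plain __bridge cast leaves the count alone, __bridge_retained adds a reference, and __bridge_transfer consumes one through the bridged-transfer decrement. The enums mirror the kinds and effects named in the hunk but are redeclared here so the example stands alone.

#include <cassert>

enum BridgeKind { OBC_Bridge, OBC_BridgeRetained, OBC_BridgeTransfer };
enum ArgEffect  { DoNothing, IncRef, DecRefBridgedTransfered };

static ArgEffect effectForBridge(BridgeKind K) {
  switch (K) {
  case OBC_Bridge:         return DoNothing;                // no ownership change
  case OBC_BridgeRetained: return IncRef;                   // the CF side gains a +1
  case OBC_BridgeTransfer: return DecRefBridgedTransfered;  // ARC takes over the +1
  }
  return DoNothing; // unreachable; keeps -Wreturn-type quiet
}

int main() {
  assert(effectForBridge(OBC_Bridge)         == DoNothing);
  assert(effectForBridge(OBC_BridgeRetained) == IncRef);
  assert(effectForBridge(OBC_BridgeTransfer) == DecRefBridgedTransfered);
  return 0;
}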
diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp
index 48f126bfd863..3961c7b95259 100644
--- a/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/lib/StaticAnalyzer/Core/Environment.cpp
@@ -83,6 +83,9 @@ SVal Environment::getSVal(const Stmt *E, SValBuilder& svalBuilder,
case Stmt::CXXBindTemporaryExprClass:
E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
continue;
+ case Stmt::MaterializeTemporaryExprClass:
+ E = cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr();
+ continue;
// Handle all other Stmt* using a lookup.
default:
break;
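A standalone sketch (separate from the patch) of the unwrapping loop this hunk extends: getSVal keeps peeling transparent wrapper expressions (now including MaterializeTemporaryExpr) until it reaches a node that actually carries a binding. The toy Node type is invented for illustration.

#include <cassert>
#include <cstddef>

struct Node {
  const Node *Sub;   // non-null means "transparent wrapper around Sub"
  int Value;         // meaningful only for non-wrapper nodes
};

static int lookup(const Node *E) {
  while (E->Sub)     // skip MaterializeTemporaryExpr-style wrappers
    E = E->Sub;
  return E->Value;   // then do the ordinary lookup on the underlying expression
}

int main() {
  Node Inner   = { NULL, 42 };
  Node Wrapper = { &Inner, 0 };
  assert(lookup(&Wrapper) == 42);
  return 0;
}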
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index aed39eb0cec6..ffe5f0b6cdf2 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -442,7 +442,8 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
}
switch (S->getStmtClass()) {
- // C++ stuff we don't support yet.
+ // C++ and ARC stuff we don't support yet.
+ case Expr::ObjCIndirectCopyRestoreExprClass:
case Stmt::CXXBindTemporaryExprClass:
case Stmt::CXXCatchStmtClass:
case Stmt::CXXDependentScopeMemberExprClass:
@@ -478,6 +479,7 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
// We don't handle default arguments either yet, but we can fake it
// for now by just skipping them.
+ case Stmt::SubstNonTypeTemplateParmExprClass:
case Stmt::CXXDefaultArgExprClass: {
Dst.Add(Pred);
break;
@@ -508,7 +510,10 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
break;
case Stmt::GNUNullExprClass: {
- MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, svalBuilder.makeNull()));
+ // GNU __null is a pointer-width integer, not an actual pointer.
+ const GRState *state = GetState(Pred);
+ state = state->BindExpr(S, svalBuilder.makeIntValWithPtrWidth(0, false));
+ MakeNode(Dst, S, Pred, state);
break;
}
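A standalone sketch (separate from the patch) of the fact the GNUNullExpr change relies on: GNU __null is an integer constant whose width matches a pointer, not an actual pointer value, so the analyzer binds it as a pointer-width integer zero. The check only runs under compilers that provide the __null extension; elsewhere the block compiles to nothing.

#include <cassert>

int main() {
#if defined(__GNUG__)
  // GCC and Clang (C++ mode) define __null as a pointer-width integer constant.
  assert(sizeof(__null) == sizeof(void *));
  assert(__null == 0);
#endif
  return 0;
}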
@@ -520,14 +525,27 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
VisitObjCPropertyRefExpr(cast<ObjCPropertyRefExpr>(S), Pred, Dst);
break;
+ case Stmt::ImplicitValueInitExprClass: {
+ const GRState *state = GetState(Pred);
+ QualType ty = cast<ImplicitValueInitExpr>(S)->getType();
+ SVal val = svalBuilder.makeZeroVal(ty);
+ MakeNode(Dst, S, Pred, state->BindExpr(S, val));
+ break;
+ }
+
+ case Stmt::ExprWithCleanupsClass: {
+ Visit(cast<ExprWithCleanups>(S)->getSubExpr(), Pred, Dst);
+ break;
+ }
+
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::ExtVectorElementExprClass:
case Stmt::ImaginaryLiteralClass:
- case Stmt::ImplicitValueInitExprClass:
case Stmt::ObjCAtCatchStmtClass:
case Stmt::ObjCAtFinallyStmtClass:
case Stmt::ObjCAtTryStmtClass:
+ case Stmt::ObjCAutoreleasePoolStmtClass:
case Stmt::ObjCEncodeExprClass:
case Stmt::ObjCIsaExprClass:
case Stmt::ObjCProtocolExprClass:
@@ -548,7 +566,6 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::IntegerLiteralClass:
case Stmt::CharacterLiteralClass:
case Stmt::CXXBoolLiteralExprClass:
- case Stmt::ExprWithCleanupsClass:
case Stmt::FloatingLiteralClass:
case Stmt::SizeOfPackExprClass:
case Stmt::CXXNullPtrLiteralExprClass:
@@ -668,12 +685,35 @@ void ExprEngine::Visit(const Stmt* S, ExplodedNode* Pred,
case Stmt::CXXDynamicCastExprClass:
case Stmt::CXXReinterpretCastExprClass:
case Stmt::CXXConstCastExprClass:
- case Stmt::CXXFunctionalCastExprClass: {
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass: {
const CastExpr* C = cast<CastExpr>(S);
- VisitCast(C, C->getSubExpr(), Pred, Dst);
+ // Handle the previsit checks.
+ ExplodedNodeSet dstPrevisit;
+ getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
+
+ // Handle the expression itself.
+ ExplodedNodeSet dstExpr;
+ for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
+ e = dstPrevisit.end(); i != e ; ++i) {
+ VisitCast(C, C->getSubExpr(), *i, dstExpr);
+ }
+
+ // Handle the postvisit checks.
+ getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
break;
}
+ case Expr::MaterializeTemporaryExprClass: {
+ const MaterializeTemporaryExpr *Materialize
+ = cast<MaterializeTemporaryExpr>(S);
+ if (!Materialize->getType()->isRecordType())
+ CreateCXXTemporaryObject(Materialize->GetTemporaryExpr(), Pred, Dst);
+ else
+ Visit(Materialize->GetTemporaryExpr(), Pred, Dst);
+ break;
+ }
+
case Stmt::InitListExprClass:
VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
break;
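A standalone sketch (separate from the patch) of the previsit / evaluate / postvisit structure the cast case above switches to: checkers first get a chance to prune the incoming nodes, the cast's own transfer function then runs once per surviving node, and post-statement checkers (such as the new bridged-cast handler) run on the results. The integer "nodes" and stage functions are invented stand-ins for ExplodedNodeSet and the checker manager.

#include <cassert>
#include <vector>

typedef std::vector<int> NodeSet;

static NodeSet runPreCheckers(const NodeSet &In) {
  NodeSet Out;
  for (size_t i = 0; i < In.size(); ++i)
    if (In[i] >= 0)                 // a previsit checker sinks "bad" states
      Out.push_back(In[i]);
  return Out;
}

static NodeSet evaluateCast(const NodeSet &In) {
  NodeSet Out;
  for (size_t i = 0; i < In.size(); ++i)
    Out.push_back(In[i] + 100);     // the cast's own transfer function
  return Out;
}

static NodeSet runPostCheckers(const NodeSet &In) {
  return In;                        // postvisit checkers may add transitions; none here
}

int main() {
  NodeSet Pred;
  Pred.push_back(1);                // a state the checkers accept
  Pred.push_back(-1);               // a state the previsit stage prunes
  NodeSet Dst = runPostCheckers(evaluateCast(runPreCheckers(Pred)));
  assert(Dst.size() == 1 && Dst[0] == 101);
  return 0;
}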
@@ -2148,10 +2188,19 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Pred = *I;
switch (CastE->getCastKind()) {
+ case CK_LValueToRValue:
+ assert(false && "LValueToRValue casts handled earlier.");
+ case CK_GetObjCProperty:
+ assert(false && "GetObjCProperty casts handled earlier.");
case CK_ToVoid:
Dst.Add(Pred);
continue;
- case CK_LValueToRValue:
+ // The analyzer doesn't do anything special with these casts,
+ // since it understands retain/release semantics already.
+ case CK_ObjCProduceObject:
+ case CK_ObjCConsumeObject:
+ case CK_ObjCReclaimReturnedObject: // Fall-through.
+ // True no-ops.
case CK_NoOp:
case CK_FunctionToPointerDecay: {
// Copy the SVal of Ex to CastE.
@@ -2161,7 +2210,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
MakeNode(Dst, CastE, Pred, state);
continue;
}
- case CK_GetObjCProperty:
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
@@ -2273,17 +2321,9 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
// time a function is called those values may not be current.
ExplodedNodeSet Tmp;
- if (InitEx) {
- if (VD->getType()->isReferenceType() && !InitEx->isLValue()) {
- // If the initializer is C++ record type, it should already has a
- // temp object.
- if (!InitEx->getType()->isRecordType())
- CreateCXXTemporaryObject(InitEx, Pred, Tmp);
- else
- Tmp.Add(Pred);
- } else
- Visit(InitEx, Pred, Tmp);
- } else
+ if (InitEx)
+ Visit(InitEx, Pred, Tmp);
+ else
Tmp.Add(Pred);
ExplodedNodeSet Tmp2;
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index d0d8f601f071..23dd6416a8f1 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -1377,7 +1377,12 @@ StoreRef RegionStoreManager::setImplicitDefaultValue(Store store,
V = svalBuilder.makeZeroVal(Ctx.IntTy);
}
else {
- return StoreRef(store, *this);
+ // We can't represent values of this type, but we still need to set a value
+ // to record that the region has been initialized.
+ // If this assertion ever fires, a new case should be added above -- we
+ // should know how to default-initialize any value we can symbolicate.
+ assert(!SymbolManager::canSymbolicate(T) && "This type is representable");
+ V = UnknownVal();
}
return StoreRef(addBinding(B, R, BindingKey::Default,
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 197442b1574f..80c18a335fde 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -415,6 +415,24 @@ SVal SimpleSValBuilder::evalBinOpNN(const GRState *state,
case nonloc::ConcreteIntKind: {
const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
+ // Is the RHS a symbol we can simplify?
+ // FIXME: This was mostly copy/pasted from the LHS-is-a-symbol case.
+ if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
+ SymbolRef RSym = srhs->getSymbol();
+ if (RSym->getType(Context)->isIntegerType()) {
+ if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
+ // The symbol evaluates to a constant.
+ const llvm::APSInt *rhs_I;
+ if (BinaryOperator::isRelationalOp(op))
+ rhs_I = &BasicVals.Convert(lhsInt.getValue(), *Constant);
+ else
+ rhs_I = &BasicVals.Convert(resultTy, *Constant);
+
+ rhs = nonloc::ConcreteInt(*rhs_I);
+ }
+ }
+ }
+
if (isa<nonloc::ConcreteInt>(rhs)) {
return lhsInt.evalBinOp(*this, op, cast<nonloc::ConcreteInt>(rhs));
} else {
@@ -461,13 +479,22 @@ SVal SimpleSValBuilder::evalBinOpNN(const GRState *state,
case nonloc::SymbolValKind: {
nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
SymbolRef Sym = slhs->getSymbol();
+ QualType lhsType = Sym->getType(Context);
+
+ // The conversion type is usually the result type, but not in the case
+ // of relational expressions.
+ QualType conversionType = resultTy;
+ if (BinaryOperator::isRelationalOp(op))
+ conversionType = lhsType;
+
// Does the symbol simplify to a constant? If so, "fold" the constant
// by setting 'lhs' to a ConcreteInt and try again.
- if (Sym->getType(Context)->isIntegerType())
+ if (lhsType->isIntegerType())
if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
// The symbol evaluates to a constant. If necessary, promote the
// folded constant (LHS) to the result type.
- const llvm::APSInt &lhs_I = BasicVals.Convert(resultTy, *Constant);
+ const llvm::APSInt &lhs_I = BasicVals.Convert(conversionType,
+ *Constant);
lhs = nonloc::ConcreteInt(lhs_I);
// Also promote the RHS (if necessary).
@@ -479,7 +506,7 @@ SVal SimpleSValBuilder::evalBinOpNN(const GRState *state,
// Other operators: do an implicit conversion. This shouldn't be
// necessary once we support truncation/extension of symbolic values.
if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
- rhs = nonloc::ConcreteInt(BasicVals.Convert(resultTy,
+ rhs = nonloc::ConcreteInt(BasicVals.Convert(conversionType,
rhs_I->getValue()));
}
@@ -492,7 +519,8 @@ SVal SimpleSValBuilder::evalBinOpNN(const GRState *state,
if (RSym->getType(Context)->isIntegerType()) {
if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
// The symbol evaluates to a constant.
- const llvm::APSInt &rhs_I = BasicVals.Convert(resultTy, *Constant);
+ const llvm::APSInt &rhs_I = BasicVals.Convert(conversionType,
+ *Constant);
rhs = nonloc::ConcreteInt(rhs_I);
}
}
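A standalone sketch (separate from the patch) of why the folded constants above are converted to the operand's type for relational operators rather than to the expression's result type: in C the result type of a comparison is int, and converting the operands themselves to that width can truncate wider values and flip the outcome. Plain fixed-width integers stand in for the analyzer's APSInt conversions.

#include <cassert>
#include <stdint.h>

int main() {
  uint64_t sym = 1ULL << 32;   // a 64-bit symbol known to equal 2^32
  uint64_t one = 1;

  // Compared in the operands' own (64-bit) type, the relation holds.
  assert(sym > one);

  // Converting the operands to a 32-bit "result" width first folds 2^32
  // down to 0 and reverses the answer.
  uint32_t symTrunc = (uint32_t)sym;
  uint32_t oneTrunc = (uint32_t)one;
  assert(symTrunc == 0);
  assert(!(symTrunc > oneTrunc));
  return 0;
}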