author    Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
commit    3a0822f094b578157263e04114075ad7df81db41 (patch)
tree      bc48361fe2cd1ca5f93ac01b38b183774468fc79 /lib/Transforms/Instrumentation
parent    85d8b2bbe386bcfe669575d05b61482d7be07e5d (diff)
download  src-3a0822f094b578157263e04114075ad7df81db41.tar.gz
          src-3a0822f094b578157263e04114075ad7df81db41.zip
Vendor import of llvm trunk r240225 (tag: vendor/llvm/llvm-trunk-r240225)
Notes:
  svn path=/vendor/llvm/dist/; revision=284677
  svn path=/vendor/llvm/llvm-trunk-r240225/; revision=284678; tag=vendor/llvm/llvm-trunk-r240225
Diffstat (limited to 'lib/Transforms/Instrumentation')
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp  | 123
-rw-r--r--  lib/Transforms/Instrumentation/BoundsChecking.cpp    |   2
-rw-r--r--  lib/Transforms/Instrumentation/CMakeLists.txt        |   1
-rw-r--r--  lib/Transforms/Instrumentation/DataFlowSanitizer.cpp |   2
-rw-r--r--  lib/Transforms/Instrumentation/GCOVProfiling.cpp     |   4
-rw-r--r--  lib/Transforms/Instrumentation/Instrumentation.cpp   |   1
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp   |   2
-rw-r--r--  lib/Transforms/Instrumentation/SafeStack.cpp         | 608
-rw-r--r--  lib/Transforms/Instrumentation/SanitizerCoverage.cpp |  12
9 files changed, 694 insertions, 61 deletions
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 25f78b0b2a26..2dd2fe6211c3 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -67,6 +67,7 @@ static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
+static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
@@ -106,10 +107,8 @@ static const char *const kAsanUnpoisonStackMemoryName =
static const char *const kAsanOptionDetectUAR =
"__asan_option_detect_stack_use_after_return";
-static const char *const kAsanAllocaPoison =
- "__asan_alloca_poison";
-static const char *const kAsanAllocasUnpoison =
- "__asan_allocas_unpoison";
+static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
+static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
@@ -117,6 +116,9 @@ static const size_t kNumberOfAccessSizes = 5;
static const unsigned kAllocaRzSize = 32;
// Command-line flags.
+static cl::opt<bool> ClEnableKasan(
+ "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
+ cl::Hidden, cl::init(false));
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
@@ -317,7 +319,8 @@ struct ShadowMapping {
bool OrShadowOffset;
};
-static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
+static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
+ bool IsKasan) {
bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
bool IsIOS = TargetTriple.isiOS();
bool IsFreeBSD = TargetTriple.isOSFreeBSD();
@@ -352,9 +355,12 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
Mapping.Offset = kPPC64_ShadowOffset64;
else if (IsFreeBSD)
Mapping.Offset = kFreeBSD_ShadowOffset64;
- else if (IsLinux && IsX86_64)
- Mapping.Offset = kSmallX86_64ShadowOffset;
- else if (IsMIPS64)
+ else if (IsLinux && IsX86_64) {
+ if (IsKasan)
+ Mapping.Offset = kLinuxKasan_ShadowOffset64;
+ else
+ Mapping.Offset = kSmallX86_64ShadowOffset;
+ } else if (IsMIPS64)
Mapping.Offset = kMIPS64_ShadowOffset64;
else if (IsAArch64)
Mapping.Offset = kAArch64_ShadowOffset64;
@@ -383,7 +389,8 @@ static size_t RedzoneSizeForScale(int MappingScale) {
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
- AddressSanitizer() : FunctionPass(ID) {
+ explicit AddressSanitizer(bool CompileKernel = false)
+ : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan) {
initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
}
const char *getPassName() const override {
@@ -410,8 +417,7 @@ struct AddressSanitizer : public FunctionPass {
/// If it is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
- uint64_t *TypeSize,
- unsigned *Alignment);
+ uint64_t *TypeSize, unsigned *Alignment);
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
@@ -447,11 +453,12 @@ struct AddressSanitizer : public FunctionPass {
LLVMContext *C;
Triple TargetTriple;
int LongSize;
+ bool CompileKernel;
Type *IntptrTy;
ShadowMapping Mapping;
DominatorTree *DT;
- Function *AsanCtorFunction;
- Function *AsanInitFunction;
+ Function *AsanCtorFunction = nullptr;
+ Function *AsanInitFunction = nullptr;
Function *AsanHandleNoReturnFunc;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
// This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
@@ -470,7 +477,8 @@ struct AddressSanitizer : public FunctionPass {
class AddressSanitizerModule : public ModulePass {
public:
- AddressSanitizerModule() : ModulePass(ID) {}
+ explicit AddressSanitizerModule(bool CompileKernel = false)
+ : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan) {}
bool runOnModule(Module &M) override;
static char ID; // Pass identification, replacement for typeid
const char *getPassName() const override { return "AddressSanitizerModule"; }
@@ -487,6 +495,7 @@ class AddressSanitizerModule : public ModulePass {
}
GlobalsMetadata GlobalsMD;
+ bool CompileKernel;
Type *IntptrTy;
LLVMContext *C;
Triple TargetTriple;
@@ -588,7 +597,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
Value *SavedStack) {
IRBuilder<> IRB(InstBefore);
IRB.CreateCall(AsanAllocasUnpoisonFunc,
- {IRB.CreateLoad(DynamicAllocaLayout),
+ {IRB.CreateLoad(DynamicAllocaLayout),
IRB.CreatePtrToInt(SavedStack, IntptrTy)});
}
@@ -692,8 +701,8 @@ INITIALIZE_PASS_END(
AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
false)
-FunctionPass *llvm::createAddressSanitizerFunctionPass() {
- return new AddressSanitizer();
+FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel) {
+ return new AddressSanitizer(CompileKernel);
}
char AddressSanitizerModule::ID = 0;
@@ -702,8 +711,8 @@ INITIALIZE_PASS(
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass",
false, false)
-ModulePass *llvm::createAddressSanitizerModulePass() {
- return new AddressSanitizerModule();
+ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel) {
+ return new AddressSanitizerModule(CompileKernel);
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -1347,16 +1356,18 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
- Mapping = getShadowMapping(TargetTriple, LongSize);
+ Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
initializeCallbacks(M);
bool Changed = false;
- Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
- assert(CtorFunc);
- IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
- if (ClGlobals) Changed |= InstrumentGlobals(IRB, M);
+ // TODO(glider): temporarily disabled globals instrumentation for KASan.
+ if (ClGlobals && !CompileKernel) {
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+ Changed |= InstrumentGlobals(IRB, M);
+ }
return Changed;
}
@@ -1369,38 +1380,44 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string ExpStr = Exp ? "exp_" : "";
+ const std::string SuffixStr = CompileKernel ? "N" : "_n";
+ const std::string EndingStr = CompileKernel ? "_noabort" : "";
const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+ // TODO(glider): for KASan builds add _noabort to error reporting
+ // functions and make them actually noabort (remove the UnreachableInst).
AsanErrorCallbackSized[AccessIsWrite][Exp] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- kAsanReportErrorTemplate + ExpStr + TypeStr + "_n",
+ kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr,
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N",
+ ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(),
- IntptrTy, ExpType, nullptr));
+ kAsanReportErrorTemplate + ExpStr + Suffix,
+ IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(),
- IntptrTy, ExpType, nullptr));
+ ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
+ IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
}
}
}
+ const std::string MemIntrinCallbackPrefix =
+ CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
@@ -1427,14 +1444,14 @@ bool AddressSanitizer::doInitialization(Module &M) {
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
- std::tie(AsanCtorFunction, AsanInitFunction) =
- createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
- /*InitArgTypes=*/{},
- /*InitArgs=*/{});
-
- Mapping = getShadowMapping(TargetTriple, LongSize);
-
- appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+ if (!CompileKernel) {
+ std::tie(AsanCtorFunction, AsanInitFunction) =
+ createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+ }
+ Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
return true;
}
@@ -1516,11 +1533,10 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
}
- bool UseCalls = false;
- if (ClInstrumentationWithCallsThreshold >= 0 &&
- ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
- UseCalls = true;
-
+ bool UseCalls =
+ CompileKernel ||
+ (ClInstrumentationWithCallsThreshold >= 0 &&
+ ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
const DataLayout &DL = F.getParent()->getDataLayout();
@@ -1706,8 +1722,7 @@ void FunctionStackPoisoner::poisonStack() {
if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) {
// Handle dynamic allocas.
createDynamicAllocasInitStorage();
- for (auto &AI : DynamicAllocaVec)
- handleDynamicAllocaCall(AI);
+ for (auto &AI : DynamicAllocaVec) handleDynamicAllocaCall(AI);
unpoisonDynamicAllocas();
}
@@ -1736,8 +1751,8 @@ void FunctionStackPoisoner::poisonStack() {
ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
uint64_t LocalStackSize = L.FrameSize;
- bool DoStackMalloc =
- ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
+ bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
+ LocalStackSize <= kMaxStackMallocSize;
// Don't do dynamic alloca in presence of inline asm: too often it makes
// assumptions on which registers are available. Don't do stack malloc in the
// presence of inline asm on 32-bit platforms for the same reason.
@@ -1901,9 +1916,9 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
// For now just insert the call to ASan runtime.
Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
Value *SizeArg = ConstantInt::get(IntptrTy, Size);
- IRB.CreateCall(DoPoison ? AsanPoisonStackMemoryFunc
- : AsanUnpoisonStackMemoryFunc,
- {AddrArg, SizeArg});
+ IRB.CreateCall(
+ DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+ {AddrArg, SizeArg});
}
// Handling llvm.lifetime intrinsics for a given %alloca:
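
Taken together, the AddressSanitizer hunks above add a kernel (KASan) mode: when CompileKernel is set (through the new constructor argument or the hidden -asan-kernel flag), x86-64 Linux uses the fixed kernel shadow offset, checks are always emitted as calls into the runtime, and the module constructor, global instrumentation and stack-malloc (use-after-return) machinery are skipped. A minimal sketch of how the shadow address works out in that mode, assuming the default shadow scale of 3 (the MemToShadow helper itself is not part of these hunks):

// Sketch only: KASan shadow mapping for x86-64 Linux with the default scale.
static uint64_t kasanMemToShadow(uint64_t Addr) {
  const uint64_t Scale = 3;                      // 8 application bytes -> 1 shadow byte
  const uint64_t Offset = 0xdffffc0000000000ULL; // kLinuxKasan_ShadowOffset64
  return (Addr >> Scale) + Offset;
}

The kernel variant can be requested through the updated factories, e.g. createAddressSanitizerFunctionPass(/*CompileKernel=*/true) and createAddressSanitizerModulePass(/*CompileKernel=*/true) (their declarations, presumably in llvm/Transforms/Instrumentation.h, are not shown here), or by passing -asan-kernel to a pipeline that already runs the asan passes.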
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index f6858034d79e..a8874251ee07 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -63,7 +63,7 @@ namespace {
void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
-}
+} // namespace
char BoundsChecking::ID = 0;
INITIALIZE_PASS(BoundsChecking, "bounds-checking", "Run-time bounds checking",
diff --git a/lib/Transforms/Instrumentation/CMakeLists.txt b/lib/Transforms/Instrumentation/CMakeLists.txt
index b2ff03343eb0..9b81f4bb1619 100644
--- a/lib/Transforms/Instrumentation/CMakeLists.txt
+++ b/lib/Transforms/Instrumentation/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_library(LLVMInstrumentation
MemorySanitizer.cpp
Instrumentation.cpp
InstrProfiling.cpp
+ SafeStack.cpp
SanitizerCoverage.cpp
ThreadSanitizer.cpp
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 2de6e1afaba9..43091572aeb1 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -346,7 +346,7 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
void visitMemTransferInst(MemTransferInst &I);
};
-}
+} // namespace
char DataFlowSanitizer::ID;
INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 9a3ed5c04efc..43caf1fcb8d0 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -139,7 +139,7 @@ namespace {
LLVMContext *Ctx;
SmallVector<std::unique_ptr<GCOVFunction>, 16> Funcs;
};
-}
+} // namespace
char GCOVProfiler::ID = 0;
INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling",
@@ -419,7 +419,7 @@ namespace {
DenseMap<BasicBlock *, GCOVBlock> Blocks;
GCOVBlock ReturnBlock;
};
-}
+} // namespace
std::string GCOVProfiler::mangleName(const DICompileUnit *CU,
const char *NewStem) {
diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp
index a91fc0ec2a48..27505859100b 100644
--- a/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -30,6 +30,7 @@ void llvm::initializeInstrumentation(PassRegistry &Registry) {
initializeThreadSanitizerPass(Registry);
initializeSanitizerCoverageModulePass(Registry);
initializeDataFlowSanitizerPass(Registry);
+ initializeSafeStackPass(Registry);
}
/// LLVMInitializeInstrumentation - C binding for
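
The one-line change above registers the new SafeStack pass (added below) with the pass registry. A minimal usage sketch for the legacy pass manager, assuming createSafeStackPass() is declared next to the other instrumentation factories in llvm/Transforms/Instrumentation.h (that header change is outside this directory and not shown):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Instrumentation.h"

// Sketch: schedule SafeStack. The pass only rewrites functions carrying the
// `safestack` attribute (see SafeStack::runOnFunction below), so adding it
// unconditionally leaves other functions untouched.
static void addSafeStackPass(llvm::legacy::PassManagerBase &PM) {
  PM.add(llvm::createSafeStackPass());
}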
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 100824e59af5..63eee2f7153a 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2022,6 +2022,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *CopyOp, *ConvertOp;
switch (I.getNumArgOperands()) {
+ case 3:
+ assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
case 2:
CopyOp = I.getArgOperand(0);
ConvertOp = I.getArgOperand(1);
diff --git a/lib/Transforms/Instrumentation/SafeStack.cpp b/lib/Transforms/Instrumentation/SafeStack.cpp
new file mode 100644
index 000000000000..13c541218313
--- /dev/null
+++ b/lib/Transforms/Instrumentation/SafeStack.cpp
@@ -0,0 +1,608 @@
+//===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
+// and the unsafe stack (explicitly allocated and managed through the runtime
+// support library).
+//
+// http://clang.llvm.org/docs/SafeStack.html
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "safestack"
+
+namespace llvm {
+
+STATISTIC(NumFunctions, "Total number of functions");
+STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
+STATISTIC(NumUnsafeStackRestorePointsFunctions,
+ "Number of functions that use setjmp or exceptions");
+
+STATISTIC(NumAllocas, "Total number of allocas");
+STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
+STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
+STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
+
+} // namespace llvm
+
+namespace {
+
+/// Check whether a given alloca instruction (AI) should be put on the safe
+/// stack or not. The function analyzes all uses of AI and checks whether it is
+/// only accessed in a memory safe way (as decided statically).
+bool IsSafeStackAlloca(const AllocaInst *AI) {
+ // Go through all uses of this alloca and check whether all accesses to the
+ // allocated object are statically known to be memory safe and, hence, the
+ // object can be placed on the safe stack.
+
+ SmallPtrSet<const Value *, 16> Visited;
+ SmallVector<const Instruction *, 8> WorkList;
+ WorkList.push_back(AI);
+
+ // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
+ while (!WorkList.empty()) {
+ const Instruction *V = WorkList.pop_back_val();
+ for (const Use &UI : V->uses()) {
+ auto I = cast<const Instruction>(UI.getUser());
+ assert(V == UI.get());
+
+ switch (I->getOpcode()) {
+ case Instruction::Load:
+ // Loading from a pointer is safe.
+ break;
+ case Instruction::VAArg:
+ // "va-arg" from a pointer is safe.
+ break;
+ case Instruction::Store:
+ if (V == I->getOperand(0))
+ // Stored the pointer - conservatively assume it may be unsafe.
+ return false;
+ // Storing to the pointee is safe.
+ break;
+
+ case Instruction::GetElementPtr:
+ if (!cast<const GetElementPtrInst>(I)->hasAllConstantIndices())
+ // GEP with non-constant indices can lead to memory errors.
+ // This also applies to inbounds GEPs, as the inbounds attribute
+ // represents an assumption that the address is in bounds, rather than
+ // an assertion that it is.
+ return false;
+
+ // We assume that GEP on static alloca with constant indices is safe,
+ // otherwise a compiler would detect it and warn during compilation.
+
+ if (!isa<const ConstantInt>(AI->getArraySize()))
+ // However, if the array size itself is not constant, the access
+ // might still be unsafe at runtime.
+ return false;
+
+ /* fallthrough */
+
+ case Instruction::BitCast:
+ case Instruction::IntToPtr:
+ case Instruction::PHI:
+ case Instruction::PtrToInt:
+ case Instruction::Select:
+ // The object can be safe or not, depending on how the result of the
+ // instruction is used.
+ if (Visited.insert(I).second)
+ WorkList.push_back(cast<const Instruction>(I));
+ break;
+
+ case Instruction::Call:
+ case Instruction::Invoke: {
+ // FIXME: add support for memset and memcpy intrinsics.
+ ImmutableCallSite CS(I);
+
+ // LLVM 'nocapture' attribute is only set for arguments whose address
+ // is not stored, passed around, or used in any other non-trivial way.
+ // We assume that passing a pointer to an object as a 'nocapture'
+ // argument is safe.
+ // FIXME: a more precise solution would require an interprocedural
+ // analysis here, which would look at all uses of an argument inside
+ // the function being called.
+ ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
+ if (A->get() == V && !CS.doesNotCapture(A - B))
+ // The parameter is not marked 'nocapture' - unsafe.
+ return false;
+ continue;
+ }
+
+ default:
+ // The object is unsafe if it is used in any other way.
+ return false;
+ }
+ }
+ }
+
+ // All uses of the alloca are safe, we can place it on the safe stack.
+ return true;
+}
+
+/// The SafeStack pass splits the stack of each function into the
+/// safe stack, which is only accessed through memory safe dereferences
+/// (as determined statically), and the unsafe stack, which contains all
+/// local variables that are accessed in unsafe ways.
+class SafeStack : public FunctionPass {
+ const DataLayout *DL;
+
+ Type *StackPtrTy;
+ Type *IntPtrTy;
+ Type *Int32Ty;
+ Type *Int8Ty;
+
+ Constant *UnsafeStackPtr;
+
+ /// Unsafe stack alignment. Each stack frame must ensure that the stack is
+ /// aligned to this value. We need to re-align the unsafe stack if the
+ /// alignment of any object on the stack exceeds this value.
+ ///
+ /// 16 seems like a reasonable upper bound on the alignment of objects that we
+ /// might expect to appear on the stack on most common targets.
+ enum { StackAlignment = 16 };
+
+ /// \brief Build a constant representing a pointer to the unsafe stack
+ /// pointer.
+ Constant *getOrCreateUnsafeStackPtr(Module &M);
+
+ /// \brief Find all static allocas, dynamic allocas, return instructions and
+ /// stack restore points (exception unwind blocks and setjmp calls) in the
+ /// given function and append them to the respective vectors.
+ void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
+ SmallVectorImpl<AllocaInst *> &DynamicAllocas,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ SmallVectorImpl<Instruction *> &StackRestorePoints);
+
+ /// \brief Allocate space for all static allocas in \p StaticAllocas,
+ /// replace allocas with pointers into the unsafe stack and generate code to
+ /// restore the stack pointer before all return instructions in \p Returns.
+ ///
+ /// \returns A pointer to the top of the unsafe stack after all unsafe static
+ /// allocas are allocated.
+ Value *moveStaticAllocasToUnsafeStack(Function &F,
+ ArrayRef<AllocaInst *> StaticAllocas,
+ ArrayRef<ReturnInst *> Returns);
+
+ /// \brief Generate code to restore the stack after all stack restore points
+ /// in \p StackRestorePoints.
+ ///
+ /// \returns A local variable in which to maintain the dynamic top of the
+ /// unsafe stack if needed.
+ AllocaInst *
+ createStackRestorePoints(Function &F,
+ ArrayRef<Instruction *> StackRestorePoints,
+ Value *StaticTop, bool NeedDynamicTop);
+
+ /// \brief Replace all allocas in \p DynamicAllocas with code to allocate
+ /// space dynamically on the unsafe stack and store the dynamic unsafe stack
+ /// top to \p DynamicTop if non-null.
+ void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
+ AllocaInst *DynamicTop,
+ ArrayRef<AllocaInst *> DynamicAllocas);
+
+public:
+ static char ID; // Pass identification, replacement for typeid.
+ SafeStack() : FunctionPass(ID), DL(nullptr) {
+ initializeSafeStackPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AliasAnalysis>();
+ }
+
+ virtual bool doInitialization(Module &M) {
+ DL = &M.getDataLayout();
+
+ StackPtrTy = Type::getInt8PtrTy(M.getContext());
+ IntPtrTy = DL->getIntPtrType(M.getContext());
+ Int32Ty = Type::getInt32Ty(M.getContext());
+ Int8Ty = Type::getInt8Ty(M.getContext());
+
+ UnsafeStackPtr = getOrCreateUnsafeStackPtr(M);
+
+ return false;
+ }
+
+ bool runOnFunction(Function &F);
+
+}; // class SafeStack
+
+Constant *SafeStack::getOrCreateUnsafeStackPtr(Module &M) {
+ // The unsafe stack pointer is stored in a global variable with a magic name.
+ const char *kUnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
+
+ auto UnsafeStackPtr =
+ dyn_cast_or_null<GlobalVariable>(M.getNamedValue(kUnsafeStackPtrVar));
+
+ if (!UnsafeStackPtr) {
+ // The global variable is not defined yet, define it ourselves.
+ // We use the initial-exec TLS model because we do not support the variable
+ // living anywhere other than in the main executable.
+ UnsafeStackPtr = new GlobalVariable(
+ /*Module=*/M, /*Type=*/StackPtrTy,
+ /*isConstant=*/false, /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, /*Name=*/kUnsafeStackPtrVar,
+ /*InsertBefore=*/nullptr,
+ /*ThreadLocalMode=*/GlobalValue::InitialExecTLSModel);
+ } else {
+ // The variable exists, check its type and attributes.
+ if (UnsafeStackPtr->getValueType() != StackPtrTy) {
+ report_fatal_error(Twine(kUnsafeStackPtrVar) + " must have void* type");
+ }
+
+ if (!UnsafeStackPtr->isThreadLocal()) {
+ report_fatal_error(Twine(kUnsafeStackPtrVar) + " must be thread-local");
+ }
+ }
+
+ return UnsafeStackPtr;
+}
+
+void SafeStack::findInsts(Function &F,
+ SmallVectorImpl<AllocaInst *> &StaticAllocas,
+ SmallVectorImpl<AllocaInst *> &DynamicAllocas,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ SmallVectorImpl<Instruction *> &StackRestorePoints) {
+ for (Instruction &I : inst_range(&F)) {
+ if (auto AI = dyn_cast<AllocaInst>(&I)) {
+ ++NumAllocas;
+
+ if (IsSafeStackAlloca(AI))
+ continue;
+
+ if (AI->isStaticAlloca()) {
+ ++NumUnsafeStaticAllocas;
+ StaticAllocas.push_back(AI);
+ } else {
+ ++NumUnsafeDynamicAllocas;
+ DynamicAllocas.push_back(AI);
+ }
+ } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
+ Returns.push_back(RI);
+ } else if (auto CI = dyn_cast<CallInst>(&I)) {
+ // setjmps require stack restore.
+ if (CI->getCalledFunction() && CI->canReturnTwice())
+ StackRestorePoints.push_back(CI);
+ } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
+ // Exception landing pads require stack restore.
+ StackRestorePoints.push_back(LP);
+ } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
+ if (II->getIntrinsicID() == Intrinsic::gcroot)
+ llvm::report_fatal_error(
+ "gcroot intrinsic not compatible with safestack attribute");
+ }
+ }
+}
+
+AllocaInst *
+SafeStack::createStackRestorePoints(Function &F,
+ ArrayRef<Instruction *> StackRestorePoints,
+ Value *StaticTop, bool NeedDynamicTop) {
+ if (StackRestorePoints.empty())
+ return nullptr;
+
+ IRBuilder<> IRB(StaticTop
+ ? cast<Instruction>(StaticTop)->getNextNode()
+ : (Instruction *)F.getEntryBlock().getFirstInsertionPt());
+
+ // We need the current value of the shadow stack pointer to restore
+ // after longjmp or exception catching.
+
+ // FIXME: On some platforms this could be handled by the longjmp/exception
+ // runtime itself.
+
+ AllocaInst *DynamicTop = nullptr;
+ if (NeedDynamicTop)
+ // If we also have dynamic alloca's, the stack pointer value changes
+ // throughout the function. For now we store it in an alloca.
+ DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
+ "unsafe_stack_dynamic_ptr");
+
+ if (!StaticTop)
+ // We need the original unsafe stack pointer value, even if there are
+ // no unsafe static allocas.
+ StaticTop = IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
+
+ if (NeedDynamicTop)
+ IRB.CreateStore(StaticTop, DynamicTop);
+
+ // Restore current stack pointer after longjmp/exception catch.
+ for (Instruction *I : StackRestorePoints) {
+ ++NumUnsafeStackRestorePoints;
+
+ IRB.SetInsertPoint(cast<Instruction>(I->getNextNode()));
+ Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
+ IRB.CreateStore(CurrentTop, UnsafeStackPtr);
+ }
+
+ return DynamicTop;
+}
+
+Value *
+SafeStack::moveStaticAllocasToUnsafeStack(Function &F,
+ ArrayRef<AllocaInst *> StaticAllocas,
+ ArrayRef<ReturnInst *> Returns) {
+ if (StaticAllocas.empty())
+ return nullptr;
+
+ IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt());
+ DIBuilder DIB(*F.getParent());
+
+ // We explicitly compute and set the unsafe stack layout for all unsafe
+ // static alloca instructions. We save the unsafe "base pointer" in the
+ // prologue into a local variable and restore it in the epilogue.
+
+ // Load the current stack pointer (we'll also use it as a base pointer).
+ // FIXME: use a dedicated register for it ?
+ Instruction *BasePointer =
+ IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
+ assert(BasePointer->getType() == StackPtrTy);
+
+ for (ReturnInst *RI : Returns) {
+ IRB.SetInsertPoint(RI);
+ IRB.CreateStore(BasePointer, UnsafeStackPtr);
+ }
+
+ // Compute maximum alignment among static objects on the unsafe stack.
+ unsigned MaxAlignment = 0;
+ for (AllocaInst *AI : StaticAllocas) {
+ Type *Ty = AI->getAllocatedType();
+ unsigned Align =
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
+ if (Align > MaxAlignment)
+ MaxAlignment = Align;
+ }
+
+ if (MaxAlignment > StackAlignment) {
+ // Re-align the base pointer according to the max requested alignment.
+ assert(isPowerOf2_32(MaxAlignment));
+ IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
+ BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
+ IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
+ ConstantInt::get(IntPtrTy, ~uint64_t(MaxAlignment - 1))),
+ StackPtrTy));
+ }
+
+ // Allocate space for every unsafe static AllocaInst on the unsafe stack.
+ int64_t StaticOffset = 0; // Current stack top.
+ for (AllocaInst *AI : StaticAllocas) {
+ IRB.SetInsertPoint(AI);
+
+ auto CArraySize = cast<ConstantInt>(AI->getArraySize());
+ Type *Ty = AI->getAllocatedType();
+
+ uint64_t Size = DL->getTypeAllocSize(Ty) * CArraySize->getZExtValue();
+ if (Size == 0)
+ Size = 1; // Don't create zero-sized stack objects.
+
+ // Ensure the object is properly aligned.
+ unsigned Align =
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
+
+ // Add alignment.
+ // NOTE: we ensure that BasePointer itself is aligned to >= Align.
+ StaticOffset += Size;
+ StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+
+ Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
+ ConstantInt::get(Int32Ty, -StaticOffset));
+ Value *NewAI = IRB.CreateBitCast(Off, AI->getType(), AI->getName());
+ if (AI->hasName() && isa<Instruction>(NewAI))
+ cast<Instruction>(NewAI)->takeName(AI);
+
+ // Replace alloc with the new location.
+ replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
+ AI->replaceAllUsesWith(NewAI);
+ AI->eraseFromParent();
+ }
+
+ // Re-align BasePointer so that our callees would see it aligned as
+ // expected.
+ // FIXME: no need to update BasePointer in leaf functions.
+ StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
+
+ // Update shadow stack pointer in the function epilogue.
+ IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
+
+ Value *StaticTop =
+ IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -StaticOffset),
+ "unsafe_stack_static_top");
+ IRB.CreateStore(StaticTop, UnsafeStackPtr);
+ return StaticTop;
+}
+
+void SafeStack::moveDynamicAllocasToUnsafeStack(
+ Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
+ ArrayRef<AllocaInst *> DynamicAllocas) {
+ DIBuilder DIB(*F.getParent());
+
+ for (AllocaInst *AI : DynamicAllocas) {
+ IRBuilder<> IRB(AI);
+
+ // Compute the new SP value (after AI).
+ Value *ArraySize = AI->getArraySize();
+ if (ArraySize->getType() != IntPtrTy)
+ ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
+
+ Type *Ty = AI->getAllocatedType();
+ uint64_t TySize = DL->getTypeAllocSize(Ty);
+ Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
+
+ Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
+ SP = IRB.CreateSub(SP, Size);
+
+ // Align the SP value to satisfy the AllocaInst, type and stack alignments.
+ unsigned Align = std::max(
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()),
+ (unsigned)StackAlignment);
+
+ assert(isPowerOf2_32(Align));
+ Value *NewTop = IRB.CreateIntToPtr(
+ IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
+ StackPtrTy);
+
+ // Save the stack pointer.
+ IRB.CreateStore(NewTop, UnsafeStackPtr);
+ if (DynamicTop)
+ IRB.CreateStore(NewTop, DynamicTop);
+
+ Value *NewAI = IRB.CreateIntToPtr(SP, AI->getType());
+ if (AI->hasName() && isa<Instruction>(NewAI))
+ NewAI->takeName(AI);
+
+ replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
+ AI->replaceAllUsesWith(NewAI);
+ AI->eraseFromParent();
+ }
+
+ if (!DynamicAllocas.empty()) {
+ // Now go through the instructions again, replacing stacksave/stackrestore.
+ for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
+ Instruction *I = &*(It++);
+ auto II = dyn_cast<IntrinsicInst>(I);
+ if (!II)
+ continue;
+
+ if (II->getIntrinsicID() == Intrinsic::stacksave) {
+ IRBuilder<> IRB(II);
+ Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
+ LI->takeName(II);
+ II->replaceAllUsesWith(LI);
+ II->eraseFromParent();
+ } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
+ IRBuilder<> IRB(II);
+ Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
+ SI->takeName(II);
+ assert(II->use_empty());
+ II->eraseFromParent();
+ }
+ }
+ }
+}
+
+bool SafeStack::runOnFunction(Function &F) {
+ auto AA = &getAnalysis<AliasAnalysis>();
+
+ DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
+
+ if (!F.hasFnAttribute(Attribute::SafeStack)) {
+ DEBUG(dbgs() << "[SafeStack] safestack is not requested"
+ " for this function\n");
+ return false;
+ }
+
+ if (F.isDeclaration()) {
+ DEBUG(dbgs() << "[SafeStack] function definition"
+ " is not available\n");
+ return false;
+ }
+
+ {
+ // Make sure the regular stack protector won't run on this function
+ // (safestack attribute takes precedence).
+ AttrBuilder B;
+ B.addAttribute(Attribute::StackProtect)
+ .addAttribute(Attribute::StackProtectReq)
+ .addAttribute(Attribute::StackProtectStrong);
+ F.removeAttributes(
+ AttributeSet::FunctionIndex,
+ AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B));
+ }
+
+ if (AA->onlyReadsMemory(&F)) {
+ // XXX: we don't protect against information leak attacks for now.
+ DEBUG(dbgs() << "[SafeStack] function only reads memory\n");
+ return false;
+ }
+
+ ++NumFunctions;
+
+ SmallVector<AllocaInst *, 16> StaticAllocas;
+ SmallVector<AllocaInst *, 4> DynamicAllocas;
+ SmallVector<ReturnInst *, 4> Returns;
+
+ // Collect all points where stack gets unwound and needs to be restored
+ // This is only necessary because the runtime (setjmp and unwind code) is
+ // not aware of the unsafe stack and won't unwind/restore it properly.
+ // To work around this problem without changing the runtime, we insert
+ // instrumentation to restore the unsafe stack pointer when necessary.
+ SmallVector<Instruction *, 4> StackRestorePoints;
+
+ // Find all static and dynamic alloca instructions that must be moved to the
+ // unsafe stack, all return instructions and stack restore points.
+ findInsts(F, StaticAllocas, DynamicAllocas, Returns, StackRestorePoints);
+
+ if (StaticAllocas.empty() && DynamicAllocas.empty() &&
+ StackRestorePoints.empty())
+ return false; // Nothing to do in this function.
+
+ if (!StaticAllocas.empty() || !DynamicAllocas.empty())
+ ++NumUnsafeStackFunctions; // This function has the unsafe stack.
+
+ if (!StackRestorePoints.empty())
+ ++NumUnsafeStackRestorePointsFunctions;
+
+ // The top of the unsafe stack after all unsafe static allocas are allocated.
+ Value *StaticTop = moveStaticAllocasToUnsafeStack(F, StaticAllocas, Returns);
+
+ // Safe stack object that stores the current unsafe stack top. It is updated
+ // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
+ // This is only needed if we need to restore stack pointer after longjmp
+ // or exceptions, and we have dynamic allocations.
+ // FIXME: a better alternative might be to store the unsafe stack pointer
+ // before setjmp / invoke instructions.
+ AllocaInst *DynamicTop = createStackRestorePoints(
+ F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
+
+ // Handle dynamic allocas.
+ moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
+ DynamicAllocas);
+
+ DEBUG(dbgs() << "[SafeStack] safestack applied\n");
+ return true;
+}
+
+} // end anonymous namespace
+
+char SafeStack::ID = 0;
+INITIALIZE_PASS_BEGIN(SafeStack, "safe-stack",
+ "Safe Stack instrumentation pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(SafeStack, "safe-stack", "Safe Stack instrumentation pass",
+ false, false)
+
+FunctionPass *llvm::createSafeStackPass() { return new SafeStack(); }
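
To make the effect of IsSafeStackAlloca concrete, here is a hedged source-level example (built with clang's -fsanitize=safe-stack, per the documentation URL in the file header; read_into is a hypothetical external function): the buffer whose address escapes through a call argument not known to be nocapture is moved to the unsafe stack, while the plain counter keeps living on the ordinary (safe) stack.

// Hypothetical callee; its pointer parameter is not marked nocapture.
extern void read_into(char *buf, unsigned len);

int count_nonzero() {
  char buf[64]; // address escapes to read_into -> unsafe stack
  int n = 0;    // only loaded/stored directly  -> stays on the safe stack
  read_into(buf, sizeof(buf));
  for (unsigned i = 0; i != sizeof(buf); ++i)
    n += (buf[i] != 0);
  return n;
}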
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index f6ae0c2dd5f9..dff39efa5b96 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -33,6 +33,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
@@ -385,9 +386,14 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
}
bool IsEntryBB = &BB == &F.getEntryBlock();
- DebugLoc EntryLoc = IsEntryBB && IP->getDebugLoc()
- ? IP->getDebugLoc().getFnDebugLoc()
- : IP->getDebugLoc();
+ DebugLoc EntryLoc;
+ if (IsEntryBB) {
+ if (auto SP = getDISubprogram(&F))
+ EntryLoc = DebugLoc::get(SP->getScopeLine(), 0, SP);
+ } else {
+ EntryLoc = IP->getDebugLoc();
+ }
+
IRBuilder<> IRB(IP);
IRB.SetCurrentDebugLocation(EntryLoc);
SmallVector<Value *, 1> Indices;