Diffstat (limited to 'lib/Transforms/Instrumentation/MemorySanitizer.cpp')
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp  405
1 file changed, 339 insertions(+), 66 deletions(-)
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 34aaa7f27d6e..970f9ab86e82 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -91,7 +91,6 @@
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -109,9 +108,9 @@
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
@@ -191,6 +190,12 @@ static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
cl::desc("Insert checks for constant shadow values"),
cl::Hidden, cl::init(false));
+// This is off by default because of a bug in gold:
+// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
+static cl::opt<bool> ClWithComdat("msan-with-comdat",
+ cl::desc("Place MSan constructors in comdat sections"),
+ cl::Hidden, cl::init(false));
+
static const char *const kMsanModuleCtorName = "msan.module_ctor";
static const char *const kMsanInitName = "__msan_init";
@@ -312,6 +317,9 @@ class MemorySanitizer : public FunctionPass {
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
WarningFn(nullptr) {}
const char *getPassName() const override { return "MemorySanitizer"; }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ }
bool runOnFunction(Function &F) override;
bool doInitialization(Module &M) override;
static char ID; // Pass identification, replacement for typeid.
@@ -374,13 +382,18 @@ class MemorySanitizer : public FunctionPass {
friend struct VarArgAMD64Helper;
friend struct VarArgMIPS64Helper;
friend struct VarArgAArch64Helper;
+ friend struct VarArgPowerPC64Helper;
};
} // anonymous namespace
char MemorySanitizer::ID = 0;
-INITIALIZE_PASS(MemorySanitizer, "msan",
- "MemorySanitizer: detects uninitialized reads.",
- false, false)
+INITIALIZE_PASS_BEGIN(
+ MemorySanitizer, "msan",
+ "MemorySanitizer: detects uninitialized reads.", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(
+ MemorySanitizer, "msan",
+ "MemorySanitizer: detects uninitialized reads.", false, false)
FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
return new MemorySanitizer(TrackOrigins);
@@ -540,8 +553,14 @@ bool MemorySanitizer::doInitialization(Module &M) {
createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
/*InitArgTypes=*/{},
/*InitArgs=*/{});
+ if (ClWithComdat) {
+ Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
+ MsanCtorFunction->setComdat(MsanCtorComdat);
+ appendToGlobalCtors(M, MsanCtorFunction, 0, MsanCtorFunction);
+ } else {
+ appendToGlobalCtors(M, MsanCtorFunction, 0);
+ }
- appendToGlobalCtors(M, MsanCtorFunction, 0);
if (TrackOrigins)
new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
@@ -591,7 +610,7 @@ CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
if (TypeSize <= 8) return 0;
- return Log2_32_Ceil(TypeSize / 8);
+ return Log2_32_Ceil((TypeSize + 7) / 8);
}
/// This class does all the work for a given function. Store and Load
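To make the rounding fix above concrete: the old form truncated the bit width to bytes before taking the ceiling log2, so any width that was not a multiple of 8 got under-sized. A minimal standalone sketch, with a stand-in for llvm::Log2_32_Ceil (the stand-in is an assumption of this model, not the LLVM routine):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for llvm::Log2_32_Ceil.
static unsigned log2_32_ceil(uint32_t V) {
  unsigned R = 0;
  while ((1u << R) < V)
    ++R;
  return R;
}

static unsigned sizeIndexOld(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return log2_32_ceil(TypeSize / 8);       // truncates: i9..i15 -> index 0
}

static unsigned sizeIndexNew(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return log2_32_ceil((TypeSize + 7) / 8); // rounds up: i9..i16 -> index 1
}

int main() {
  assert(sizeIndexOld(9) == 0);  // wrong: a 9-bit value needs a 2-byte slot
  assert(sizeIndexNew(9) == 1);  // fixed
  assert(sizeIndexNew(64) == 3); // 8 bytes -> index 3, unchanged
  return 0;
}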
@@ -606,6 +625,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
ValueMap<Value*, Value*> ShadowMap, OriginMap;
std::unique_ptr<VarArgHelper> VAHelper;
+ const TargetLibraryInfo *TLI;
// The following flags disable parts of MSan instrumentation based on
// blacklist contents and command-line options.
@@ -623,7 +643,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
: Shadow(S), Origin(O), OrigIns(I) { }
};
SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
- SmallVector<Instruction*, 16> StoreList;
+ SmallVector<StoreInst *, 16> StoreList;
MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
: F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
@@ -635,6 +655,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// FIXME: Consider using SpecialCaseList to specify a list of functions that
// must always return fully initialized values. For now, we hardcode "main".
CheckReturnValue = SanitizeFunction && (F.getName() == "main");
+ TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
DEBUG(if (!InsertChecks)
dbgs() << "MemorySanitizer is not inserting checks into '"
@@ -731,26 +752,26 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void materializeStores(bool InstrumentWithCalls) {
- for (auto Inst : StoreList) {
- StoreInst &SI = *dyn_cast<StoreInst>(Inst);
-
- IRBuilder<> IRB(&SI);
- Value *Val = SI.getValueOperand();
- Value *Addr = SI.getPointerOperand();
- Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
+ for (StoreInst *SI : StoreList) {
+ IRBuilder<> IRB(SI);
+ Value *Val = SI->getValueOperand();
+ Value *Addr = SI->getPointerOperand();
+ Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
StoreInst *NewSI =
- IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
+ IRB.CreateAlignedStore(Shadow, ShadowPtr, SI->getAlignment());
DEBUG(dbgs() << " STORE: " << *NewSI << "\n");
(void)NewSI;
- if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);
+ if (ClCheckAccessAddress)
+ insertShadowCheck(Addr, SI);
- if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
+ if (SI->isAtomic())
+ SI->setOrdering(addReleaseOrdering(SI->getOrdering()));
- if (MS.TrackOrigins && !SI.isAtomic())
- storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
+ if (MS.TrackOrigins && !SI->isAtomic())
+ storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI->getAlignment(),
InstrumentWithCalls);
}
}
@@ -1142,7 +1163,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOrigin(A, getCleanOrigin());
}
}
- ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
+ ArgOffset += alignTo(Size, kShadowTLSAlignment);
}
assert(*ShadowPtr && "Could not find shadow for an argument");
return *ShadowPtr;
@@ -1210,34 +1231,34 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Release:
- return Release;
- case Acquire:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Release:
+ return AtomicOrdering::Release;
+ case AtomicOrdering::Acquire:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
switch (a) {
- case NotAtomic:
- return NotAtomic;
- case Unordered:
- case Monotonic:
- case Acquire:
- return Acquire;
- case Release:
- case AcquireRelease:
- return AcquireRelease;
- case SequentiallyConsistent:
- return SequentiallyConsistent;
+ case AtomicOrdering::NotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case AtomicOrdering::Unordered:
+ case AtomicOrdering::Monotonic:
+ case AtomicOrdering::Acquire:
+ return AtomicOrdering::Acquire;
+ case AtomicOrdering::Release:
+ case AtomicOrdering::AcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::SequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
}
llvm_unreachable("Unknown ordering");
}
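These two helpers keep the shadow access ordered relative to the application access: the shadow of an atomic store is written before the store, and the shadow of an atomic load is read after the load, so the application operation is strengthened to at least Release or Acquire respectively. The store-side caller is visible in materializeStores above; the load side follows the same pattern (paraphrased sketch, shown for illustration):

// In the load visitor: the shadow load is emitted after the application
// load, so upgrade the load to at least Acquire so the shadow load
// observes the matching shadow state.
if (LI.isAtomic())
  LI.setOrdering(addAcquireOrdering(LI.getOrdering()));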
@@ -1603,7 +1624,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
if (ConstantInt *Elt =
dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
- APInt V = Elt->getValue();
+ const APInt &V = Elt->getValue();
APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
Elements.push_back(ConstantInt::get(EltTy, V2));
} else {
@@ -1613,7 +1634,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
ShadowMul = ConstantVector::get(Elements);
} else {
if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
- APInt V = Elt->getValue();
+ const APInt &V = Elt->getValue();
APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
ShadowMul = ConstantInt::get(Ty, V2);
} else {
@@ -2123,6 +2144,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return CreateShadowCast(IRB, S2, T, /* Signed */ true);
}
+ // Given a vector, extract its first element, and return all
+ // zeroes if it is zero, and all ones otherwise.
+ Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
+ Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
+ Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
+ return CreateShadowCast(IRB, S2, T, /* Signed */ true);
+ }
+
Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
Type *T = S->getType();
assert(T->isVectorTy());
@@ -2270,15 +2299,39 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
setOriginForNaryOp(I);
}
+ // \brief Instrument compare-packed intrinsic.
+ // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
+ // all-ones shadow.
+ void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Type *ResTy = getShadowTy(&I);
+ Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ Value *S = IRB.CreateSExt(
+ IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
+
+ // \brief Instrument compare-scalar intrinsic.
+ // This handles both cmp* intrinsics which return the result in the first
+ // element of a vector, and comi* which return the result as i32.
+ void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
+ Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
+ setShadow(&I, S);
+ setOriginForNaryOp(I);
+ }
+
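A lane-level model of the two handlers may help: in handleVectorComparePackedIntrinsic, any poisoned bit in either input poisons the whole corresponding result lane; in handleVectorCompareScalarIntrinsic, only element 0 of the OR-ed shadows decides, and the result shadow is all-zeros or all-ones of the result type. A self-contained sketch of that semantics in plain C++ (a model, not the IRBuilder code above):

#include <array>
#include <cstdint>

// Packed compare: per lane, OR the input shadows and widen any nonzero
// result to all-ones, matching sext(icmp ne (or SA, SB), 0).
std::array<uint32_t, 4> packedCmpShadow(const std::array<uint32_t, 4> &SA,
                                        const std::array<uint32_t, 4> &SB) {
  std::array<uint32_t, 4> Out{};
  for (int I = 0; I < 4; ++I)
    Out[I] = (SA[I] | SB[I]) ? ~0u : 0u;
  return Out;
}

// Scalar compare (cmp_ss/comi_ss style): only element 0 of the OR-ed
// shadows matters; the whole result is poisoned or clean accordingly.
uint32_t scalarCmpShadow(const std::array<uint32_t, 4> &SA,
                         const std::array<uint32_t, 4> &SB) {
  return (SA[0] | SB[0]) ? ~0u : 0u;
}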
void visitIntrinsicInst(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case llvm::Intrinsic::bswap:
handleBswap(I);
break;
- case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
- case llvm::Intrinsic::x86_avx512_cvtsd2usi:
- case llvm::Intrinsic::x86_avx512_cvtss2usi64:
- case llvm::Intrinsic::x86_avx512_cvtss2usi:
+ case llvm::Intrinsic::x86_avx512_vcvtsd2usi64:
+ case llvm::Intrinsic::x86_avx512_vcvtsd2usi32:
+ case llvm::Intrinsic::x86_avx512_vcvtss2usi64:
+ case llvm::Intrinsic::x86_avx512_vcvtss2usi32:
case llvm::Intrinsic::x86_avx512_cvttss2usi64:
case llvm::Intrinsic::x86_avx512_cvttss2usi:
case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
@@ -2303,8 +2356,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case llvm::Intrinsic::x86_sse_cvttss2si:
handleVectorConvertIntrinsic(I, 1);
break;
- case llvm::Intrinsic::x86_sse2_cvtdq2pd:
- case llvm::Intrinsic::x86_sse2_cvtps2pd:
case llvm::Intrinsic::x86_sse_cvtps2pi:
case llvm::Intrinsic::x86_sse_cvttps2pi:
handleVectorConvertIntrinsic(I, 2);
@@ -2413,6 +2464,43 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleVectorPmaddIntrinsic(I, 16);
break;
+ case llvm::Intrinsic::x86_sse_cmp_ss:
+ case llvm::Intrinsic::x86_sse2_cmp_sd:
+ case llvm::Intrinsic::x86_sse_comieq_ss:
+ case llvm::Intrinsic::x86_sse_comilt_ss:
+ case llvm::Intrinsic::x86_sse_comile_ss:
+ case llvm::Intrinsic::x86_sse_comigt_ss:
+ case llvm::Intrinsic::x86_sse_comige_ss:
+ case llvm::Intrinsic::x86_sse_comineq_ss:
+ case llvm::Intrinsic::x86_sse_ucomieq_ss:
+ case llvm::Intrinsic::x86_sse_ucomilt_ss:
+ case llvm::Intrinsic::x86_sse_ucomile_ss:
+ case llvm::Intrinsic::x86_sse_ucomigt_ss:
+ case llvm::Intrinsic::x86_sse_ucomige_ss:
+ case llvm::Intrinsic::x86_sse_ucomineq_ss:
+ case llvm::Intrinsic::x86_sse2_comieq_sd:
+ case llvm::Intrinsic::x86_sse2_comilt_sd:
+ case llvm::Intrinsic::x86_sse2_comile_sd:
+ case llvm::Intrinsic::x86_sse2_comigt_sd:
+ case llvm::Intrinsic::x86_sse2_comige_sd:
+ case llvm::Intrinsic::x86_sse2_comineq_sd:
+ case llvm::Intrinsic::x86_sse2_ucomieq_sd:
+ case llvm::Intrinsic::x86_sse2_ucomilt_sd:
+ case llvm::Intrinsic::x86_sse2_ucomile_sd:
+ case llvm::Intrinsic::x86_sse2_ucomigt_sd:
+ case llvm::Intrinsic::x86_sse2_ucomige_sd:
+ case llvm::Intrinsic::x86_sse2_ucomineq_sd:
+ handleVectorCompareScalarIntrinsic(I);
+ break;
+
+ case llvm::Intrinsic::x86_sse_cmp_ps:
+ case llvm::Intrinsic::x86_sse2_cmp_pd:
+ // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
+ // generates reasonable-looking IR that fails in the backend with "Do not
+ // know how to split the result of this operator!".
+ handleVectorComparePackedIntrinsic(I);
+ break;
+
default:
if (!handleUnknownIntrinsic(I))
visitInstruction(I);
@@ -2450,6 +2538,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
AttributeSet::FunctionIndex,
B));
}
+
+ maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
}
IRBuilder<> IRB(&I);
@@ -2498,7 +2588,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
(void)Store;
assert(Size != 0 && Store != nullptr);
DEBUG(dbgs() << " Param:" << *Store << "\n");
- ArgOffset += RoundUpToAlignment(Size, 8);
+ ArgOffset += alignTo(Size, 8);
}
DEBUG(dbgs() << " done with call args\n");
@@ -2811,14 +2901,19 @@ struct VarArgAMD64Helper : public VarArgHelper {
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
unsigned ArgNo = CS.getArgumentNo(ArgIt);
+ bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
if (IsByVal) {
// ByVal arguments always go to the overflow area.
+ // Fixed arguments passed through the overflow area will be stepped
+ // over by va_start, so don't count them towards the offset.
+ if (IsFixed)
+ continue;
assert(A->getType()->isPointerTy());
Type *RealTy = A->getType()->getPointerElementType();
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
ArgSize, kShadowTLSAlignment);
} else {
@@ -2838,10 +2933,16 @@ struct VarArgAMD64Helper : public VarArgHelper {
FpOffset += 16;
break;
case AK_Memory:
+ if (IsFixed)
+ continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
}
+ // Take fixed arguments into account for GpOffset and FpOffset,
+ // but don't actually store shadows for them.
+ if (IsFixed)
+ continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
}
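The net effect of the IsFixed checks can be summarized in a small model: fixed arguments still advance GpOffset/FpOffset (va_start's register save area indexes past them), but they never consume overflow-area space and never get a shadow stored. A hypothetical driver, with the real area-limit checks and base constants elided:

#include <cstdint>

enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

struct Offsets { unsigned Gp, Fp, Overflow; };

// Model of the AMD64 classification walk above. Real code also clamps
// Gp/Fp to their area ends and falls back to AK_Memory; omitted here.
void stepArg(Offsets &O, ArgKind AK, bool IsFixed, uint64_t Size) {
  switch (AK) {
  case AK_GeneralPurpose:
    O.Gp += 8;                       // fixed args still burn a GPR slot
    break;
  case AK_FloatingPoint:
    O.Fp += 16;                      // and an XMM slot
    break;
  case AK_Memory:
    if (IsFixed)
      return;                        // va_start steps over fixed stack args
    O.Overflow += (Size + 7) & ~uint64_t(7); // alignTo(Size, 8)
    break;
  }
  if (IsFixed)
    return;                          // no shadow store for fixed args
  // ... otherwise store the argument's shadow at the slot chosen above ...
}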
@@ -2952,20 +3053,22 @@ struct VarArgMIPS64Helper : public VarArgHelper {
void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
+ llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
+ CS.getFunctionType()->getNumParams(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
-#if defined(__MIPSEB__) || defined(MIPSEB)
- // Adjusting the shadow for argument with size < 8 to match the placement
- // of bits in big endian system
- if (ArgSize < 8)
- VAArgOffset += (8 - ArgSize);
-#endif
+ if (TargetTriple.getArch() == llvm::Triple::mips64) {
+ // Adjust the shadow for arguments with size < 8 to match the
+ // placement of bits on big-endian systems.
+ if (ArgSize < 8)
+ VAArgOffset += (8 - ArgSize);
+ }
Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
VAArgOffset += ArgSize;
- VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
+ VAArgOffset = alignTo(VAArgOffset, 8);
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
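The hunk above also replaces a host-side #ifdef (which asked about the machine running the compiler) with a check of the target triple. The point of the adjustment itself: on big-endian MIPS64, a sub-8-byte vararg sits in the high-address end of its 8-byte slot, so its shadow must be offset the same way. A tiny illustration:

#include <cassert>
#include <cstdint>

// Where, within an 8-byte va-arg slot, does the shadow of an
// ArgSize-byte argument start? (Model of the adjustment above.)
unsigned shadowStart(unsigned SlotOffset, uint64_t ArgSize, bool BigEndian) {
  if (BigEndian && ArgSize < 8)
    SlotOffset += 8 - ArgSize; // value occupies the high end of the slot
  return SlotOffset;
}

int main() {
  assert(shadowStart(16, 4, /*BigEndian=*/true) == 20);  // mips64
  assert(shadowStart(16, 4, /*BigEndian=*/false) == 16); // mips64el
  return 0;
}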
@@ -3038,13 +3141,13 @@ struct VarArgMIPS64Helper : public VarArgHelper {
/// \brief AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
- static const unsigned kAArch64GrArgSize = 56;
+ static const unsigned kAArch64GrArgSize = 64;
static const unsigned kAArch64VrArgSize = 128;
static const unsigned AArch64GrBegOffset = 0;
static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
// Make VR space aligned to 16 bytes.
- static const unsigned AArch64VrBegOffset = AArch64GrEndOffset + 8;
+ static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
+ kAArch64VrArgSize;
static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
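The size bump from 56 to 64 makes the GP save area match the eight vararg GPRs (x0..x7) of AAPCS64, which also makes the old 8-byte alignment pad before the VR area unnecessary: 64 is already 16-byte aligned. As a checkable sketch of the arithmetic:

// AAPCS64 vararg register save areas: 8 GPRs * 8 bytes, 8 SIMD regs * 16.
constexpr unsigned GrArgSize = 8 * 8;   // 64 (was 56 plus an 8-byte pad)
constexpr unsigned VrArgSize = 8 * 16;  // 128
constexpr unsigned VrBegOffset = GrArgSize;
static_assert(VrBegOffset % 16 == 0, "VR area is naturally 16-byte aligned");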
@@ -3089,9 +3192,11 @@ struct VarArgAArch64Helper : public VarArgHelper {
unsigned OverflowOffset = AArch64VAEndOffset;
const DataLayout &DL = F.getParent()->getDataLayout();
- for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
ArgIt != End; ++ArgIt) {
Value *A = *ArgIt;
+ unsigned ArgNo = CS.getArgumentNo(ArgIt);
+ bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
AK = AK_Memory;
@@ -3108,11 +3213,19 @@ struct VarArgAArch64Helper : public VarArgHelper {
VrOffset += 16;
break;
case AK_Memory:
+ // Don't count fixed arguments in the overflow area - va_start will
+ // skip right over them.
+ if (IsFixed)
+ continue;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
- OverflowOffset += RoundUpToAlignment(ArgSize, 8);
+ OverflowOffset += alignTo(ArgSize, 8);
break;
}
+ // Count Gp/Vr fixed arguments to their respective offsets, but don't
+ // bother to actually store a shadow.
+ if (IsFixed)
+ continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}
Constant *OverflowSize =
@@ -3271,6 +3384,163 @@ struct VarArgAArch64Helper : public VarArgHelper {
}
};
+/// \brief PowerPC64-specific implementation of VarArgHelper.
+struct VarArgPowerPC64Helper : public VarArgHelper {
+ Function &F;
+ MemorySanitizer &MS;
+ MemorySanitizerVisitor &MSV;
+ Value *VAArgTLSCopy;
+ Value *VAArgSize;
+
+ SmallVector<CallInst*, 16> VAStartInstrumentationList;
+
+ VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
+ MemorySanitizerVisitor &MSV)
+ : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
+ VAArgSize(nullptr) {}
+
+ void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
+ // For PowerPC, we need to deal with alignment of stack arguments -
+ // they are mostly aligned to 8 bytes, but vectors and i128 arrays
+ // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
+ // and QPX vectors are aligned to 32 bytes. For that reason, we
+ // compute the current offset from the stack pointer (which is always
+ // properly aligned) and the offset of the first vararg, then subtract them.
+ unsigned VAArgBase;
+ llvm::Triple TargetTriple(F.getParent()->getTargetTriple());
+ // The parameter save area starts 48 bytes from the frame pointer for
+ // ABIv1, and 32 bytes for ABIv2. This is usually determined by target
+ // endianness, but in theory it could be overridden by a function attribute.
+ // For simplicity, we ignore it here (it'd only matter for QPX vectors).
+ if (TargetTriple.getArch() == llvm::Triple::ppc64)
+ VAArgBase = 48;
+ else
+ VAArgBase = 32;
+ unsigned VAArgOffset = VAArgBase;
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
+ ArgIt != End; ++ArgIt) {
+ Value *A = *ArgIt;
+ unsigned ArgNo = CS.getArgumentNo(ArgIt);
+ bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
+ bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ if (IsByVal) {
+ assert(A->getType()->isPointerTy());
+ Type *RealTy = A->getType()->getPointerElementType();
+ uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
+ uint64_t ArgAlign = CS.getParamAlignment(ArgNo + 1);
+ if (ArgAlign < 8)
+ ArgAlign = 8;
+ VAArgOffset = alignTo(VAArgOffset, ArgAlign);
+ if (!IsFixed) {
+ Value *Base = getShadowPtrForVAArgument(RealTy, IRB,
+ VAArgOffset - VAArgBase);
+ IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
+ ArgSize, kShadowTLSAlignment);
+ }
+ VAArgOffset += alignTo(ArgSize, 8);
+ } else {
+ Value *Base;
+ uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
+ uint64_t ArgAlign = 8;
+ if (A->getType()->isArrayTy()) {
+ // Arrays are aligned to element size, except for long double
+ // arrays, which are aligned to 8 bytes.
+ Type *ElementTy = A->getType()->getArrayElementType();
+ if (!ElementTy->isPPC_FP128Ty())
+ ArgAlign = DL.getTypeAllocSize(ElementTy);
+ } else if (A->getType()->isVectorTy()) {
+ // Vectors are naturally aligned.
+ ArgAlign = DL.getTypeAllocSize(A->getType());
+ }
+ if (ArgAlign < 8)
+ ArgAlign = 8;
+ VAArgOffset = alignTo(VAArgOffset, ArgAlign);
+ if (DL.isBigEndian()) {
+ // Adjust the shadow for arguments with size < 8 to match the
+ // placement of bits on big-endian systems.
+ if (ArgSize < 8)
+ VAArgOffset += (8 - ArgSize);
+ }
+ if (!IsFixed) {
+ Base = getShadowPtrForVAArgument(A->getType(), IRB,
+ VAArgOffset - VAArgBase);
+ IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
+ }
+ VAArgOffset += ArgSize;
+ VAArgOffset = alignTo(VAArgOffset, 8);
+ }
+ if (IsFixed)
+ VAArgBase = VAArgOffset;
+ }
+
+ Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(),
+ VAArgOffset - VAArgBase);
+ // Reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
+ // class member; here it holds the total size of all varargs.
+ IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
+ }
+
+ /// \brief Compute the shadow address for a given va_arg.
+ Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
+ int ArgOffset) {
+ Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
+ Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
+ return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
+ "_msarg");
+ }
+
+ void visitVAStartInst(VAStartInst &I) override {
+ IRBuilder<> IRB(&I);
+ VAStartInstrumentationList.push_back(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */8, /* alignment */8, false);
+ }
+
+ void visitVACopyInst(VACopyInst &I) override {
+ IRBuilder<> IRB(&I);
+ Value *VAListTag = I.getArgOperand(0);
+ Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
+ // Unpoison the whole __va_list_tag.
+ // FIXME: magic ABI constants.
+ IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
+ /* size */8, /* alignment */8, false);
+ }
+
+ void finalizeInstrumentation() override {
+ assert(!VAArgSize && !VAArgTLSCopy &&
+ "finalizeInstrumentation called twice");
+ IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
+ VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
+ Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
+ VAArgSize);
+
+ if (!VAStartInstrumentationList.empty()) {
+ // If there is a va_start in this function, make a backup copy of
+ // va_arg_tls somewhere in the function entry block.
+ VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
+ IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
+ }
+
+ // Instrument va_start.
+ // Copy va_list shadow from the backup copy of the TLS contents.
+ for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
+ CallInst *OrigInst = VAStartInstrumentationList[i];
+ IRBuilder<> IRB(OrigInst->getNextNode());
+ Value *VAListTag = OrigInst->getArgOperand(0);
+ Value *RegSaveAreaPtrPtr =
+ IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
+ Type::getInt64PtrTy(*MS.C));
+ Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
+ Value *RegSaveAreaShadowPtr =
+ MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
+ }
+ }
+};
+
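To see the base/offset bookkeeping in action, here is a worked walk for a hypothetical ppc64le call printf(fmt, (int)x, (double)d) (ABIv2, so the parameter save area starts at 32; the figures below simply replay the loop above):

#include <cassert>

int main() {
  unsigned VAArgBase = 32, Off = 32;
  // fmt: fixed pointer argument, 8-byte slot; no shadow is stored, but
  // VAArgBase advances past it so vararg shadows start at offset 0.
  Off += 8;
  VAArgBase = Off;                // == 40
  // x (int, 4 bytes): first vararg; shadow goes to Off - VAArgBase == 0.
  assert(Off - VAArgBase == 0);
  Off = (Off + 4 + 7) & ~7u;      // advance by ArgSize, re-align to 8 -> 48
  // d (double): second vararg; shadow goes to Off - VAArgBase == 8.
  assert(Off - VAArgBase == 8);
  Off += 8;                       // -> 56
  // Total written to VAArgOverflowSizeTLS: 56 - 40 == 16 shadow bytes.
  assert(Off - VAArgBase == 16);
  return 0;
}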
/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
@@ -3297,6 +3567,9 @@ VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
return new VarArgMIPS64Helper(Func, Msan, Visitor);
else if (TargetTriple.getArch() == llvm::Triple::aarch64)
return new VarArgAArch64Helper(Func, Msan, Visitor);
+ else if (TargetTriple.getArch() == llvm::Triple::ppc64 ||
+ TargetTriple.getArch() == llvm::Triple::ppc64le)
+ return new VarArgPowerPC64Helper(Func, Msan, Visitor);
else
return new VarArgNoOpHelper(Func, Msan, Visitor);
}