Diffstat (limited to 'clang/lib/Sema/SemaChecking.cpp')
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 777
1 file changed, 574 insertions(+), 203 deletions(-)
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index dca81d1d275f..74742023d1b3 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -201,6 +201,87 @@ static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
return false;
}
+/// Check that the value argument for __builtin_is_aligned(value, alignment) and
+/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
+/// type (but not a function pointer) and that the alignment is a power of two.
+static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
+ if (checkArgCount(S, TheCall, 2))
+ return true;
+
+ clang::Expr *Source = TheCall->getArg(0);
+ bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
+
+ auto IsValidIntegerType = [](QualType Ty) {
+ return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
+ };
+ QualType SrcTy = Source->getType();
+ // We should also be able to use it with arrays (but not functions!).
+ if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
+ SrcTy = S.Context.getDecayedType(SrcTy);
+ }
+ if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
+ SrcTy->isFunctionPointerType()) {
+ // FIXME: this is not quite the right error message since we don't allow
+ // floating point types, or member pointers.
+ S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
+ << SrcTy;
+ return true;
+ }
+
+ clang::Expr *AlignOp = TheCall->getArg(1);
+ if (!IsValidIntegerType(AlignOp->getType())) {
+ S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
+ << AlignOp->getType();
+ return true;
+ }
+ Expr::EvalResult AlignResult;
+ unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
+ // We can't check validity of alignment if it is type dependent.
+ if (!AlignOp->isInstantiationDependent() &&
+ AlignOp->EvaluateAsInt(AlignResult, S.Context,
+ Expr::SE_AllowSideEffects)) {
+ llvm::APSInt AlignValue = AlignResult.Val.getInt();
+ llvm::APSInt MaxValue(
+ llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
+ if (AlignValue < 1) {
+ S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
+ return true;
+ }
+ if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
+ S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
+ << MaxValue.toString(10);
+ return true;
+ }
+ if (!AlignValue.isPowerOf2()) {
+ S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
+ return true;
+ }
+ if (AlignValue == 1) {
+ S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
+ << IsBooleanAlignBuiltin;
+ }
+ }
+
+ ExprResult SrcArg = S.PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
+ SourceLocation(), Source);
+ if (SrcArg.isInvalid())
+ return true;
+ TheCall->setArg(0, SrcArg.get());
+ ExprResult AlignArg =
+ S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ S.Context, AlignOp->getType(), false),
+ SourceLocation(), AlignOp);
+ if (AlignArg.isInvalid())
+ return true;
+ TheCall->setArg(1, AlignArg.get());
+ // For align_up/align_down, the return type is the same as the (potentially
+ // decayed) argument type including qualifiers. For is_aligned(), the result
+ // is always bool.
+ TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
+ return false;
+}
+
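As a usage sketch (not part of the patch; the wrapper function name is illustrative): once SemaBuiltinAlignment is satisfied, the builtins accept integers, object pointers, and decayed arrays, require a constant power-of-two alignment, and yield bool only for __builtin_is_aligned.

    #include <cstdint>

    void alignment_examples(char *p, std::uintptr_t addr) {
      char buf[32];
      void *a = __builtin_align_up(buf, 16);            // array decays to pointer
      std::uintptr_t b = __builtin_align_down(addr, 8); // plain integer operand
      bool c = __builtin_is_aligned(p, 4);              // result type is always bool
      // __builtin_align_up(p, 6);   // error: alignment is not a power of 2
      // __builtin_is_aligned(p, 0); // error: alignment must be at least 1
      (void)a; (void)b; (void)c;
    }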
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
if (checkArgCount(S, TheCall, 3))
return true;
@@ -340,7 +421,8 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
case Builtin::BI__builtin___strncat_chk:
case Builtin::BI__builtin___strncpy_chk:
case Builtin::BI__builtin___stpncpy_chk:
- case Builtin::BI__builtin___memccpy_chk: {
+ case Builtin::BI__builtin___memccpy_chk:
+ case Builtin::BI__builtin___mempcpy_chk: {
DiagID = diag::warn_builtin_chk_overflow;
IsChkVariant = true;
SizeIndex = TheCall->getNumArgs() - 2;
@@ -379,7 +461,9 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove:
case Builtin::BImemset:
- case Builtin::BI__builtin_memset: {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BImempcpy:
+ case Builtin::BI__builtin_mempcpy: {
DiagID = diag::warn_fortify_source_overflow;
SizeIndex = TheCall->getNumArgs() - 1;
ObjectIndex = 0;
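A hedged illustration of the mempcpy additions above: with the builtin on both the _chk and plain lists, a constant size larger than the destination should draw the same fortify-style warning as memcpy (sketch, assuming a Clang carrying this patch; diagnostic text paraphrased).

    char fortify_dst[8];
    char fortify_src[16];

    void fortify_example() {
      // Expected to warn: 'mempcpy' will always overflow (destination is 8 bytes).
      __builtin_mempcpy(fortify_dst, fortify_src, 16);
    }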
@@ -1354,6 +1438,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_is_aligned:
+ case Builtin::BI__builtin_align_up:
+ case Builtin::BI__builtin_align_down:
+ if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
+ return ExprError();
+ break;
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
@@ -1536,6 +1626,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
return ExprError();
@@ -1685,6 +1776,7 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
+ Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be;
bool IsInt64Long =
Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong;
@@ -1717,6 +1809,14 @@ bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
+bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ return false;
+ #include "clang/Basic/arm_mve_builtin_sema.inc"
+ }
+}
+
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
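The included arm_mve_builtin_sema.inc is generated by TableGen from the MVE intrinsic descriptions, so its real contents are not reproduced here. As a purely hypothetical sketch, the generated cases plug immediate-range checks into the switch above along these lines (the builtin name and range below are invented for illustration):

    // Hypothetical generated fragment (illustrative only):
    case ARM::BI__builtin_arm_mve_vshlq_n_s32: // invented name
      return SemaBuiltinConstantArgRange(TheCall, /*ArgNum=*/1, /*Low=*/0,
                                         /*High=*/31);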
@@ -1857,6 +1957,8 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall))
return true;
+ if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
@@ -3039,8 +3141,37 @@ bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
+bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ return CheckMipsBuiltinCpu(BuiltinID, TheCall) ||
+ CheckMipsBuiltinArgument(BuiltinID, TheCall);
+}
+
+bool Sema::CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall) {
+ const TargetInfo &TI = Context.getTargetInfo();
+
+ if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_mips_lwx) {
+ if (!TI.hasFeature("dsp"))
+ return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
+ }
-// CheckMipsBuiltinFunctionCall - Checks the constant value passed to the
+ if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
+ if (!TI.hasFeature("dspr2"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_mips_builtin_requires_dspr2);
+ }
+
+ if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
+ BuiltinID <= Mips::BI__builtin_msa_xori_b) {
+ if (!TI.hasFeature("msa"))
+ return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
+ }
+
+ return false;
+}
+
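Usage sketch (not part of the patch): a DSP intrinsic used without the matching target feature is now rejected up front rather than falling through to the range checks. The builtin below is real; the wrapper name is illustrative and the diagnostic wording paraphrased.

    typedef signed char v4i8 __attribute__((vector_size(4)));

    v4i8 dsp_example(v4i8 a, v4i8 b) {
      return __builtin_mips_addu_qb(a, b); // error unless built with -mdsp
    }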
+// CheckMipsBuiltinArgument - Checks that the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
@@ -3049,7 +3180,7 @@ bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
-bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0, m = 0;
switch (BuiltinID) {
default: return false;
@@ -4560,20 +4691,19 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
&& sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
"need to update code for modified forms");
static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
- AtomicExpr::AO__c11_atomic_fetch_xor + 1 ==
+ AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
AtomicExpr::AO__atomic_load,
"need to update code for modified C11 atomics");
bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
- Op <= AtomicExpr::AO__c11_atomic_fetch_xor) ||
+ Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
IsOpenCL;
bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
Op == AtomicExpr::AO__atomic_store_n ||
Op == AtomicExpr::AO__atomic_exchange_n ||
Op == AtomicExpr::AO__atomic_compare_exchange_n;
bool IsAddSub = false;
- bool IsMinMax = false;
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
@@ -4602,8 +4732,6 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
@@ -4624,12 +4752,14 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
- Form = Arithmetic;
- break;
-
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_max:
- IsMinMax = true;
Form = Arithmetic;
break;
@@ -4721,16 +4851,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
- if (IsMinMax) {
- const BuiltinType *BT = ValType->getAs<BuiltinType>();
- if (!BT || (BT->getKind() != BuiltinType::Int &&
- BT->getKind() != BuiltinType::UInt)) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_int32_or_ptr);
- return ExprError();
- }
- }
- if (!IsAddSub && !IsMinMax && !ValType->isIntegerType()) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ if (!IsAddSub && !ValType->isIntegerType()) {
+ Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
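With min/max folded into the generic Arithmetic form above, the fetch-min/max builtins type-check like fetch-add instead of demanding exactly int or unsigned int. A minimal sketch, assuming a compiler with this patch (function name illustrative):

    long long atomic_min_example(long long *p, long long v) {
      // Previously rejected (the old check required a 32-bit int);
      // now any integral type is accepted.
      return __atomic_fetch_min(p, v, __ATOMIC_SEQ_CST);
    }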
@@ -5506,7 +5628,8 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
- bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
+ bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
+ TT.getArch() == llvm::Triple::aarch64_32);
bool IsWindows = TT.isOSWindows();
bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
if (IsX64 || IsAArch64) {
@@ -5723,7 +5846,8 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
// Do standard promotions between the two arguments, returning their common
// type.
- QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
+ QualType Res = UsualArithmeticConversions(
+ OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
return true;
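For context, the usual arithmetic conversions applied here bring both operands of the unordered-compare builtins to a common type before comparing (sketch; wrapper name illustrative):

    bool compare_example(float a, double b) {
      return __builtin_isgreater(a, b); // 'a' is converted to double first
    }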
@@ -5763,36 +5887,41 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
<< SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
(*(TheCall->arg_end() - 1))->getEndLoc());
+ // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
+ // on all preceding parameters just being int. Try all of those.
+ for (unsigned i = 0; i < NumArgs - 1; ++i) {
+ Expr *Arg = TheCall->getArg(i);
+
+ if (Arg->isTypeDependent())
+ return false;
+
+ ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
+
+ if (Res.isInvalid())
+ return true;
+ TheCall->setArg(i, Res.get());
+ }
+
Expr *OrigArg = TheCall->getArg(NumArgs-1);
if (OrigArg->isTypeDependent())
return false;
+ // Usual Unary Conversions will convert half to float, which we want for
+ // machines that use fp16 conversion intrinsics. Else, we want to leave the
+ // type as it is, but do normal L->Rvalue conversions.
+ if (Context.getTargetInfo().useFP16ConversionIntrinsics())
+ OrigArg = UsualUnaryConversions(OrigArg).get();
+ else
+ OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
+ TheCall->setArg(NumArgs - 1, OrigArg);
+
// This operation requires a non-_Complex floating-point number.
if (!OrigArg->getType()->isRealFloatingType())
return Diag(OrigArg->getBeginLoc(),
diag::err_typecheck_call_invalid_unary_fp)
<< OrigArg->getType() << OrigArg->getSourceRange();
- // If this is an implicit conversion from float -> float, double, or
- // long double, remove it.
- if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
- // Only remove standard FloatCasts, leaving other casts inplace
- if (Cast->getCastKind() == CK_FloatingCast) {
- Expr *CastArg = Cast->getSubExpr();
- if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
- assert(
- (Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
- Cast->getType()->isSpecificBuiltinType(BuiltinType::Float) ||
- Cast->getType()->isSpecificBuiltinType(BuiltinType::LongDouble)) &&
- "promotion from float to either float, double, or long double is "
- "the only expected cast here");
- Cast->setSubExpr(nullptr);
- TheCall->setArg(NumArgs-1, CastArg);
- }
- }
- }
-
return false;
}
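A short usage sketch of the behavior implemented above: the five leading __builtin_fpclassify arguments are implicitly converted to int, and the final operand keeps its own floating-point type (modulo the fp16 case). The wrapper name is illustrative.

    #include <math.h>

    int classify_example(double d) {
      return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
                                  FP_SUBNORMAL, FP_ZERO, d);
    }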
@@ -6235,6 +6364,101 @@ bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
return false;
}
+/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
+/// constant expression representing a power of 2.
+bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
+ // and only if x is a power of 2.
+ if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
+ << Arg->getSourceRange();
+}
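The bit trick used above is worth a standalone look: x & (x-1) clears the lowest set bit, so the result is zero exactly when x has a single set bit. A minimal self-contained version (helper name illustrative):

    static bool isPow2(unsigned x) {
      return x > 0 && (x & (x - 1)) == 0;
    }
    // isPow2(1) and isPow2(64) are true; isPow2(0) and isPow2(96) are false.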
+
+static bool IsShiftedByte(llvm::APSInt Value) {
+ if (Value.isNegative())
+ return false;
+
+ // Check if it's a shifted byte, by shifting it down
+ while (true) {
+ // If the value fits in the bottom byte, the check passes.
+ if (Value < 0x100)
+ return true;
+
+ // Otherwise, if the value has _any_ bits in the bottom byte, the check
+ // fails.
+ if ((Value & 0xFF) != 0)
+ return false;
+
+ // If the bottom 8 bits are all 0, but something above that is nonzero,
+ // then shifting the value right by 8 bits won't affect whether it's a
+ // shifted byte or not. So do that, and go round again.
+ Value >>= 8;
+ }
+}
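A minimal sketch of the same loop on a plain unsigned value, with worked examples; this mirrors the APSInt logic above under the assumption of a non-negative input (helper name illustrative):

    static bool isShiftedByte(unsigned long long v) {
      while (v >= 0x100) {
        if (v & 0xFF)
          return false; // nonzero bits below the candidate byte
        v >>= 8;        // slide the value down one byte and retry
      }
      return true;      // fits in the bottom byte
    }
    // isShiftedByte(0xAB0000) == true; isShiftedByte(0xAB0100) == false.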
+
+/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
+/// a constant expression representing an arbitrary byte value shifted left by
+/// a multiple of 8 bits.
+bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (IsShiftedByte(Result))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
+ << Arg->getSourceRange();
+}
+
+/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
+/// TheCall is a constant expression representing either a shifted byte value,
+/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
+/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
+/// Arm MVE intrinsics.
+bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
+ int ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ // Check to see if it's in either of the required forms.
+ if (IsShiftedByte(Result) ||
+ (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_argument_not_shifted_byte_or_xxff)
+ << Arg->getSourceRange();
+}
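A runnable sketch of the combined predicate with sample values from both accepted forms (names illustrative):

    #include <cassert>

    static bool shiftedByteOrXXFF(unsigned v) {
      auto shiftedByte = [](unsigned x) {
        while (x >= 0x100) {
          if (x & 0xFF)
            return false;
          x >>= 8;
        }
        return true;
      };
      return shiftedByte(v) || (v > 0 && v < 0x10000 && (v & 0xFF) == 0xFF);
    }

    int main() {
      assert(shiftedByteOrXXFF(0x5600));  // shifted byte
      assert(shiftedByteOrXXFF(0x01FF));  // member of the 0x??FF progression
      assert(!shiftedByteOrXXFF(0x1234)); // two nonzero bytes
    }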
+
/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == AArch64::BI__builtin_arm_irg) {
@@ -9162,7 +9386,7 @@ void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
auto IsLiteralZeroArg = [](const Expr* E) -> bool {
const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
if (!MTE) return false;
- const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
+ const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
if (!Num) return false;
if (Num->getValue() != 0) return false;
return true;
@@ -11371,32 +11595,6 @@ static const IntegerLiteral *getIntegerLiteral(Expr *E) {
return IL;
}
-static void CheckConditionalWithEnumTypes(Sema &S, SourceLocation Loc,
- Expr *LHS, Expr *RHS) {
- QualType LHSStrippedType = LHS->IgnoreParenImpCasts()->getType();
- QualType RHSStrippedType = RHS->IgnoreParenImpCasts()->getType();
-
- const auto *LHSEnumType = LHSStrippedType->getAs<EnumType>();
- if (!LHSEnumType)
- return;
- const auto *RHSEnumType = RHSStrippedType->getAs<EnumType>();
- if (!RHSEnumType)
- return;
-
- // Ignore anonymous enums.
- if (!LHSEnumType->getDecl()->hasNameForLinkage())
- return;
- if (!RHSEnumType->getDecl()->hasNameForLinkage())
- return;
-
- if (S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType))
- return;
-
- S.Diag(Loc, diag::warn_conditional_mixed_enum_types)
- << LHSStrippedType << RHSStrippedType << LHS->getSourceRange()
- << RHS->getSourceRange();
-}
-
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
E = E->IgnoreParenImpCasts();
SourceLocation ExprLoc = E->getExprLoc();
@@ -11737,7 +11935,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
return;
if (isObjCSignedCharBool(S, T) && !Source->isCharType() &&
- !E->isKnownToHaveBooleanValue()) {
+ !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
return adornObjCBoolConversionDiagWithTernaryFixit(
S, E,
S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
@@ -11888,8 +12086,6 @@ static void CheckConditionalOperator(Sema &S, ConditionalOperator *E,
bool Suspicious = false;
CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
- CheckConditionalWithEnumTypes(S, E->getBeginLoc(), E->getTrueExpr(),
- E->getFalseExpr());
if (T->isBooleanType())
DiagnoseIntInBoolContext(S, E);
@@ -12360,8 +12556,8 @@ namespace {
/// Visitor for expressions which looks for unsequenced operations on the
/// same object.
-class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
- using Base = EvaluatedExprVisitor<SequenceChecker>;
+class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
+ using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
/// A tree of sequenced regions within an expression. Two regions are
/// unsequenced if one is an ancestor or a descendent of the other. When we
@@ -12431,7 +12627,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
};
/// An object for which we can track unsequenced uses.
- using Object = NamedDecl *;
+ using Object = const NamedDecl *;
/// Different flavors of object usage which we track. We only track the
/// least-sequenced usage of each kind.
@@ -12450,17 +12646,19 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
UK_Count = UK_ModAsSideEffect + 1
};
+ /// Bundle together a sequencing region and the expression corresponding
+ /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
struct Usage {
- Expr *Use;
+ const Expr *UsageExpr;
SequenceTree::Seq Seq;
- Usage() : Use(nullptr), Seq() {}
+ Usage() : UsageExpr(nullptr), Seq() {}
};
struct UsageInfo {
Usage Uses[UK_Count];
- /// Have we issued a diagnostic for this variable already?
+ /// Have we issued a diagnostic for this object already?
bool Diagnosed;
UsageInfo() : Uses(), Diagnosed(false) {}
@@ -12484,7 +12682,7 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
/// Expressions to check later. We defer checking these to reduce
/// stack usage.
- SmallVectorImpl<Expr *> &WorkList;
+ SmallVectorImpl<const Expr *> &WorkList;
/// RAII object wrapping the visitation of a sequenced subexpression of an
/// expression. At the end of this process, the side-effects of the evaluation
@@ -12498,10 +12696,13 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
}
~SequencedSubexpression() {
- for (auto &M : llvm::reverse(ModAsSideEffect)) {
- UsageInfo &U = Self.UsageMap[M.first];
- auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect];
- Self.addUsage(U, M.first, SideEffectUsage.Use, UK_ModAsValue);
+ for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
+ // Add a new usage with usage kind UK_ModAsValue, and then restore
+ // the previous usage with UK_ModAsSideEffect (thus clearing it if
+ // the previous one was empty).
+ UsageInfo &UI = Self.UsageMap[M.first];
+ auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
+ Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
SideEffectUsage = M.second;
}
Self.ModAsSideEffect = OldModAsSideEffect;
@@ -12545,49 +12746,60 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
/// Find the object which is produced by the specified expression,
/// if any.
- Object getObject(Expr *E, bool Mod) const {
+ Object getObject(const Expr *E, bool Mod) const {
E = E->IgnoreParenCasts();
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
return getObject(UO->getSubExpr(), Mod);
- } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Comma)
return getObject(BO->getRHS(), Mod);
if (Mod && BO->isAssignmentOp())
return getObject(BO->getLHS(), Mod);
- } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
// FIXME: Check for more interesting cases, like "x.n = ++x.n".
if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
return ME->getMemberDecl();
- } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
// FIXME: If this is a reference, map through to its value.
return DRE->getDecl();
return nullptr;
}
- /// Note that an object was modified or used by an expression.
- void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) {
+ /// Note that an object \p O was modified or used by an expression
+ /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
+ /// the object \p O as obtained via the \p UsageMap.
+ void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
+ // Get the old usage for the given object and usage kind.
Usage &U = UI.Uses[UK];
- if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) {
+ if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
+ // If we have a modification as side effect and are in a sequenced
+ // subexpression, save the old Usage so that we can restore it later
+ // in SequencedSubexpression::~SequencedSubexpression.
if (UK == UK_ModAsSideEffect && ModAsSideEffect)
ModAsSideEffect->push_back(std::make_pair(O, U));
- U.Use = Ref;
+ // Then record the new usage with the current sequencing region.
+ U.UsageExpr = UsageExpr;
U.Seq = Region;
}
}
- /// Check whether a modification or use conflicts with a prior usage.
- void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind,
- bool IsModMod) {
+ /// Check whether a modification or use of an object \p O in an expression
+ /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
+ /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
+ /// \p IsModMod is true when we are checking for a mod-mod unsequenced
+ /// usage and false when we are checking for a mod-use unsequenced usage.
+ void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
+ UsageKind OtherKind, bool IsModMod) {
if (UI.Diagnosed)
return;
const Usage &U = UI.Uses[OtherKind];
- if (!U.Use || !Tree.isUnsequenced(Region, U.Seq))
+ if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
return;
- Expr *Mod = U.Use;
- Expr *ModOrUse = Ref;
+ const Expr *Mod = U.UsageExpr;
+ const Expr *ModOrUse = UsageExpr;
if (OtherKind == UK_Use)
std::swap(Mod, ModOrUse);
@@ -12599,47 +12811,79 @@ class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
UI.Diagnosed = true;
}
- void notePreUse(Object O, Expr *Use) {
- UsageInfo &U = UsageMap[O];
+ // A note on note{Pre, Post}{Use, Mod}:
+ //
+ // (It helps to follow the algorithm with an expression such as
+ // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
+ // operations before C++17 and both are well-defined in C++17).
+ //
+ // When visiting a node which uses/modifies an object, we first call
+ // notePreUse or notePreMod before visiting its sub-expression(s). At this
+ // point the children of the current node have not yet been visited, so the
+ // uses/modifications resulting from them have not been recorded yet.
+ //
+ // We then visit the children of the current node. After that notePostUse or
+ // notePostMod is called. These will 1) detect an unsequenced modification
+ // as side effect (as in "k++ + k") and 2) add a new usage with the
+ // appropriate usage kind.
+ //
+ // We also have to be careful that some operation sequences modification as
+ // side effect as well (for example: || or ,). To account for this we wrap
+ // the visitation of such a sub-expression (for example: the LHS of || or ,)
+ // with SequencedSubexpression. SequencedSubexpression is an RAII object
+ // which record usages which are modifications as side effect, and then
+ // downgrade them (or more accurately restore the previous usage which was a
+ // modification as side effect) when exiting the scope of the sequenced
+ // subexpression.
+
+ void notePreUse(Object O, const Expr *UseExpr) {
+ UsageInfo &UI = UsageMap[O];
// Uses conflict with other modifications.
- checkUsage(O, U, Use, UK_ModAsValue, false);
+ checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
}
- void notePostUse(Object O, Expr *Use) {
- UsageInfo &U = UsageMap[O];
- checkUsage(O, U, Use, UK_ModAsSideEffect, false);
- addUsage(U, O, Use, UK_Use);
+ void notePostUse(Object O, const Expr *UseExpr) {
+ UsageInfo &UI = UsageMap[O];
+ checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
+ /*IsModMod=*/false);
+ addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
}
- void notePreMod(Object O, Expr *Mod) {
- UsageInfo &U = UsageMap[O];
+ void notePreMod(Object O, const Expr *ModExpr) {
+ UsageInfo &UI = UsageMap[O];
// Modifications conflict with other modifications and with uses.
- checkUsage(O, U, Mod, UK_ModAsValue, true);
- checkUsage(O, U, Mod, UK_Use, false);
+ checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
+ checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
}
- void notePostMod(Object O, Expr *Use, UsageKind UK) {
- UsageInfo &U = UsageMap[O];
- checkUsage(O, U, Use, UK_ModAsSideEffect, true);
- addUsage(U, O, Use, UK);
+ void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
+ UsageInfo &UI = UsageMap[O];
+ checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
+ /*IsModMod=*/true);
+ addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
}
public:
- SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList)
+ SequenceChecker(Sema &S, const Expr *E,
+ SmallVectorImpl<const Expr *> &WorkList)
: Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
Visit(E);
+ // Silence a -Wunused-private-field since WorkList is now unused.
+ // TODO: Evaluate if it can be used, and if not remove it.
+ (void)this->WorkList;
}
- void VisitStmt(Stmt *S) {
+ void VisitStmt(const Stmt *S) {
// Skip all statements which aren't expressions for now.
}
- void VisitExpr(Expr *E) {
+ void VisitExpr(const Expr *E) {
// By default, just recurse to evaluated subexpressions.
Base::VisitStmt(E);
}
- void VisitCastExpr(CastExpr *E) {
+ void VisitCastExpr(const CastExpr *E) {
Object O = Object();
if (E->getCastKind() == CK_LValueToRValue)
O = getObject(E->getSubExpr(), false);
@@ -12651,7 +12895,8 @@ public:
notePostUse(O, E);
}
- void VisitSequencedExpressions(Expr *SequencedBefore, Expr *SequencedAfter) {
+ void VisitSequencedExpressions(const Expr *SequencedBefore,
+ const Expr *SequencedAfter) {
SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
SequenceTree::Seq AfterRegion = Tree.allocate(Region);
SequenceTree::Seq OldRegion = Region;
@@ -12671,17 +12916,46 @@ public:
Tree.merge(AfterRegion);
}
- void VisitArraySubscriptExpr(ArraySubscriptExpr *ASE) {
+ void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
// C++17 [expr.sub]p1:
// The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
// expression E1 is sequenced before the expression E2.
if (SemaRef.getLangOpts().CPlusPlus17)
VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
- else
- Base::VisitStmt(ASE);
+ else {
+ Visit(ASE->getLHS());
+ Visit(ASE->getRHS());
+ }
+ }
+
+ void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
+ void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
+ void VisitBinPtrMem(const BinaryOperator *BO) {
+ // C++17 [expr.mptr.oper]p4:
+ // Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
+ // the expression E1 is sequenced before the expression E2.
+ if (SemaRef.getLangOpts().CPlusPlus17)
+ VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
+ else {
+ Visit(BO->getLHS());
+ Visit(BO->getRHS());
+ }
}
- void VisitBinComma(BinaryOperator *BO) {
+ void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
+ void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
+ void VisitBinShlShr(const BinaryOperator *BO) {
+ // C++17 [expr.shift]p4:
+ // The expression E1 is sequenced before the expression E2.
+ if (SemaRef.getLangOpts().CPlusPlus17)
+ VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
+ else {
+ Visit(BO->getLHS());
+ Visit(BO->getRHS());
+ }
+ }
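The effect of the new shift handling, as a sketch (function name illustrative): pre-C++17 the operands are unsequenced and diagnosed, while C++17 sequences E1 before E2.

    int shift_example(int i) {
      return i << i++; // -Wunsequenced before C++17; well-defined in C++17
    }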
+
+ void VisitBinComma(const BinaryOperator *BO) {
// C++11 [expr.comma]p1:
// Every value computation and side effect associated with the left
// expression is sequenced before every value computation and side
@@ -12689,47 +12963,77 @@ public:
VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
}
- void VisitBinAssign(BinaryOperator *BO) {
- // The modification is sequenced after the value computation of the LHS
- // and RHS, so check it before inspecting the operands and update the
+ void VisitBinAssign(const BinaryOperator *BO) {
+ SequenceTree::Seq RHSRegion;
+ SequenceTree::Seq LHSRegion;
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ RHSRegion = Tree.allocate(Region);
+ LHSRegion = Tree.allocate(Region);
+ } else {
+ RHSRegion = Region;
+ LHSRegion = Region;
+ }
+ SequenceTree::Seq OldRegion = Region;
+
+ // C++11 [expr.ass]p1:
+ // [...] the assignment is sequenced after the value computation
+ // of the right and left operands, [...]
+ //
+ // so check it before inspecting the operands and update the
// map afterwards.
- Object O = getObject(BO->getLHS(), true);
- if (!O)
- return VisitExpr(BO);
+ Object O = getObject(BO->getLHS(), /*Mod=*/true);
+ if (O)
+ notePreMod(O, BO);
+
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ // C++17 [expr.ass]p1:
+ // [...] The right operand is sequenced before the left operand. [...]
+ {
+ SequencedSubexpression SeqBefore(*this);
+ Region = RHSRegion;
+ Visit(BO->getRHS());
+ }
- notePreMod(O, BO);
+ Region = LHSRegion;
+ Visit(BO->getLHS());
- // C++11 [expr.ass]p7:
- // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated
- // only once.
- //
- // Therefore, for a compound assignment operator, O is considered used
- // everywhere except within the evaluation of E1 itself.
- if (isa<CompoundAssignOperator>(BO))
- notePreUse(O, BO);
+ if (O && isa<CompoundAssignOperator>(BO))
+ notePostUse(O, BO);
- Visit(BO->getLHS());
+ } else {
+ // C++11 does not specify any sequencing between the LHS and RHS.
+ Region = LHSRegion;
+ Visit(BO->getLHS());
- if (isa<CompoundAssignOperator>(BO))
- notePostUse(O, BO);
+ if (O && isa<CompoundAssignOperator>(BO))
+ notePostUse(O, BO);
- Visit(BO->getRHS());
+ Region = RHSRegion;
+ Visit(BO->getRHS());
+ }
// C++11 [expr.ass]p1:
- // the assignment is sequenced [...] before the value computation of the
- // assignment expression.
+ // the assignment is sequenced [...] before the value computation of the
+ // assignment expression.
// C11 6.5.16/3 has no such rule.
- notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
- : UK_ModAsSideEffect);
+ Region = OldRegion;
+ if (O)
+ notePostMod(O, BO,
+ SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
+ : UK_ModAsSideEffect);
+ if (SemaRef.getLangOpts().CPlusPlus17) {
+ Tree.merge(RHSRegion);
+ Tree.merge(LHSRegion);
+ }
}
- void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) {
+ void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
VisitBinAssign(CAO);
}
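A sketch of what the rewritten VisitBinAssign models (function name illustrative): C++17 sequences the right operand before the left, so this classic case warns only pre-C++17.

    void assign_example(int k) {
      int a[2] = {};
      a[k] = k++; // unsequenced before C++17; well-defined in C++17 (RHS first)
    }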
- void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
- void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
- void VisitUnaryPreIncDec(UnaryOperator *UO) {
+ void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
+ void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
+ void VisitUnaryPreIncDec(const UnaryOperator *UO) {
Object O = getObject(UO->getSubExpr(), true);
if (!O)
return VisitExpr(UO);
@@ -12738,13 +13042,14 @@ public:
Visit(UO->getSubExpr());
// C++11 [expr.pre.incr]p1:
// the expression ++x is equivalent to x+=1
- notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
- : UK_ModAsSideEffect);
+ notePostMod(O, UO,
+ SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
+ : UK_ModAsSideEffect);
}
- void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
- void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
- void VisitUnaryPostIncDec(UnaryOperator *UO) {
+ void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
+ void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
+ void VisitUnaryPostIncDec(const UnaryOperator *UO) {
Object O = getObject(UO->getSubExpr(), true);
if (!O)
return VisitExpr(UO);
@@ -12754,67 +13059,129 @@ public:
notePostMod(O, UO, UK_ModAsSideEffect);
}
- /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
- void VisitBinLOr(BinaryOperator *BO) {
- // The side-effects of the LHS of an '&&' are sequenced before the
- // value computation of the RHS, and hence before the value computation
- // of the '&&' itself, unless the LHS evaluates to zero. We treat them
- // as if they were unconditionally sequenced.
+ void VisitBinLOr(const BinaryOperator *BO) {
+ // C++11 [expr.log.or]p2:
+ // If the second expression is evaluated, every value computation and
+ // side effect associated with the first expression is sequenced before
+ // every value computation and side effect associated with the
+ // second expression.
+ SequenceTree::Seq LHSRegion = Tree.allocate(Region);
+ SequenceTree::Seq RHSRegion = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
EvaluationTracker Eval(*this);
{
SequencedSubexpression Sequenced(*this);
+ Region = LHSRegion;
Visit(BO->getLHS());
}
- bool Result;
- if (Eval.evaluate(BO->getLHS(), Result)) {
- if (!Result)
- Visit(BO->getRHS());
- } else {
- // Check for unsequenced operations in the RHS, treating it as an
- // entirely separate evaluation.
- //
- // FIXME: If there are operations in the RHS which are unsequenced
- // with respect to operations outside the RHS, and those operations
- // are unconditionally evaluated, diagnose them.
- WorkList.push_back(BO->getRHS());
+ // C++11 [expr.log.or]p1:
+ // [...] the second operand is not evaluated if the first operand
+ // evaluates to true.
+ bool EvalResult = false;
+ bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
+ bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
+ if (ShouldVisitRHS) {
+ Region = RHSRegion;
+ Visit(BO->getRHS());
}
- }
- void VisitBinLAnd(BinaryOperator *BO) {
+
+ Region = OldRegion;
+ Tree.merge(LHSRegion);
+ Tree.merge(RHSRegion);
+ }
+
+ void VisitBinLAnd(const BinaryOperator *BO) {
+ // C++11 [expr.log.and]p2:
+ // If the second expression is evaluated, every value computation and
+ // side effect associated with the first expression is sequenced before
+ // every value computation and side effect associated with the
+ // second expression.
+ SequenceTree::Seq LHSRegion = Tree.allocate(Region);
+ SequenceTree::Seq RHSRegion = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
EvaluationTracker Eval(*this);
{
SequencedSubexpression Sequenced(*this);
+ Region = LHSRegion;
Visit(BO->getLHS());
}
- bool Result;
- if (Eval.evaluate(BO->getLHS(), Result)) {
- if (Result)
- Visit(BO->getRHS());
- } else {
- WorkList.push_back(BO->getRHS());
+ // C++11 [expr.log.and]p1:
+ // [...] the second operand is not evaluated if the first operand is false.
+ bool EvalResult = false;
+ bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
+ bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
+ if (ShouldVisitRHS) {
+ Region = RHSRegion;
+ Visit(BO->getRHS());
}
- }
- // Only visit the condition, unless we can be sure which subexpression will
- // be chosen.
- void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
+ Region = OldRegion;
+ Tree.merge(LHSRegion);
+ Tree.merge(RHSRegion);
+ }
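Illustration of the && / || model above (function name illustrative): the LHS is sequenced before the RHS, so only genuinely unsequenced siblings are diagnosed.

    bool logical_example(int i) {
      return (i++ && i)          // OK: LHS sequenced before RHS
             && (i++ + i++ > 0); // -Wunsequenced: two modifications of 'i'
    }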
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
+ // C++11 [expr.cond]p1:
+ // [...] Every value computation and side effect associated with the first
+ // expression is sequenced before every value computation and side effect
+ // associated with the second or third expression.
+ SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
+
+ // No sequencing is specified between the true and false expressions.
+ // However, since exactly one of the two is going to be evaluated, we can
+ // consider them to be sequenced. This is needed to avoid warning on
+ // something like "x ? y += 1 : y += 2;" in the case where we will visit
+ // both the true and false expressions because we can't evaluate x.
+ // This will still allow us to detect an expression like (pre C++17)
+ // "(x ? y += 1 : y += 2) = y".
+ //
+ // We don't wrap the visitation of the true and false expressions with
+ // SequencedSubexpression because we don't want to downgrade modifications
+ // as side effects in the true and false expressions after the visitation
+ // is done (for example, in the expression "(x ? y++ : y++) + y" we should
+ // not warn between the two "y++", but we should warn between the "y++"
+ // and the "y").
+ SequenceTree::Seq TrueRegion = Tree.allocate(Region);
+ SequenceTree::Seq FalseRegion = Tree.allocate(Region);
+ SequenceTree::Seq OldRegion = Region;
+
EvaluationTracker Eval(*this);
{
SequencedSubexpression Sequenced(*this);
+ Region = ConditionRegion;
Visit(CO->getCond());
}
- bool Result;
- if (Eval.evaluate(CO->getCond(), Result))
- Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
- else {
- WorkList.push_back(CO->getTrueExpr());
- WorkList.push_back(CO->getFalseExpr());
+ // C++11 [expr.cond]p1:
+ // [...] The first expression is contextually converted to bool (Clause 4).
+ // It is evaluated and if it is true, the result of the conditional
+ // expression is the value of the second expression, otherwise that of the
+ // third expression. Only one of the second and third expressions is
+ // evaluated. [...]
+ bool EvalResult = false;
+ bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
+ bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
+ bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
+ if (ShouldVisitTrueExpr) {
+ Region = TrueRegion;
+ Visit(CO->getTrueExpr());
+ }
+ if (ShouldVisitFalseExpr) {
+ Region = FalseRegion;
+ Visit(CO->getFalseExpr());
}
+
+ Region = OldRegion;
+ Tree.merge(ConditionRegion);
+ Tree.merge(TrueRegion);
+ Tree.merge(FalseRegion);
}
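A sketch of the conditional-operator behavior described in the comments above (function name illustrative):

    void cond_example(bool x, int y) {
      int k = x ? y += 1 : y += 2; // no warning: arms are mutually exclusive
      int m = (x ? y++ : y++) + y; // -Wunsequenced: 'y++' vs the trailing 'y'
      (void)k; (void)m;
    }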
- void VisitCallExpr(CallExpr *CE) {
+ void VisitCallExpr(const CallExpr *CE) {
// C++11 [intro.execution]p15:
// When calling a function [...], every value computation and side effect
// associated with any argument expression, or with the postfix expression
@@ -12822,12 +13189,13 @@ public:
// expression or statement in the body of the function [and thus before
// the value computation of its result].
SequencedSubexpression Sequenced(*this);
- Base::VisitCallExpr(CE);
+ SemaRef.runWithSufficientStackSpace(CE->getExprLoc(),
+ [&] { Base::VisitCallExpr(CE); });
// FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
}
- void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
+ void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
// This is a call, so all subexpressions are sequenced before the result.
SequencedSubexpression Sequenced(*this);
@@ -12837,8 +13205,8 @@ public:
// In C++11, list initializations are sequenced.
SmallVector<SequenceTree::Seq, 32> Elts;
SequenceTree::Seq Parent = Region;
- for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
- E = CCE->arg_end();
+ for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
I != E; ++I) {
Region = Tree.allocate(Parent);
Elts.push_back(Region);
@@ -12851,7 +13219,7 @@ public:
Tree.merge(Elts[I]);
}
- void VisitInitListExpr(InitListExpr *ILE) {
+ void VisitInitListExpr(const InitListExpr *ILE) {
if (!SemaRef.getLangOpts().CPlusPlus11)
return VisitExpr(ILE);
@@ -12859,8 +13227,9 @@ public:
SmallVector<SequenceTree::Seq, 32> Elts;
SequenceTree::Seq Parent = Region;
for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
- Expr *E = ILE->getInit(I);
- if (!E) continue;
+ const Expr *E = ILE->getInit(I);
+ if (!E)
+ continue;
Region = Tree.allocate(Parent);
Elts.push_back(Region);
Visit(E);
@@ -12875,11 +13244,11 @@ public:
} // namespace
-void Sema::CheckUnsequencedOperations(Expr *E) {
- SmallVector<Expr *, 8> WorkList;
+void Sema::CheckUnsequencedOperations(const Expr *E) {
+ SmallVector<const Expr *, 8> WorkList;
WorkList.push_back(E);
while (!WorkList.empty()) {
- Expr *Item = WorkList.pop_back_val();
+ const Expr *Item = WorkList.pop_back_val();
SequenceChecker(*this, Item, WorkList);
}
}
@@ -14577,6 +14946,8 @@ void Sema::RefersToMemberWithReducedAlignment(
bool AnyIsPacked = false;
do {
QualType BaseType = ME->getBase()->getType();
+ if (BaseType->isDependentType())
+ return;
if (ME->isArrow())
BaseType = BaseType->getPointeeType();
RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();