 include/clang/Basic/DiagnosticSemaKinds.td |    2
 include/clang/Sema/Sema.h                  |    2
 lib/Basic/Targets.cpp                      |   32
 lib/CodeGen/CGStmtOpenMP.cpp               |   50
 lib/CodeGen/CodeGenFunction.h              |   97
 lib/Headers/CMakeLists.txt                 |    1
 lib/Headers/msa.h                          |  583
 lib/Sema/SemaChecking.cpp                  |  198
 lib/Sema/SemaExprCXX.cpp                   |   14
 test/CodeGen/builtins-mips-msa-error.c     |  421
 test/CodeGen/builtins-mips-msa.c           | 1409
 test/OpenMP/cancel_codegen.cpp             |    2
 test/SemaCXX/cxx11-crashes.cpp             |   12
 13 files changed, 2041 insertions(+), 782 deletions(-)
diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td
index 3963f7581880..98b687b8e821 100644
--- a/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/include/clang/Basic/DiagnosticSemaKinds.td
@@ -7621,6 +7621,8 @@ def err_invalid_neon_type_code : Error<
"incompatible constant for this __builtin_neon function">;
def err_argument_invalid_range : Error<
"argument should be a value from %0 to %1">;
+def err_argument_not_multiple : Error<
+ "argument should be a multiple of %0">;
def warn_neon_vector_initializer_non_portable : Warning<
"vector initializers are not compatible with NEON intrinsics in big endian "
"mode">, InGroup<DiagGroup<"nonportable-vector-initialization">>;
diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h
index 0d1c8fa48cdc..437a44a30711 100644
--- a/include/clang/Sema/Sema.h
+++ b/include/clang/Sema/Sema.h
@@ -9417,6 +9417,8 @@ private:
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
+ bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
+ unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 643e191fb01a..be5d4ad8feda 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -2081,21 +2081,23 @@ public:
static GPUKind parseAMDGCNName(StringRef Name) {
return llvm::StringSwitch<GPUKind>(Name)
- .Case("tahiti", GK_SOUTHERN_ISLANDS)
- .Case("pitcairn", GK_SOUTHERN_ISLANDS)
- .Case("verde", GK_SOUTHERN_ISLANDS)
- .Case("oland", GK_SOUTHERN_ISLANDS)
- .Case("hainan", GK_SOUTHERN_ISLANDS)
- .Case("bonaire", GK_SEA_ISLANDS)
- .Case("kabini", GK_SEA_ISLANDS)
- .Case("kaveri", GK_SEA_ISLANDS)
- .Case("hawaii", GK_SEA_ISLANDS)
- .Case("mullins", GK_SEA_ISLANDS)
- .Case("tonga", GK_VOLCANIC_ISLANDS)
- .Case("iceland", GK_VOLCANIC_ISLANDS)
- .Case("carrizo", GK_VOLCANIC_ISLANDS)
- .Case("fiji", GK_VOLCANIC_ISLANDS)
- .Case("stoney", GK_VOLCANIC_ISLANDS)
+ .Case("tahiti", GK_SOUTHERN_ISLANDS)
+ .Case("pitcairn", GK_SOUTHERN_ISLANDS)
+ .Case("verde", GK_SOUTHERN_ISLANDS)
+ .Case("oland", GK_SOUTHERN_ISLANDS)
+ .Case("hainan", GK_SOUTHERN_ISLANDS)
+ .Case("bonaire", GK_SEA_ISLANDS)
+ .Case("kabini", GK_SEA_ISLANDS)
+ .Case("kaveri", GK_SEA_ISLANDS)
+ .Case("hawaii", GK_SEA_ISLANDS)
+ .Case("mullins", GK_SEA_ISLANDS)
+ .Case("tonga", GK_VOLCANIC_ISLANDS)
+ .Case("iceland", GK_VOLCANIC_ISLANDS)
+ .Case("carrizo", GK_VOLCANIC_ISLANDS)
+ .Case("fiji", GK_VOLCANIC_ISLANDS)
+ .Case("stoney", GK_VOLCANIC_ISLANDS)
+ .Case("polaris10", GK_VOLCANIC_ISLANDS)
+ .Case("polaris11", GK_VOLCANIC_ISLANDS)
.Default(GK_NONE);
}
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index 8937685fdc7b..d214340bdafe 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -1767,17 +1767,11 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- SourceLocation ELoc = S.getLocEnd();
- auto &&CodeGen = [DynamicOrOrdered, ELoc](CodeGenFunction &CGF) {
+ auto &&CodeGen = [DynamicOrOrdered, &S](CodeGenFunction &CGF) {
if (!DynamicOrOrdered)
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
};
- CodeGen(*this);
-
- OpenMPDirectiveKind DKind = S.getDirectiveKind();
- if (DKind == OMPD_for || DKind == OMPD_parallel_for ||
- DKind == OMPD_distribute_parallel_for)
- OMPCancelStack.back().CodeGen = CodeGen;
+ OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
}
void CodeGenFunction::EmitOMPForOuterLoop(
@@ -1889,11 +1883,12 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
void CodeGenFunction::EmitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &S) {
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
- OMPCancelStackRAII CancelRegion(*this);
CGM.getOpenMPRuntime().emitInlinedDirective(
*this, OMPD_distribute_parallel_for,
[&S](CodeGenFunction &CGF, PrePostActionTy &) {
OMPLoopScope PreInitScope(CGF, S);
+ OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
+ /*HasCancel=*/false);
CGF.EmitStmt(
cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
});
@@ -2082,15 +2077,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
[](CodeGenFunction &) {});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
- SourceLocation ELoc = S.getLocEnd();
- auto &&CodeGen = [ELoc](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
};
- CodeGen(*this);
- OpenMPDirectiveKind DKind = S.getDirectiveKind();
- if (DKind == OMPD_for || DKind == OMPD_parallel_for ||
- DKind == OMPD_distribute_parallel_for)
- OMPCancelStack.back().CodeGen = CodeGen;
+ OMPCancelStack.emitExit(*this, S.getDirectiveKind(), CodeGen);
} else {
const bool IsMonotonic =
Ordered || ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
@@ -2140,11 +2130,11 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
+ OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
};
{
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
- OMPCancelStackRAII CancelRegion(*this);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen,
S.hasCancel());
}
@@ -2187,7 +2177,6 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, Stmt, CS, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
- OMPCancelStackRAII CancelRegion(CGF);
auto &C = CGF.CGM.getContext();
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
@@ -2282,12 +2271,10 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
- SourceLocation ELoc = S.getLocEnd();
- auto &&FinalCodeGen = [ELoc](CodeGenFunction &CGF) {
- CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, ELoc);
+ auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+ CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocEnd());
};
- FinalCodeGen(CGF);
- CGF.OMPCancelStack.back().CodeGen = FinalCodeGen;
+ CGF.OMPCancelStack.emitExit(CGF, S.getDirectiveKind(), CodeGen);
CGF.EmitOMPReductionClauseFinal(S);
// Emit post-update of the reduction variables if IsLastIter != 0.
emitPostUpdateForReductionClause(
@@ -2309,6 +2296,7 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
HasCancel = OSD->hasCancel();
else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
HasCancel = OPSD->hasCancel();
+ OMPCancelStackRAII CancelRegion(*this, S.getDirectiveKind(), HasCancel);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
HasCancel);
// Emit barrier for lastprivates only if 'sections' directive has 'nowait'
@@ -2412,7 +2400,7 @@ void CodeGenFunction::EmitOMPParallelForDirective(
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- OMPCancelStackRAII CancelRegion(CGF);
+ OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
CGF.EmitOMPWorksharingLoop(S);
};
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
@@ -3412,14 +3400,14 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
- if (Kind == OMPD_parallel || Kind == OMPD_task)
+ if (Kind == OMPD_parallel || Kind == OMPD_task ||
+ Kind == OMPD_target_parallel)
return ReturnBlock;
assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
- Kind == OMPD_distribute_parallel_for);
- if (!OMPCancelStack.back().ExitBlock.isValid())
- OMPCancelStack.back().ExitBlock = getJumpDestInCurrentScope("cancel.exit");
- return OMPCancelStack.back().ExitBlock;
+ Kind == OMPD_distribute_parallel_for ||
+ Kind == OMPD_target_parallel_for);
+ return OMPCancelStack.getExitBlock();
}
// Generate the instructions for '#pragma omp target data' directive.
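
For reference, a hedged sketch of the user-level pattern this machinery serves: a cancel inside a statically scheduled worksharing loop must leave through the cancel.exit block, still run emitForStaticFinish (the __kmpc_for_static_fini call) there, and then meet the normal path at cancel.cont. Compiled with -fopenmp:

    void scale(int *a, int n) {
    #pragma omp parallel for
      for (int i = 0; i < n; ++i) {
        if (a[i] < 0) {
    #pragma omp cancel for /* branches to the cancel.exit block */
        }
        a[i] *= 2;
      }
    }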
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f61ba69e3a0c..fb19a2657c9c 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -965,33 +965,92 @@ private:
};
SmallVector<BreakContinue, 8> BreakContinueStack;
- /// Data for exit block for proper support of OpenMP cancellation constructs.
- struct OMPCancel {
- JumpDest ExitBlock;
- llvm::function_ref<void(CodeGenFunction &CGF)> CodeGen;
- OMPCancel() : CodeGen([](CodeGenFunction &CGF) {}) {}
+ /// Handles cancellation exit points in OpenMP-related constructs.
+ class OpenMPCancelExitStack {
+ /// Tracks cancellation exit point and join point for cancel-related exit
+ /// and normal exit.
+ struct CancelExit {
+ CancelExit() = default;
+ CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
+ JumpDest ContBlock)
+ : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
+ OpenMPDirectiveKind Kind = OMPD_unknown;
+ /// true if the exit block has been emitted already by the special
+ /// emitExit() call, false if the default codegen is used.
+ bool HasBeenEmitted = false;
+ JumpDest ExitBlock;
+ JumpDest ContBlock;
+ };
+
+ SmallVector<CancelExit, 8> Stack;
+
+ public:
+ OpenMPCancelExitStack() : Stack(1) {}
+ ~OpenMPCancelExitStack() = default;
+ /// Fetches the exit block for the current OpenMP construct.
+ JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
+ /// Emits the exit block with the special codegen procedure specific to the
+ /// related OpenMP construct, then emits code for the normal construct cleanup.
+ void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
+ const llvm::function_ref<void(CodeGenFunction &)> &CodeGen) {
+ if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
+ assert(CGF.getOMPCancelDestination(Kind).isValid());
+ assert(CGF.HaveInsertPoint());
+ assert(!Stack.back().HasBeenEmitted);
+ auto IP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
+ CodeGen(CGF);
+ CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
+ CGF.Builder.restoreIP(IP);
+ Stack.back().HasBeenEmitted = true;
+ }
+ CodeGen(CGF);
+ }
+ /// Enter the cancel supporting \a Kind construct.
+ /// \param Kind OpenMP directive that supports cancel constructs.
+ /// \param HasCancel true if the construct has an inner cancel directive,
+ /// false otherwise.
+ void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
+ Stack.push_back({Kind,
+ HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
+ : JumpDest(),
+ HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
+ : JumpDest()});
+ }
+ /// Emits the default exit point for the cancel construct (if the special one
+ /// has not been used), plus the join point for cancel/normal exits.
+ void exit(CodeGenFunction &CGF) {
+ if (getExitBlock().isValid()) {
+ assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
+ bool HaveIP = CGF.HaveInsertPoint();
+ if (!Stack.back().HasBeenEmitted) {
+ if (HaveIP)
+ CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
+ CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
+ CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
+ }
+ CGF.EmitBlock(Stack.back().ContBlock.getBlock());
+ if (!HaveIP) {
+ CGF.Builder.CreateUnreachable();
+ CGF.Builder.ClearInsertionPoint();
+ }
+ }
+ Stack.pop_back();
+ }
};
- SmallVector<OMPCancel, 8> OMPCancelStack;
+ OpenMPCancelExitStack OMPCancelStack;
/// Controls insertion of cancellation exit blocks in worksharing constructs.
class OMPCancelStackRAII {
CodeGenFunction &CGF;
public:
- OMPCancelStackRAII(CodeGenFunction &CGF) : CGF(CGF) {
- CGF.OMPCancelStack.push_back({});
- }
- ~OMPCancelStackRAII() {
- if (CGF.HaveInsertPoint() &&
- CGF.OMPCancelStack.back().ExitBlock.isValid()) {
- auto CJD = CGF.getJumpDestInCurrentScope("cancel.cont");
- CGF.EmitBranchThroughCleanup(CJD);
- CGF.EmitBlock(CGF.OMPCancelStack.back().ExitBlock.getBlock());
- CGF.OMPCancelStack.back().CodeGen(CGF);
- CGF.EmitBranchThroughCleanup(CJD);
- CGF.EmitBlock(CJD.getBlock());
- }
+ OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
+ bool HasCancel)
+ : CGF(CGF) {
+ CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
}
+ ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
};
CodeGenPGO PGO;
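
Pulling the pieces together, a hypothetical sketch of how a worksharing-loop emitter now drives the stack (only the names come from this patch; CGF, S, and Loc stand in for real values):

    {
      OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel()); // enter()
      // ... emit the loop; '#pragma omp cancel for' branches to getExitBlock()
      CGF.OMPCancelStack.emitExit(CGF, OMPD_for, [&](CodeGenFunction &CGF) {
        CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, Loc);
      });
    } // ~OMPCancelStackRAII runs exit(): default exit block + cancel.cont join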
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 657ef7e30cfa..fa2d2107781b 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -47,6 +47,7 @@ set(files
mmintrin.h
mm_malloc.h
module.modulemap
+ msa.h
mwaitxintrin.h
nmmintrin.h
opencl-c.h
diff --git a/lib/Headers/msa.h b/lib/Headers/msa.h
new file mode 100644
index 000000000000..da680f5ca9ee
--- /dev/null
+++ b/lib/Headers/msa.h
@@ -0,0 +1,583 @@
+/*===---- msa.h - MIPS MSA intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MSA_H
+#define _MSA_H 1
+
+#if defined(__mips_msa)
+typedef signed char v16i8 __attribute__((vector_size(16), aligned(16)));
+typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1)));
+typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1)));
+typedef short v8i16 __attribute__((vector_size(16), aligned(16)));
+typedef short v8i16_h __attribute__((vector_size(16), aligned(2)));
+typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2)));
+typedef int v4i32 __attribute__((vector_size(16), aligned(16)));
+typedef int v4i32_w __attribute__((vector_size(16), aligned(4)));
+typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned int v4u32_w __attribute__((vector_size(16), aligned(4)));
+typedef long long v2i64 __attribute__((vector_size(16), aligned(16)));
+typedef long long v2i64_d __attribute__((vector_size(16), aligned(8)));
+typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8)));
+typedef float v4f32 __attribute__((vector_size(16), aligned(16)));
+typedef float v4f32_w __attribute__((vector_size(16), aligned(4)));
+typedef double v2f64 __attribute__ ((vector_size(16), aligned(16)));
+typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));
+
+#define __msa_sll_b __builtin_msa_sll_b
+#define __msa_sll_h __builtin_msa_sll_h
+#define __msa_sll_w __builtin_msa_sll_w
+#define __msa_sll_d __builtin_msa_sll_d
+#define __msa_slli_b __builtin_msa_slli_b
+#define __msa_slli_h __builtin_msa_slli_h
+#define __msa_slli_w __builtin_msa_slli_w
+#define __msa_slli_d __builtin_msa_slli_d
+#define __msa_sra_b __builtin_msa_sra_b
+#define __msa_sra_h __builtin_msa_sra_h
+#define __msa_sra_w __builtin_msa_sra_w
+#define __msa_sra_d __builtin_msa_sra_d
+#define __msa_srai_b __builtin_msa_srai_b
+#define __msa_srai_h __builtin_msa_srai_h
+#define __msa_srai_w __builtin_msa_srai_w
+#define __msa_srai_d __builtin_msa_srai_d
+#define __msa_srar_b __builtin_msa_srar_b
+#define __msa_srar_h __builtin_msa_srar_h
+#define __msa_srar_w __builtin_msa_srar_w
+#define __msa_srar_d __builtin_msa_srar_d
+#define __msa_srari_b __builtin_msa_srari_b
+#define __msa_srari_h __builtin_msa_srari_h
+#define __msa_srari_w __builtin_msa_srari_w
+#define __msa_srari_d __builtin_msa_srari_d
+#define __msa_srl_b __builtin_msa_srl_b
+#define __msa_srl_h __builtin_msa_srl_h
+#define __msa_srl_w __builtin_msa_srl_w
+#define __msa_srl_d __builtin_msa_srl_d
+#define __msa_srli_b __builtin_msa_srli_b
+#define __msa_srli_h __builtin_msa_srli_h
+#define __msa_srli_w __builtin_msa_srli_w
+#define __msa_srli_d __builtin_msa_srli_d
+#define __msa_srlr_b __builtin_msa_srlr_b
+#define __msa_srlr_h __builtin_msa_srlr_h
+#define __msa_srlr_w __builtin_msa_srlr_w
+#define __msa_srlr_d __builtin_msa_srlr_d
+#define __msa_srlri_b __builtin_msa_srlri_b
+#define __msa_srlri_h __builtin_msa_srlri_h
+#define __msa_srlri_w __builtin_msa_srlri_w
+#define __msa_srlri_d __builtin_msa_srlri_d
+#define __msa_bclr_b __builtin_msa_bclr_b
+#define __msa_bclr_h __builtin_msa_bclr_h
+#define __msa_bclr_w __builtin_msa_bclr_w
+#define __msa_bclr_d __builtin_msa_bclr_d
+#define __msa_bclri_b __builtin_msa_bclri_b
+#define __msa_bclri_h __builtin_msa_bclri_h
+#define __msa_bclri_w __builtin_msa_bclri_w
+#define __msa_bclri_d __builtin_msa_bclri_d
+#define __msa_bset_b __builtin_msa_bset_b
+#define __msa_bset_h __builtin_msa_bset_h
+#define __msa_bset_w __builtin_msa_bset_w
+#define __msa_bset_d __builtin_msa_bset_d
+#define __msa_bseti_b __builtin_msa_bseti_b
+#define __msa_bseti_h __builtin_msa_bseti_h
+#define __msa_bseti_w __builtin_msa_bseti_w
+#define __msa_bseti_d __builtin_msa_bseti_d
+#define __msa_bneg_b __builtin_msa_bneg_b
+#define __msa_bneg_h __builtin_msa_bneg_h
+#define __msa_bneg_w __builtin_msa_bneg_w
+#define __msa_bneg_d __builtin_msa_bneg_d
+#define __msa_bnegi_b __builtin_msa_bnegi_b
+#define __msa_bnegi_h __builtin_msa_bnegi_h
+#define __msa_bnegi_w __builtin_msa_bnegi_w
+#define __msa_bnegi_d __builtin_msa_bnegi_d
+#define __msa_binsl_b __builtin_msa_binsl_b
+#define __msa_binsl_h __builtin_msa_binsl_h
+#define __msa_binsl_w __builtin_msa_binsl_w
+#define __msa_binsl_d __builtin_msa_binsl_d
+#define __msa_binsli_b __builtin_msa_binsli_b
+#define __msa_binsli_h __builtin_msa_binsli_h
+#define __msa_binsli_w __builtin_msa_binsli_w
+#define __msa_binsli_d __builtin_msa_binsli_d
+#define __msa_binsr_b __builtin_msa_binsr_b
+#define __msa_binsr_h __builtin_msa_binsr_h
+#define __msa_binsr_w __builtin_msa_binsr_w
+#define __msa_binsr_d __builtin_msa_binsr_d
+#define __msa_binsri_b __builtin_msa_binsri_b
+#define __msa_binsri_h __builtin_msa_binsri_h
+#define __msa_binsri_w __builtin_msa_binsri_w
+#define __msa_binsri_d __builtin_msa_binsri_d
+#define __msa_addv_b __builtin_msa_addv_b
+#define __msa_addv_h __builtin_msa_addv_h
+#define __msa_addv_w __builtin_msa_addv_w
+#define __msa_addv_d __builtin_msa_addv_d
+#define __msa_addvi_b __builtin_msa_addvi_b
+#define __msa_addvi_h __builtin_msa_addvi_h
+#define __msa_addvi_w __builtin_msa_addvi_w
+#define __msa_addvi_d __builtin_msa_addvi_d
+#define __msa_subv_b __builtin_msa_subv_b
+#define __msa_subv_h __builtin_msa_subv_h
+#define __msa_subv_w __builtin_msa_subv_w
+#define __msa_subv_d __builtin_msa_subv_d
+#define __msa_subvi_b __builtin_msa_subvi_b
+#define __msa_subvi_h __builtin_msa_subvi_h
+#define __msa_subvi_w __builtin_msa_subvi_w
+#define __msa_subvi_d __builtin_msa_subvi_d
+#define __msa_max_s_b __builtin_msa_max_s_b
+#define __msa_max_s_h __builtin_msa_max_s_h
+#define __msa_max_s_w __builtin_msa_max_s_w
+#define __msa_max_s_d __builtin_msa_max_s_d
+#define __msa_maxi_s_b __builtin_msa_maxi_s_b
+#define __msa_maxi_s_h __builtin_msa_maxi_s_h
+#define __msa_maxi_s_w __builtin_msa_maxi_s_w
+#define __msa_maxi_s_d __builtin_msa_maxi_s_d
+#define __msa_max_u_b __builtin_msa_max_u_b
+#define __msa_max_u_h __builtin_msa_max_u_h
+#define __msa_max_u_w __builtin_msa_max_u_w
+#define __msa_max_u_d __builtin_msa_max_u_d
+#define __msa_maxi_u_b __builtin_msa_maxi_u_b
+#define __msa_maxi_u_h __builtin_msa_maxi_u_h
+#define __msa_maxi_u_w __builtin_msa_maxi_u_w
+#define __msa_maxi_u_d __builtin_msa_maxi_u_d
+#define __msa_min_s_b __builtin_msa_min_s_b
+#define __msa_min_s_h __builtin_msa_min_s_h
+#define __msa_min_s_w __builtin_msa_min_s_w
+#define __msa_min_s_d __builtin_msa_min_s_d
+#define __msa_mini_s_b __builtin_msa_mini_s_b
+#define __msa_mini_s_h __builtin_msa_mini_s_h
+#define __msa_mini_s_w __builtin_msa_mini_s_w
+#define __msa_mini_s_d __builtin_msa_mini_s_d
+#define __msa_min_u_b __builtin_msa_min_u_b
+#define __msa_min_u_h __builtin_msa_min_u_h
+#define __msa_min_u_w __builtin_msa_min_u_w
+#define __msa_min_u_d __builtin_msa_min_u_d
+#define __msa_mini_u_b __builtin_msa_mini_u_b
+#define __msa_mini_u_h __builtin_msa_mini_u_h
+#define __msa_mini_u_w __builtin_msa_mini_u_w
+#define __msa_mini_u_d __builtin_msa_mini_u_d
+#define __msa_max_a_b __builtin_msa_max_a_b
+#define __msa_max_a_h __builtin_msa_max_a_h
+#define __msa_max_a_w __builtin_msa_max_a_w
+#define __msa_max_a_d __builtin_msa_max_a_d
+#define __msa_min_a_b __builtin_msa_min_a_b
+#define __msa_min_a_h __builtin_msa_min_a_h
+#define __msa_min_a_w __builtin_msa_min_a_w
+#define __msa_min_a_d __builtin_msa_min_a_d
+#define __msa_ceq_b __builtin_msa_ceq_b
+#define __msa_ceq_h __builtin_msa_ceq_h
+#define __msa_ceq_w __builtin_msa_ceq_w
+#define __msa_ceq_d __builtin_msa_ceq_d
+#define __msa_ceqi_b __builtin_msa_ceqi_b
+#define __msa_ceqi_h __builtin_msa_ceqi_h
+#define __msa_ceqi_w __builtin_msa_ceqi_w
+#define __msa_ceqi_d __builtin_msa_ceqi_d
+#define __msa_clt_s_b __builtin_msa_clt_s_b
+#define __msa_clt_s_h __builtin_msa_clt_s_h
+#define __msa_clt_s_w __builtin_msa_clt_s_w
+#define __msa_clt_s_d __builtin_msa_clt_s_d
+#define __msa_clti_s_b __builtin_msa_clti_s_b
+#define __msa_clti_s_h __builtin_msa_clti_s_h
+#define __msa_clti_s_w __builtin_msa_clti_s_w
+#define __msa_clti_s_d __builtin_msa_clti_s_d
+#define __msa_clt_u_b __builtin_msa_clt_u_b
+#define __msa_clt_u_h __builtin_msa_clt_u_h
+#define __msa_clt_u_w __builtin_msa_clt_u_w
+#define __msa_clt_u_d __builtin_msa_clt_u_d
+#define __msa_clti_u_b __builtin_msa_clti_u_b
+#define __msa_clti_u_h __builtin_msa_clti_u_h
+#define __msa_clti_u_w __builtin_msa_clti_u_w
+#define __msa_clti_u_d __builtin_msa_clti_u_d
+#define __msa_cle_s_b __builtin_msa_cle_s_b
+#define __msa_cle_s_h __builtin_msa_cle_s_h
+#define __msa_cle_s_w __builtin_msa_cle_s_w
+#define __msa_cle_s_d __builtin_msa_cle_s_d
+#define __msa_clei_s_b __builtin_msa_clei_s_b
+#define __msa_clei_s_h __builtin_msa_clei_s_h
+#define __msa_clei_s_w __builtin_msa_clei_s_w
+#define __msa_clei_s_d __builtin_msa_clei_s_d
+#define __msa_cle_u_b __builtin_msa_cle_u_b
+#define __msa_cle_u_h __builtin_msa_cle_u_h
+#define __msa_cle_u_w __builtin_msa_cle_u_w
+#define __msa_cle_u_d __builtin_msa_cle_u_d
+#define __msa_clei_u_b __builtin_msa_clei_u_b
+#define __msa_clei_u_h __builtin_msa_clei_u_h
+#define __msa_clei_u_w __builtin_msa_clei_u_w
+#define __msa_clei_u_d __builtin_msa_clei_u_d
+#define __msa_ld_b __builtin_msa_ld_b
+#define __msa_ld_h __builtin_msa_ld_h
+#define __msa_ld_w __builtin_msa_ld_w
+#define __msa_ld_d __builtin_msa_ld_d
+#define __msa_st_b __builtin_msa_st_b
+#define __msa_st_h __builtin_msa_st_h
+#define __msa_st_w __builtin_msa_st_w
+#define __msa_st_d __builtin_msa_st_d
+#define __msa_sat_s_b __builtin_msa_sat_s_b
+#define __msa_sat_s_h __builtin_msa_sat_s_h
+#define __msa_sat_s_w __builtin_msa_sat_s_w
+#define __msa_sat_s_d __builtin_msa_sat_s_d
+#define __msa_sat_u_b __builtin_msa_sat_u_b
+#define __msa_sat_u_h __builtin_msa_sat_u_h
+#define __msa_sat_u_w __builtin_msa_sat_u_w
+#define __msa_sat_u_d __builtin_msa_sat_u_d
+#define __msa_add_a_b __builtin_msa_add_a_b
+#define __msa_add_a_h __builtin_msa_add_a_h
+#define __msa_add_a_w __builtin_msa_add_a_w
+#define __msa_add_a_d __builtin_msa_add_a_d
+#define __msa_adds_a_b __builtin_msa_adds_a_b
+#define __msa_adds_a_h __builtin_msa_adds_a_h
+#define __msa_adds_a_w __builtin_msa_adds_a_w
+#define __msa_adds_a_d __builtin_msa_adds_a_d
+#define __msa_adds_s_b __builtin_msa_adds_s_b
+#define __msa_adds_s_h __builtin_msa_adds_s_h
+#define __msa_adds_s_w __builtin_msa_adds_s_w
+#define __msa_adds_s_d __builtin_msa_adds_s_d
+#define __msa_adds_u_b __builtin_msa_adds_u_b
+#define __msa_adds_u_h __builtin_msa_adds_u_h
+#define __msa_adds_u_w __builtin_msa_adds_u_w
+#define __msa_adds_u_d __builtin_msa_adds_u_d
+#define __msa_ave_s_b __builtin_msa_ave_s_b
+#define __msa_ave_s_h __builtin_msa_ave_s_h
+#define __msa_ave_s_w __builtin_msa_ave_s_w
+#define __msa_ave_s_d __builtin_msa_ave_s_d
+#define __msa_ave_u_b __builtin_msa_ave_u_b
+#define __msa_ave_u_h __builtin_msa_ave_u_h
+#define __msa_ave_u_w __builtin_msa_ave_u_w
+#define __msa_ave_u_d __builtin_msa_ave_u_d
+#define __msa_aver_s_b __builtin_msa_aver_s_b
+#define __msa_aver_s_h __builtin_msa_aver_s_h
+#define __msa_aver_s_w __builtin_msa_aver_s_w
+#define __msa_aver_s_d __builtin_msa_aver_s_d
+#define __msa_aver_u_b __builtin_msa_aver_u_b
+#define __msa_aver_u_h __builtin_msa_aver_u_h
+#define __msa_aver_u_w __builtin_msa_aver_u_w
+#define __msa_aver_u_d __builtin_msa_aver_u_d
+#define __msa_subs_s_b __builtin_msa_subs_s_b
+#define __msa_subs_s_h __builtin_msa_subs_s_h
+#define __msa_subs_s_w __builtin_msa_subs_s_w
+#define __msa_subs_s_d __builtin_msa_subs_s_d
+#define __msa_subs_u_b __builtin_msa_subs_u_b
+#define __msa_subs_u_h __builtin_msa_subs_u_h
+#define __msa_subs_u_w __builtin_msa_subs_u_w
+#define __msa_subs_u_d __builtin_msa_subs_u_d
+#define __msa_subsuu_s_b __builtin_msa_subsuu_s_b
+#define __msa_subsuu_s_h __builtin_msa_subsuu_s_h
+#define __msa_subsuu_s_w __builtin_msa_subsuu_s_w
+#define __msa_subsuu_s_d __builtin_msa_subsuu_s_d
+#define __msa_subsus_u_b __builtin_msa_subsus_u_b
+#define __msa_subsus_u_h __builtin_msa_subsus_u_h
+#define __msa_subsus_u_w __builtin_msa_subsus_u_w
+#define __msa_subsus_u_d __builtin_msa_subsus_u_d
+#define __msa_asub_s_b __builtin_msa_asub_s_b
+#define __msa_asub_s_h __builtin_msa_asub_s_h
+#define __msa_asub_s_w __builtin_msa_asub_s_w
+#define __msa_asub_s_d __builtin_msa_asub_s_d
+#define __msa_asub_u_b __builtin_msa_asub_u_b
+#define __msa_asub_u_h __builtin_msa_asub_u_h
+#define __msa_asub_u_w __builtin_msa_asub_u_w
+#define __msa_asub_u_d __builtin_msa_asub_u_d
+#define __msa_mulv_b __builtin_msa_mulv_b
+#define __msa_mulv_h __builtin_msa_mulv_h
+#define __msa_mulv_w __builtin_msa_mulv_w
+#define __msa_mulv_d __builtin_msa_mulv_d
+#define __msa_maddv_b __builtin_msa_maddv_b
+#define __msa_maddv_h __builtin_msa_maddv_h
+#define __msa_maddv_w __builtin_msa_maddv_w
+#define __msa_maddv_d __builtin_msa_maddv_d
+#define __msa_msubv_b __builtin_msa_msubv_b
+#define __msa_msubv_h __builtin_msa_msubv_h
+#define __msa_msubv_w __builtin_msa_msubv_w
+#define __msa_msubv_d __builtin_msa_msubv_d
+#define __msa_div_s_b __builtin_msa_div_s_b
+#define __msa_div_s_h __builtin_msa_div_s_h
+#define __msa_div_s_w __builtin_msa_div_s_w
+#define __msa_div_s_d __builtin_msa_div_s_d
+#define __msa_div_u_b __builtin_msa_div_u_b
+#define __msa_div_u_h __builtin_msa_div_u_h
+#define __msa_div_u_w __builtin_msa_div_u_w
+#define __msa_div_u_d __builtin_msa_div_u_d
+#define __msa_hadd_s_h __builtin_msa_hadd_s_h
+#define __msa_hadd_s_w __builtin_msa_hadd_s_w
+#define __msa_hadd_s_d __builtin_msa_hadd_s_d
+#define __msa_hadd_u_h __builtin_msa_hadd_u_h
+#define __msa_hadd_u_w __builtin_msa_hadd_u_w
+#define __msa_hadd_u_d __builtin_msa_hadd_u_d
+#define __msa_hsub_s_h __builtin_msa_hsub_s_h
+#define __msa_hsub_s_w __builtin_msa_hsub_s_w
+#define __msa_hsub_s_d __builtin_msa_hsub_s_d
+#define __msa_hsub_u_h __builtin_msa_hsub_u_h
+#define __msa_hsub_u_w __builtin_msa_hsub_u_w
+#define __msa_hsub_u_d __builtin_msa_hsub_u_d
+#define __msa_mod_s_b __builtin_msa_mod_s_b
+#define __msa_mod_s_h __builtin_msa_mod_s_h
+#define __msa_mod_s_w __builtin_msa_mod_s_w
+#define __msa_mod_s_d __builtin_msa_mod_s_d
+#define __msa_mod_u_b __builtin_msa_mod_u_b
+#define __msa_mod_u_h __builtin_msa_mod_u_h
+#define __msa_mod_u_w __builtin_msa_mod_u_w
+#define __msa_mod_u_d __builtin_msa_mod_u_d
+#define __msa_dotp_s_h __builtin_msa_dotp_s_h
+#define __msa_dotp_s_w __builtin_msa_dotp_s_w
+#define __msa_dotp_s_d __builtin_msa_dotp_s_d
+#define __msa_dotp_u_h __builtin_msa_dotp_u_h
+#define __msa_dotp_u_w __builtin_msa_dotp_u_w
+#define __msa_dotp_u_d __builtin_msa_dotp_u_d
+#define __msa_dpadd_s_h __builtin_msa_dpadd_s_h
+#define __msa_dpadd_s_w __builtin_msa_dpadd_s_w
+#define __msa_dpadd_s_d __builtin_msa_dpadd_s_d
+#define __msa_dpadd_u_h __builtin_msa_dpadd_u_h
+#define __msa_dpadd_u_w __builtin_msa_dpadd_u_w
+#define __msa_dpadd_u_d __builtin_msa_dpadd_u_d
+#define __msa_dpsub_s_h __builtin_msa_dpsub_s_h
+#define __msa_dpsub_s_w __builtin_msa_dpsub_s_w
+#define __msa_dpsub_s_d __builtin_msa_dpsub_s_d
+#define __msa_dpsub_u_h __builtin_msa_dpsub_u_h
+#define __msa_dpsub_u_w __builtin_msa_dpsub_u_w
+#define __msa_dpsub_u_d __builtin_msa_dpsub_u_d
+#define __msa_sld_b __builtin_msa_sld_b
+#define __msa_sld_h __builtin_msa_sld_h
+#define __msa_sld_w __builtin_msa_sld_w
+#define __msa_sld_d __builtin_msa_sld_d
+#define __msa_sldi_b __builtin_msa_sldi_b
+#define __msa_sldi_h __builtin_msa_sldi_h
+#define __msa_sldi_w __builtin_msa_sldi_w
+#define __msa_sldi_d __builtin_msa_sldi_d
+#define __msa_splat_b __builtin_msa_splat_b
+#define __msa_splat_h __builtin_msa_splat_h
+#define __msa_splat_w __builtin_msa_splat_w
+#define __msa_splat_d __builtin_msa_splat_d
+#define __msa_splati_b __builtin_msa_splati_b
+#define __msa_splati_h __builtin_msa_splati_h
+#define __msa_splati_w __builtin_msa_splati_w
+#define __msa_splati_d __builtin_msa_splati_d
+#define __msa_pckev_b __builtin_msa_pckev_b
+#define __msa_pckev_h __builtin_msa_pckev_h
+#define __msa_pckev_w __builtin_msa_pckev_w
+#define __msa_pckev_d __builtin_msa_pckev_d
+#define __msa_pckod_b __builtin_msa_pckod_b
+#define __msa_pckod_h __builtin_msa_pckod_h
+#define __msa_pckod_w __builtin_msa_pckod_w
+#define __msa_pckod_d __builtin_msa_pckod_d
+#define __msa_ilvl_b __builtin_msa_ilvl_b
+#define __msa_ilvl_h __builtin_msa_ilvl_h
+#define __msa_ilvl_w __builtin_msa_ilvl_w
+#define __msa_ilvl_d __builtin_msa_ilvl_d
+#define __msa_ilvr_b __builtin_msa_ilvr_b
+#define __msa_ilvr_h __builtin_msa_ilvr_h
+#define __msa_ilvr_w __builtin_msa_ilvr_w
+#define __msa_ilvr_d __builtin_msa_ilvr_d
+#define __msa_ilvev_b __builtin_msa_ilvev_b
+#define __msa_ilvev_h __builtin_msa_ilvev_h
+#define __msa_ilvev_w __builtin_msa_ilvev_w
+#define __msa_ilvev_d __builtin_msa_ilvev_d
+#define __msa_ilvod_b __builtin_msa_ilvod_b
+#define __msa_ilvod_h __builtin_msa_ilvod_h
+#define __msa_ilvod_w __builtin_msa_ilvod_w
+#define __msa_ilvod_d __builtin_msa_ilvod_d
+#define __msa_vshf_b __builtin_msa_vshf_b
+#define __msa_vshf_h __builtin_msa_vshf_h
+#define __msa_vshf_w __builtin_msa_vshf_w
+#define __msa_vshf_d __builtin_msa_vshf_d
+#define __msa_and_v __builtin_msa_and_v
+#define __msa_andi_b __builtin_msa_andi_b
+#define __msa_or_v __builtin_msa_or_v
+#define __msa_ori_b __builtin_msa_ori_b
+#define __msa_nor_v __builtin_msa_nor_v
+#define __msa_nori_b __builtin_msa_nori_b
+#define __msa_xor_v __builtin_msa_xor_v
+#define __msa_xori_b __builtin_msa_xori_b
+#define __msa_bmnz_v __builtin_msa_bmnz_v
+#define __msa_bmnzi_b __builtin_msa_bmnzi_b
+#define __msa_bmz_v __builtin_msa_bmz_v
+#define __msa_bmzi_b __builtin_msa_bmzi_b
+#define __msa_bsel_v __builtin_msa_bsel_v
+#define __msa_bseli_b __builtin_msa_bseli_b
+#define __msa_shf_b __builtin_msa_shf_b
+#define __msa_shf_h __builtin_msa_shf_h
+#define __msa_shf_w __builtin_msa_shf_w
+#define __msa_test_bnz_v __builtin_msa_bnz_v
+#define __msa_test_bz_v __builtin_msa_bz_v
+#define __msa_fill_b __builtin_msa_fill_b
+#define __msa_fill_h __builtin_msa_fill_h
+#define __msa_fill_w __builtin_msa_fill_w
+#define __msa_fill_d __builtin_msa_fill_d
+#define __msa_pcnt_b __builtin_msa_pcnt_b
+#define __msa_pcnt_h __builtin_msa_pcnt_h
+#define __msa_pcnt_w __builtin_msa_pcnt_w
+#define __msa_pcnt_d __builtin_msa_pcnt_d
+#define __msa_nloc_b __builtin_msa_nloc_b
+#define __msa_nloc_h __builtin_msa_nloc_h
+#define __msa_nloc_w __builtin_msa_nloc_w
+#define __msa_nloc_d __builtin_msa_nloc_d
+#define __msa_nlzc_b __builtin_msa_nlzc_b
+#define __msa_nlzc_h __builtin_msa_nlzc_h
+#define __msa_nlzc_w __builtin_msa_nlzc_w
+#define __msa_nlzc_d __builtin_msa_nlzc_d
+#define __msa_copy_s_b __builtin_msa_copy_s_b
+#define __msa_copy_s_h __builtin_msa_copy_s_h
+#define __msa_copy_s_w __builtin_msa_copy_s_w
+#define __msa_copy_s_d __builtin_msa_copy_s_d
+#define __msa_copy_u_b __builtin_msa_copy_u_b
+#define __msa_copy_u_h __builtin_msa_copy_u_h
+#define __msa_copy_u_w __builtin_msa_copy_u_w
+#define __msa_copy_u_d __builtin_msa_copy_u_d
+#define __msa_insert_b __builtin_msa_insert_b
+#define __msa_insert_h __builtin_msa_insert_h
+#define __msa_insert_w __builtin_msa_insert_w
+#define __msa_insert_d __builtin_msa_insert_d
+#define __msa_insve_b __builtin_msa_insve_b
+#define __msa_insve_h __builtin_msa_insve_h
+#define __msa_insve_w __builtin_msa_insve_w
+#define __msa_insve_d __builtin_msa_insve_d
+#define __msa_test_bnz_b __builtin_msa_bnz_b
+#define __msa_test_bnz_h __builtin_msa_bnz_h
+#define __msa_test_bnz_w __builtin_msa_bnz_w
+#define __msa_test_bnz_d __builtin_msa_bnz_d
+#define __msa_test_bz_b __builtin_msa_bz_b
+#define __msa_test_bz_h __builtin_msa_bz_h
+#define __msa_test_bz_w __builtin_msa_bz_w
+#define __msa_test_bz_d __builtin_msa_bz_d
+#define __msa_ldi_b __builtin_msa_ldi_b
+#define __msa_ldi_h __builtin_msa_ldi_h
+#define __msa_ldi_w __builtin_msa_ldi_w
+#define __msa_ldi_d __builtin_msa_ldi_d
+#define __msa_fcaf_w __builtin_msa_fcaf_w
+#define __msa_fcaf_d __builtin_msa_fcaf_d
+#define __msa_fcor_w __builtin_msa_fcor_w
+#define __msa_fcor_d __builtin_msa_fcor_d
+#define __msa_fcun_w __builtin_msa_fcun_w
+#define __msa_fcun_d __builtin_msa_fcun_d
+#define __msa_fcune_w __builtin_msa_fcune_w
+#define __msa_fcune_d __builtin_msa_fcune_d
+#define __msa_fcueq_w __builtin_msa_fcueq_w
+#define __msa_fcueq_d __builtin_msa_fcueq_d
+#define __msa_fceq_w __builtin_msa_fceq_w
+#define __msa_fceq_d __builtin_msa_fceq_d
+#define __msa_fcne_w __builtin_msa_fcne_w
+#define __msa_fcne_d __builtin_msa_fcne_d
+#define __msa_fclt_w __builtin_msa_fclt_w
+#define __msa_fclt_d __builtin_msa_fclt_d
+#define __msa_fcult_w __builtin_msa_fcult_w
+#define __msa_fcult_d __builtin_msa_fcult_d
+#define __msa_fcle_w __builtin_msa_fcle_w
+#define __msa_fcle_d __builtin_msa_fcle_d
+#define __msa_fcule_w __builtin_msa_fcule_w
+#define __msa_fcule_d __builtin_msa_fcule_d
+#define __msa_fsaf_w __builtin_msa_fsaf_w
+#define __msa_fsaf_d __builtin_msa_fsaf_d
+#define __msa_fsor_w __builtin_msa_fsor_w
+#define __msa_fsor_d __builtin_msa_fsor_d
+#define __msa_fsun_w __builtin_msa_fsun_w
+#define __msa_fsun_d __builtin_msa_fsun_d
+#define __msa_fsune_w __builtin_msa_fsune_w
+#define __msa_fsune_d __builtin_msa_fsune_d
+#define __msa_fsueq_w __builtin_msa_fsueq_w
+#define __msa_fsueq_d __builtin_msa_fsueq_d
+#define __msa_fseq_w __builtin_msa_fseq_w
+#define __msa_fseq_d __builtin_msa_fseq_d
+#define __msa_fsne_w __builtin_msa_fsne_w
+#define __msa_fsne_d __builtin_msa_fsne_d
+#define __msa_fslt_w __builtin_msa_fslt_w
+#define __msa_fslt_d __builtin_msa_fslt_d
+#define __msa_fsult_w __builtin_msa_fsult_w
+#define __msa_fsult_d __builtin_msa_fsult_d
+#define __msa_fsle_w __builtin_msa_fsle_w
+#define __msa_fsle_d __builtin_msa_fsle_d
+#define __msa_fsule_w __builtin_msa_fsule_w
+#define __msa_fsule_d __builtin_msa_fsule_d
+#define __msa_fadd_w __builtin_msa_fadd_w
+#define __msa_fadd_d __builtin_msa_fadd_d
+#define __msa_fsub_w __builtin_msa_fsub_w
+#define __msa_fsub_d __builtin_msa_fsub_d
+#define __msa_fmul_w __builtin_msa_fmul_w
+#define __msa_fmul_d __builtin_msa_fmul_d
+#define __msa_fdiv_w __builtin_msa_fdiv_w
+#define __msa_fdiv_d __builtin_msa_fdiv_d
+#define __msa_fmadd_w __builtin_msa_fmadd_w
+#define __msa_fmadd_d __builtin_msa_fmadd_d
+#define __msa_fmsub_w __builtin_msa_fmsub_w
+#define __msa_fmsub_d __builtin_msa_fmsub_d
+#define __msa_fexp2_w __builtin_msa_fexp2_w
+#define __msa_fexp2_d __builtin_msa_fexp2_d
+#define __msa_fexdo_h __builtin_msa_fexdo_h
+#define __msa_fexdo_w __builtin_msa_fexdo_w
+#define __msa_ftq_h __builtin_msa_ftq_h
+#define __msa_ftq_w __builtin_msa_ftq_w
+#define __msa_fmin_w __builtin_msa_fmin_w
+#define __msa_fmin_d __builtin_msa_fmin_d
+#define __msa_fmin_a_w __builtin_msa_fmin_a_w
+#define __msa_fmin_a_d __builtin_msa_fmin_a_d
+#define __msa_fmax_w __builtin_msa_fmax_w
+#define __msa_fmax_d __builtin_msa_fmax_d
+#define __msa_fmax_a_w __builtin_msa_fmax_a_w
+#define __msa_fmax_a_d __builtin_msa_fmax_a_d
+#define __msa_mul_q_h __builtin_msa_mul_q_h
+#define __msa_mul_q_w __builtin_msa_mul_q_w
+#define __msa_mulr_q_h __builtin_msa_mulr_q_h
+#define __msa_mulr_q_w __builtin_msa_mulr_q_w
+#define __msa_madd_q_h __builtin_msa_madd_q_h
+#define __msa_madd_q_w __builtin_msa_madd_q_w
+#define __msa_maddr_q_h __builtin_msa_maddr_q_h
+#define __msa_maddr_q_w __builtin_msa_maddr_q_w
+#define __msa_msub_q_h __builtin_msa_msub_q_h
+#define __msa_msub_q_w __builtin_msa_msub_q_w
+#define __msa_msubr_q_h __builtin_msa_msubr_q_h
+#define __msa_msubr_q_w __builtin_msa_msubr_q_w
+#define __msa_fclass_w __builtin_msa_fclass_w
+#define __msa_fclass_d __builtin_msa_fclass_d
+#define __msa_fsqrt_w __builtin_msa_fsqrt_w
+#define __msa_fsqrt_d __builtin_msa_fsqrt_d
+#define __msa_frcp_w __builtin_msa_frcp_w
+#define __msa_frcp_d __builtin_msa_frcp_d
+#define __msa_frint_w __builtin_msa_frint_w
+#define __msa_frint_d __builtin_msa_frint_d
+#define __msa_frsqrt_w __builtin_msa_frsqrt_w
+#define __msa_frsqrt_d __builtin_msa_frsqrt_d
+#define __msa_flog2_w __builtin_msa_flog2_w
+#define __msa_flog2_d __builtin_msa_flog2_d
+#define __msa_fexupl_w __builtin_msa_fexupl_w
+#define __msa_fexupl_d __builtin_msa_fexupl_d
+#define __msa_fexupr_w __builtin_msa_fexupr_w
+#define __msa_fexupr_d __builtin_msa_fexupr_d
+#define __msa_ffql_w __builtin_msa_ffql_w
+#define __msa_ffql_d __builtin_msa_ffql_d
+#define __msa_ffqr_w __builtin_msa_ffqr_w
+#define __msa_ffqr_d __builtin_msa_ffqr_d
+#define __msa_ftint_s_w __builtin_msa_ftint_s_w
+#define __msa_ftint_s_d __builtin_msa_ftint_s_d
+#define __msa_ftint_u_w __builtin_msa_ftint_u_w
+#define __msa_ftint_u_d __builtin_msa_ftint_u_d
+#define __msa_ftrunc_s_w __builtin_msa_ftrunc_s_w
+#define __msa_ftrunc_s_d __builtin_msa_ftrunc_s_d
+#define __msa_ftrunc_u_w __builtin_msa_ftrunc_u_w
+#define __msa_ftrunc_u_d __builtin_msa_ftrunc_u_d
+#define __msa_ffint_s_w __builtin_msa_ffint_s_w
+#define __msa_ffint_s_d __builtin_msa_ffint_s_d
+#define __msa_ffint_u_w __builtin_msa_ffint_u_w
+#define __msa_ffint_u_d __builtin_msa_ffint_u_d
+#define __msa_cfcmsa __builtin_msa_cfcmsa
+#define __msa_move_v __builtin_msa_move_v
+#define __msa_cast_to_vector_float __builtin_msa_cast_to_vector_float
+#define __msa_cast_to_vector_double __builtin_msa_cast_to_vector_double
+#define __msa_cast_to_scalar_float __builtin_msa_cast_to_scalar_float
+#define __msa_cast_to_scalar_double __builtin_msa_cast_to_scalar_double
+#endif /* defined(__mips_msa) */
+#endif /* _MSA_H */
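
With the header in place, MSA code can target the __msa_* names rather than the raw builtins. A minimal sketch, assuming a MIPS target with MSA enabled (e.g. clang --target=mips-linux-gnu -mmsa -mfp64; the header is empty unless __mips_msa is defined):

    #include <msa.h>

    v4i32 madd_shift(v4i32 acc, v4i32 a, v4i32 b) {
      acc = __msa_maddv_w(acc, a, b); /* per-element acc += a * b */
      return __msa_slli_w(acc, 1);    /* immediate must be in [0, 31] */
    }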
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index b8e7ede2716c..7f7dbe8873d4 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -1454,8 +1454,17 @@ bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
+// CheckMipsBuiltinFunctionCall - Checks that the constant value passed to the
+// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
+// ordering for DSP is unspecified. MSA is ordered by the data format used
+// by the underlying instruction i.e., df/m, df/n and then by size.
+//
+// FIXME: The size tests here should instead be tablegen'd along with the
+// definitions from include/clang/Basic/BuiltinsMips.def.
+// FIXME: GCC is strict on signedness for some of these intrinsics, we should
+// be too.
bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- unsigned i = 0, l = 0, u = 0;
+ unsigned i = 0, l = 0, u = 0, m = 0;
switch (BuiltinID) {
default: return false;
case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
@@ -1465,9 +1474,168 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
- }
-
- return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+ // MSA intrinsics. Instructions (which the intrinsics map to) that use the
+ // df/m field.
+ // These intrinsics take an unsigned 3 bit immediate.
+ case Mips::BI__builtin_msa_bclri_b:
+ case Mips::BI__builtin_msa_bnegi_b:
+ case Mips::BI__builtin_msa_bseti_b:
+ case Mips::BI__builtin_msa_sat_s_b:
+ case Mips::BI__builtin_msa_sat_u_b:
+ case Mips::BI__builtin_msa_slli_b:
+ case Mips::BI__builtin_msa_srai_b:
+ case Mips::BI__builtin_msa_srari_b:
+ case Mips::BI__builtin_msa_srli_b:
+ case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
+ case Mips::BI__builtin_msa_binsli_b:
+ case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
+ // These intrinsics take an unsigned 4 bit immediate.
+ case Mips::BI__builtin_msa_bclri_h:
+ case Mips::BI__builtin_msa_bnegi_h:
+ case Mips::BI__builtin_msa_bseti_h:
+ case Mips::BI__builtin_msa_sat_s_h:
+ case Mips::BI__builtin_msa_sat_u_h:
+ case Mips::BI__builtin_msa_slli_h:
+ case Mips::BI__builtin_msa_srai_h:
+ case Mips::BI__builtin_msa_srari_h:
+ case Mips::BI__builtin_msa_srli_h:
+ case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
+ case Mips::BI__builtin_msa_binsli_h:
+ case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
+ // These intrinsics take an unsigned 5 bit immediate.
+ // The first block of intrinsics actually have an unsigned 5 bit field,
+ // not a df/n field.
+ case Mips::BI__builtin_msa_clei_u_b:
+ case Mips::BI__builtin_msa_clei_u_h:
+ case Mips::BI__builtin_msa_clei_u_w:
+ case Mips::BI__builtin_msa_clei_u_d:
+ case Mips::BI__builtin_msa_clti_u_b:
+ case Mips::BI__builtin_msa_clti_u_h:
+ case Mips::BI__builtin_msa_clti_u_w:
+ case Mips::BI__builtin_msa_clti_u_d:
+ case Mips::BI__builtin_msa_maxi_u_b:
+ case Mips::BI__builtin_msa_maxi_u_h:
+ case Mips::BI__builtin_msa_maxi_u_w:
+ case Mips::BI__builtin_msa_maxi_u_d:
+ case Mips::BI__builtin_msa_mini_u_b:
+ case Mips::BI__builtin_msa_mini_u_h:
+ case Mips::BI__builtin_msa_mini_u_w:
+ case Mips::BI__builtin_msa_mini_u_d:
+ case Mips::BI__builtin_msa_addvi_b:
+ case Mips::BI__builtin_msa_addvi_h:
+ case Mips::BI__builtin_msa_addvi_w:
+ case Mips::BI__builtin_msa_addvi_d:
+ case Mips::BI__builtin_msa_bclri_w:
+ case Mips::BI__builtin_msa_bnegi_w:
+ case Mips::BI__builtin_msa_bseti_w:
+ case Mips::BI__builtin_msa_sat_s_w:
+ case Mips::BI__builtin_msa_sat_u_w:
+ case Mips::BI__builtin_msa_slli_w:
+ case Mips::BI__builtin_msa_srai_w:
+ case Mips::BI__builtin_msa_srari_w:
+ case Mips::BI__builtin_msa_srli_w:
+ case Mips::BI__builtin_msa_srlri_w:
+ case Mips::BI__builtin_msa_subvi_b:
+ case Mips::BI__builtin_msa_subvi_h:
+ case Mips::BI__builtin_msa_subvi_w:
+ case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
+ case Mips::BI__builtin_msa_binsli_w:
+ case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
+ // These intrinsics take an unsigned 6 bit immediate.
+ case Mips::BI__builtin_msa_bclri_d:
+ case Mips::BI__builtin_msa_bnegi_d:
+ case Mips::BI__builtin_msa_bseti_d:
+ case Mips::BI__builtin_msa_sat_s_d:
+ case Mips::BI__builtin_msa_sat_u_d:
+ case Mips::BI__builtin_msa_slli_d:
+ case Mips::BI__builtin_msa_srai_d:
+ case Mips::BI__builtin_msa_srari_d:
+ case Mips::BI__builtin_msa_srli_d:
+ case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
+ case Mips::BI__builtin_msa_binsli_d:
+ case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
+ // These intrinsics take a signed 5 bit immediate.
+ case Mips::BI__builtin_msa_ceqi_b:
+ case Mips::BI__builtin_msa_ceqi_h:
+ case Mips::BI__builtin_msa_ceqi_w:
+ case Mips::BI__builtin_msa_ceqi_d:
+ case Mips::BI__builtin_msa_clti_s_b:
+ case Mips::BI__builtin_msa_clti_s_h:
+ case Mips::BI__builtin_msa_clti_s_w:
+ case Mips::BI__builtin_msa_clti_s_d:
+ case Mips::BI__builtin_msa_clei_s_b:
+ case Mips::BI__builtin_msa_clei_s_h:
+ case Mips::BI__builtin_msa_clei_s_w:
+ case Mips::BI__builtin_msa_clei_s_d:
+ case Mips::BI__builtin_msa_maxi_s_b:
+ case Mips::BI__builtin_msa_maxi_s_h:
+ case Mips::BI__builtin_msa_maxi_s_w:
+ case Mips::BI__builtin_msa_maxi_s_d:
+ case Mips::BI__builtin_msa_mini_s_b:
+ case Mips::BI__builtin_msa_mini_s_h:
+ case Mips::BI__builtin_msa_mini_s_w:
+ case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
+ // These intrinsics take an unsigned 8 bit immediate.
+ case Mips::BI__builtin_msa_andi_b:
+ case Mips::BI__builtin_msa_nori_b:
+ case Mips::BI__builtin_msa_ori_b:
+ case Mips::BI__builtin_msa_shf_b:
+ case Mips::BI__builtin_msa_shf_h:
+ case Mips::BI__builtin_msa_shf_w:
+ case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
+ case Mips::BI__builtin_msa_bseli_b:
+ case Mips::BI__builtin_msa_bmnzi_b:
+ case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
+ // df/n format
+ // These intrinsics take an unsigned 4 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_b:
+ case Mips::BI__builtin_msa_copy_u_b:
+ case Mips::BI__builtin_msa_insve_b:
+ case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
+ case Mips::BI__builtin_msa_sld_b:
+ case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
+ // These intrinsics take an unsigned 3 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_h:
+ case Mips::BI__builtin_msa_copy_u_h:
+ case Mips::BI__builtin_msa_insve_h:
+ case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
+ case Mips::BI__builtin_msa_sld_h:
+ case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
+ // These intrinsics take an unsigned 2 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_w:
+ case Mips::BI__builtin_msa_copy_u_w:
+ case Mips::BI__builtin_msa_insve_w:
+ case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
+ case Mips::BI__builtin_msa_sld_w:
+ case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
+ // These intrinsics take an unsigned 1 bit immediate.
+ case Mips::BI__builtin_msa_copy_s_d:
+ case Mips::BI__builtin_msa_copy_u_d:
+ case Mips::BI__builtin_msa_insve_d:
+ case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
+ case Mips::BI__builtin_msa_sld_d:
+ case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
+ // Memory offsets and immediate loads.
+ // These intrinsics take a signed 10 bit immediate.
+ case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 127; break;
+ case Mips::BI__builtin_msa_ldi_h:
+ case Mips::BI__builtin_msa_ldi_w:
+ case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
+ case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 16; break;
+ case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 16; break;
+ case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 16; break;
+ case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 16; break;
+ case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 16; break;
+ case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 16; break;
+ case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 16; break;
+ case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 16; break;
+ }
+
+ if (!m)
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u);
+
+ return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
+ SemaBuiltinConstantArgMultiple(TheCall, i, m);
}
bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
@@ -3605,6 +3773,28 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
return false;
}
+/// SemaBuiltinConstantArgMultiple - Check that argument ArgNum of CallExpr
+/// TheCall is a constant expression that is a multiple of Num.
+bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
+ unsigned Num) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (Result.getSExtValue() % Num != 0)
+ return Diag(TheCall->getLocStart(), diag::err_argument_not_multiple)
+ << Num << Arg->getSourceRange();
+
+ return false;
+}
+
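
For the memory intrinsics both checks apply, and the || above means a range violation is reported first. A sketch against the ld_h values from the table (l = -1024, u = 1022, m = 16):

    void g(v8i16 *p) {
      v8i16 w;
      w = __msa_ld_h(p, 32);   /* OK: in range and 32 % 16 == 0 */
      w = __msa_ld_h(p, 77);   /* error: argument should be a multiple of 16 */
      w = __msa_ld_h(p, 2048); /* error: argument should be a value from -1024 to 1022 */
      (void)w;
    }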
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index 56f4019bfbb6..dfdd36752bf6 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -4221,9 +4221,12 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// A template constructor is never a copy constructor.
// FIXME: However, it may actually be selected at the actual overload
// resolution point.
- if (isa<FunctionTemplateDecl>(ND))
+ if (isa<FunctionTemplateDecl>(ND->getUnderlyingDecl()))
continue;
- const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
+ // UsingDecl itself is not a constructor
+ if (isa<UsingDecl>(ND))
+ continue;
+ auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isCopyConstructor(FoundTQs)) {
FoundConstructor = true;
const FunctionProtoType *CPT
@@ -4257,9 +4260,12 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
bool FoundConstructor = false;
for (const auto *ND : Self.LookupConstructors(RD)) {
// FIXME: In C++0x, a constructor template can be a default constructor.
- if (isa<FunctionTemplateDecl>(ND))
+ if (isa<FunctionTemplateDecl>(ND->getUnderlyingDecl()))
+ continue;
+ // UsingDecl itself is not a constructor
+ if (isa<UsingDecl>(ND))
continue;
- const CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(ND);
+ auto *Constructor = cast<CXXConstructorDecl>(ND->getUnderlyingDecl());
if (Constructor->isDefaultConstructor()) {
FoundConstructor = true;
const FunctionProtoType *CPT
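
The guarded pattern: with inheriting constructors, LookupConstructors can return a UsingDecl, or a shadow declaration whose underlying declaration is the inherited constructor (or a constructor template), so the old unconditional cast<CXXConstructorDecl>(ND) could assert. A hedged reconstruction of the kind of input that crashed (the real regression test lands in test/SemaCXX/cxx11-crashes.cpp in this patch):

    struct Base { Base(int); };
    struct Derived : Base { using Base::Base; };

    // Both traits walk LookupConstructors(Derived):
    bool a = __has_nothrow_copy(Derived);        // isCopyConstructor() path
    bool b = __has_nothrow_constructor(Derived); // isDefaultConstructor() path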
diff --git a/test/CodeGen/builtins-mips-msa-error.c b/test/CodeGen/builtins-mips-msa-error.c
new file mode 100644
index 000000000000..fcdf6f0c48c8
--- /dev/null
+++ b/test/CodeGen/builtins-mips-msa-error.c
@@ -0,0 +1,421 @@
+// REQUIRES: mips-registered-target
+// RUN: not %clang_cc1 -triple mips-unknown-linux-gnu -fsyntax-only %s \
+// RUN: -target-feature +msa -target-feature +fp64 \
+// RUN: -mfloat-abi hard -o - 2>&1 | FileCheck %s
+
+#include <msa.h>
+
+void test(void) {
+ v16i8 v16i8_a = (v16i8) {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ v16i8 v16i8_r;
+ v8i16 v8i16_a = (v8i16) {0, 1, 2, 3, 4, 5, 6, 7};
+ v8i16 v8i16_r;
+ v4i32 v4i32_a = (v4i32) {0, 1, 2, 3};
+ v4i32 v4i32_r;
+ v2i64 v2i64_a = (v2i64) {0, 1};
+ v2i64 v2i64_r;
+
+ v16u8 v16u8_a = (v16u8) {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+ v16u8 v16u8_r;
+ v8u16 v8u16_a = (v8u16) {0, 1, 2, 3, 4, 5, 6, 7};
+ v8u16 v8u16_r;
+ v4u32 v4u32_a = (v4u32) {0, 1, 2, 3};
+ v4u32 v4u32_r;
+ v2u64 v2u64_a = (v2u64) {0, 1};
+ v2u64 v2u64_r;
+
+
+ int int_r;
+ long long ll_r;
+
+ v16u8_r = __msa_addvi_b(v16u8_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_addvi_h(v8u16_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_addvi_w(v4u32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_addvi_d(v2u64_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_andi_b(v16i8_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+ v8i16_r = __msa_andi_b(v8i16_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+ v4i32_r = __msa_andi_b(v4i32_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+ v2i64_r = __msa_andi_b(v2i64_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bclri_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bclri_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bclri_w(v4i32_a, 33); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bclri_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_binsli_b(v16i8_r, v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_binsli_h(v8i16_r, v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_binsli_w(v4i32_r, v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_binsli_d(v2i64_r, v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_binsri_b(v16i8_r, v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_binsri_h(v8i16_r, v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_binsri_w(v4i32_r, v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_binsri_d(v2i64_r, v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_bmnzi_b(v16i8_r, v16i8_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bmzi_b(v16i8_r, v16i8_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bnegi_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bnegi_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bnegi_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bnegi_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_bseli_b(v16i8_r, v16i8_a, 256); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bseti_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bseti_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bseti_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bseti_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_ceqi_b(v16i8_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_ceqi_h(v8i16_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_ceqi_w(v4i32_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_ceqi_d(v2i64_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16i8_r = __msa_clei_s_b(v16i8_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_clei_s_h(v8i16_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_clei_s_w(v4i32_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_clei_s_d(v2i64_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_clei_u_b(v16u8_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_clei_u_h(v8u16_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_clei_u_w(v4u32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_clei_u_d(v2u64_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_clti_s_b(v16i8_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_clti_s_h(v8i16_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_clti_s_w(v4i32_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_clti_s_d(v2i64_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_clti_u_b(v16u8_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_clti_u_h(v8u16_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_clti_u_w(v4u32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_clti_u_d(v2u64_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+
+ int_r = __msa_copy_s_b(v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ int_r = __msa_copy_s_h(v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ int_r = __msa_copy_s_w(v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
+ ll_r = __msa_copy_s_d(v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
+
+ int_r = __msa_copy_u_b(v16u8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ int_r = __msa_copy_u_h(v8u16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ int_r = __msa_copy_u_w(v4u32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
+ ll_r = __msa_copy_u_d(v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_insve_b(v16i8_r, 16, v16i8_a); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_insve_h(v8i16_r, 8, v8i16_a); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_insve_w(v4i32_r, 4, v4i32_a); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_insve_d(v2i64_r, 2, v2i64_a); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_ld_b(&v16i8_a, 23); // expected-error {{argument should be a multiple of 16}}
+ v8i16_r = __msa_ld_h(&v8i16_a, 77); // expected-error {{argument should be a multiple of 16}}
+ v4i32_r = __msa_ld_w(&v4i32_a, 14); // expected-error {{argument should be a multiple of 16}}
+ v2i64_r = __msa_ld_d(&v2i64_a, 23); // expected-error {{argument should be a multiple of 16}}
+
+ v16i8_r = __msa_ld_b(&v16i8_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ v8i16_r = __msa_ld_h(&v8i16_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ v4i32_r = __msa_ld_w(&v4i32_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ v2i64_r = __msa_ld_d(&v2i64_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_ldi_b(512); // expected-error {{argument should be a value from -512 to 511}}
+ v8i16_r = __msa_ldi_h(512); // expected-error {{argument should be a value from -512 to 511}}
+ v4i32_r = __msa_ldi_w(512); // expected-error {{argument should be a value from -512 to 511}}
+ v2i64_r = __msa_ldi_d(512); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_maxi_s_b(v16i8_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_maxi_s_h(v8i16_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_maxi_s_w(v4i32_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_maxi_s_d(v2i64_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_maxi_u_b(v16u8_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_maxi_u_h(v8u16_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_maxi_u_w(v4u32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_maxi_u_d(v2u64_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_mini_s_b(v16i8_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_mini_s_h(v8i16_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_mini_s_w(v4i32_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_mini_s_d(v2i64_a, 16); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_mini_u_b(v16u8_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_mini_u_h(v8u16_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_mini_u_w(v4u32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_mini_u_d(v2u64_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_nori_b(v16i8_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_ori_b(v16i8_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_sat_s_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_sat_s_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_sat_s_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_sat_s_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_sat_u_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_sat_u_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_sat_u_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_sat_u_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_shf_b(v16i8_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v8i16_r = __msa_shf_h(v8i16_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v4i32_r = __msa_shf_w(v4i32_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_sldi_d(v2i64_r, v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_slli_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_slli_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_slli_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_slli_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_splati_b(v16i8_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_splati_h(v8i16_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_splati_w(v4i32_a, 4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_splati_d(v2i64_a, 2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_srai_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srai_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srai_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srai_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srari_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srari_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srari_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srari_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srli_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srli_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srli_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srli_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srlri_b(v16i8_a, 8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srlri_h(v8i16_a, 16); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srlri_w(v4i32_a, 32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srlri_d(v2i64_a, 64); // expected-error {{argument should be a value from 0 to 63}}
+
+ __msa_st_b(v16i8_b, &v16i8_a, 52); // expected-error {{argument should be a multiple of 16}}
+ __msa_st_h(v8i16_b, &v8i16_a, 51); // expected-error {{argument should be a multiple of 16}}
+ __msa_st_w(v4i32_b, &v4i32_a, 51); // expected-error {{argument should be a multiple of 16}}
+ __msa_st_d(v2i64_b, &v2i64_a, 12); // expected-error {{argument should be a multiple of 16}}
+
+ __msa_st_b(v16i8_b, &v16i8_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_h(v8i16_b, &v8i16_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_w(v4i32_b, &v4i32_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_d(v2i64_b, &v2i64_a, 512); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_subvi_b(v16i8_a, 256); // expected-error {{argument should be a value from 0 to 31}}
+ v8i16_r = __msa_subvi_h(v8i16_a, 256); // expected-error {{argument should be a value from 0 to 31}}
+ v4i32_r = __msa_subvi_w(v4i32_a, 256); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_subvi_d(v2i64_a, 256); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_xori_b(v16i8_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v8i16_r = __msa_xori_b(v8i16_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v4i32_r = __msa_xori_b(v4i32_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v2i64_r = __msa_xori_b(v2i64_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16u8_r = __msa_xori_b(v16u8_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v8u16_r = __msa_xori_b(v8u16_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v4u32_r = __msa_xori_b(v4u32_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+ v2u64_r = __msa_xori_b(v2u64_a, 256); // CHECK: warning: argument should be a value from 0 to 255
+
+ // Test the lower bounds
+
+ v16u8_r = __msa_addvi_b(v16u8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_addvi_h(v8u16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_addvi_w(v4u32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_addvi_d(v2u64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_andi_b(v16i8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v8i16_r = __msa_andi_b(v8i16_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v4i32_r = __msa_andi_b(v4i32_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v2i64_r = __msa_andi_b(v2i64_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_bclri_b(v16i8_a, -1); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bclri_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bclri_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bclri_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_binsli_b(v16i8_r, v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_binsli_h(v8i16_r, v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_binsli_w(v4i32_r, v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_binsli_d(v2i64_r, v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_binsri_b(v16i8_r, v16i8_a, -1); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_binsri_h(v8i16_r, v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_binsri_w(v4i32_r, v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_binsri_d(v2i64_r, v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_bmnzi_b(v16i8_r, v16i8_a, -1); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bmzi_b(v16i8_r, v16i8_a, -1); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bnegi_b(v16i8_a, -1); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bnegi_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bnegi_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bnegi_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_bseli_b(v16i8_r, v16i8_a, -1); // expected-error {{argument should be a value from 0 to 255}}
+
+ v16i8_r = __msa_bseti_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_bseti_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_bseti_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_bseti_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_ceqi_b(v16i8_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_ceqi_h(v8i16_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_ceqi_w(v4i32_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_ceqi_d(v2i64_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16i8_r = __msa_clei_s_b(v16i8_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_clei_s_h(v8i16_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_clei_s_w(v4i32_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_clei_s_d(v2i64_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_clei_u_b(v16u8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_clei_u_h(v8u16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_clei_u_w(v4u32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_clei_u_d(v2u64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_clti_s_b(v16i8_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_clti_s_h(v8i16_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_clti_s_w(v4i32_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_clti_s_d(v2i64_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_clti_u_b(v16u8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_clti_u_h(v8u16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_clti_u_w(v4u32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_clti_u_d(v2u64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ int_r = __msa_copy_s_b(v16i8_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ int_r = __msa_copy_s_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 7}}
+ int_r = __msa_copy_s_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 3}}
+ ll_r = __msa_copy_s_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 1}}
+
+ int_r = __msa_copy_u_b(v16u8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ int_r = __msa_copy_u_h(v8u16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ int_r = __msa_copy_u_w(v4u32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
+ ll_r = __msa_copy_u_d(v2i64_a, -2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_insve_b(v16i8_r, -1, v16i8_a); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_insve_h(v8i16_r, -1, v8i16_a); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_insve_w(v4i32_r, -1, v4i32_a); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_insve_d(v2i64_r, -1, v2i64_a); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_ld_b(&v16i8_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ v8i16_r = __msa_ld_h(&v8i16_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ v4i32_r = __msa_ld_w(&v4i32_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ v2i64_r = __msa_ld_d(&v2i64_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_ldi_b(-513); // expected-error {{argument should be a value from -512 to 511}}
+ v8i16_r = __msa_ldi_h(-513); // expected-error {{argument should be a value from -512 to 511}}
+ v4i32_r = __msa_ldi_w(-513); // expected-error {{argument should be a value from -512 to 511}}
+ v2i64_r = __msa_ldi_d(-513); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_maxi_s_b(v16i8_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_maxi_s_h(v8i16_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_maxi_s_w(v4i32_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_maxi_s_d(v2i64_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_maxi_u_b(v16u8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_maxi_u_h(v8u16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_maxi_u_w(v4u32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_maxi_u_d(v2u64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_mini_s_b(v16i8_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v8i16_r = __msa_mini_s_h(v8i16_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v4i32_r = __msa_mini_s_w(v4i32_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+ v2i64_r = __msa_mini_s_d(v2i64_a, -17); // expected-error {{argument should be a value from -16 to 15}}
+
+ v16u8_r = __msa_mini_u_b(v16u8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8u16_r = __msa_mini_u_h(v8u16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4u32_r = __msa_mini_u_w(v4u32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2u64_r = __msa_mini_u_d(v2u64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_nori_b(v16i8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_ori_b(v16i8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_sat_s_b(v16i8_a, -1); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_sat_s_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_sat_s_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_sat_s_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_sat_u_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_sat_u_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_sat_u_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_sat_u_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_shf_b(v16i8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v8i16_r = __msa_shf_h(v8i16_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v4i32_r = __msa_shf_w(v4i32_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, -2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_sldi_d(v2i64_r, v2i64_a, -2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_slli_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_slli_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_slli_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_slli_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_splati_b(v16i8_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v8i16_r = __msa_splati_h(v8i16_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v4i32_r = __msa_splati_w(v4i32_a, -4); // expected-error {{argument should be a value from 0 to 3}}
+ v2i64_r = __msa_splati_d(v2i64_a, -2); // expected-error {{argument should be a value from 0 to 1}}
+
+ v16i8_r = __msa_srai_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srai_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srai_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srai_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srari_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srari_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srari_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srari_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srli_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srli_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srli_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srli_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ v16i8_r = __msa_srlri_b(v16i8_a, -8); // expected-error {{argument should be a value from 0 to 7}}
+ v8i16_r = __msa_srlri_h(v8i16_a, -17); // expected-error {{argument should be a value from 0 to 15}}
+ v4i32_r = __msa_srlri_w(v4i32_a, -32); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_srlri_d(v2i64_a, -64); // expected-error {{argument should be a value from 0 to 63}}
+
+ __msa_st_b(v16i8_b, &v16i8_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_h(v8i16_b, &v8i16_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_w(v4i32_b, &v4i32_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+ __msa_st_d(v2i64_b, &v2i64_a, -513); // expected-error {{argument should be a value from -512 to 511}}
+
+ v16i8_r = __msa_subvi_b(v16i8_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v8i16_r = __msa_subvi_h(v8i16_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v4i32_r = __msa_subvi_w(v4i32_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+ v2i64_r = __msa_subvi_d(v2i64_a, -1); // expected-error {{argument should be a value from 0 to 31}}
+
+ v16i8_r = __msa_xori_b(v16i8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v8i16_r = __msa_xori_b(v8i16_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v4i32_r = __msa_xori_b(v4i32_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v2i64_r = __msa_xori_b(v2i64_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+ v16u8_r = __msa_xori_b(v16u8_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v8u16_r = __msa_xori_b(v8u16_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v4u32_r = __msa_xori_b(v4u32_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+ v2u64_r = __msa_xori_b(v2u64_a, -1); // CHECK: warning: argument should be a value from 0 to 255
+
+}
diff --git a/test/CodeGen/builtins-mips-msa.c b/test/CodeGen/builtins-mips-msa.c
index 38aea04d9c36..125679545601 100644
--- a/test/CodeGen/builtins-mips-msa.c
+++ b/test/CodeGen/builtins-mips-msa.c
@@ -1,18 +1,11 @@
// REQUIRES: mips-registered-target
-// RUN: %clang_cc1 -triple mips-unknown-linux-gnu -emit-llvm %s -o - \
-// RUN: | FileCheck %s
-
-typedef signed char v16i8 __attribute__ ((vector_size(16)));
-typedef signed short v8i16 __attribute__ ((vector_size(16)));
-typedef signed int v4i32 __attribute__ ((vector_size(16)));
-typedef signed long long v2i64 __attribute__ ((vector_size(16)));
-typedef unsigned char v16u8 __attribute__ ((vector_size(16)));
-typedef unsigned short v8u16 __attribute__ ((vector_size(16)));
-typedef unsigned int v4u32 __attribute__ ((vector_size(16)));
-typedef unsigned long long v2u64 __attribute__ ((vector_size(16)));
+// RUN: %clang_cc1 -triple mips-unknown-linux-gnu -emit-llvm %s \
+// RUN: -target-feature +msa -target-feature +fp64 \
+// RUN: -mfloat-abi hard -o - | FileCheck %s
+
+#include <msa.h>
+
typedef __fp16 v8f16 __attribute__ ((vector_size(16)));
-typedef float v4f32 __attribute__ ((vector_size(16)));
-typedef double v2f64 __attribute__ ((vector_size(16)));
void test(void) {
v16i8 v16i8_a = (v16i8) {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -55,775 +48,775 @@ void test(void) {
long long ll_r;
int int_a = 0;
- v16i8_r = __builtin_msa_add_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.add.a.b(
- v8i16_r = __builtin_msa_add_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.add.a.h(
- v4i32_r = __builtin_msa_add_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.add.a.w(
- v2i64_r = __builtin_msa_add_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.add.a.d(
-
- v16i8_r = __builtin_msa_adds_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.adds.a.b(
- v8i16_r = __builtin_msa_adds_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.adds.a.h(
- v4i32_r = __builtin_msa_adds_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.adds.a.w(
- v2i64_r = __builtin_msa_adds_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.adds.a.d(
-
- v16i8_r = __builtin_msa_adds_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.adds.s.b(
- v8i16_r = __builtin_msa_adds_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.adds.s.h(
- v4i32_r = __builtin_msa_adds_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.adds.s.w(
- v2i64_r = __builtin_msa_adds_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.adds.s.d(
-
- v16u8_r = __builtin_msa_adds_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.adds.u.b(
- v8u16_r = __builtin_msa_adds_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.adds.u.h(
- v4u32_r = __builtin_msa_adds_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.adds.u.w(
- v2u64_r = __builtin_msa_adds_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.adds.u.d(
-
- v16i8_r = __builtin_msa_addv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.addv.b(
- v8i16_r = __builtin_msa_addv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.addv.h(
- v4i32_r = __builtin_msa_addv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.addv.w(
- v2i64_r = __builtin_msa_addv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.addv.d(
-
- v16u8_r = __builtin_msa_addv_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.addv.b(
- v8u16_r = __builtin_msa_addv_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.addv.h(
- v4u32_r = __builtin_msa_addv_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.addv.w(
- v2u64_r = __builtin_msa_addv_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.addv.d(
-
- v16i8_r = __builtin_msa_addvi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.addvi.b(
- v8i16_r = __builtin_msa_addvi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.addvi.h(
- v4i32_r = __builtin_msa_addvi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.addvi.w(
- v2i64_r = __builtin_msa_addvi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.addvi.d(
-
- v16u8_r = __builtin_msa_addvi_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.addvi.b(
- v8u16_r = __builtin_msa_addvi_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.addvi.h(
- v4u32_r = __builtin_msa_addvi_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.addvi.w(
- v2u64_r = __builtin_msa_addvi_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.addvi.d(
-
- v16i8_r = __builtin_msa_and_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
- v8i16_r = __builtin_msa_and_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
- v4i32_r = __builtin_msa_and_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
- v2i64_r = __builtin_msa_and_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
-
- v16i8_r = __builtin_msa_andi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v8i16_r = __builtin_msa_andi_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v4i32_r = __builtin_msa_andi_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v2i64_r = __builtin_msa_andi_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
-
- v16u8_r = __builtin_msa_andi_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v8u16_r = __builtin_msa_andi_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v4u32_r = __builtin_msa_andi_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
- v2u64_r = __builtin_msa_andi_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
-
- v16i8_r = __builtin_msa_asub_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.asub.s.b(
- v8i16_r = __builtin_msa_asub_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.asub.s.h(
- v4i32_r = __builtin_msa_asub_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.asub.s.w(
- v2i64_r = __builtin_msa_asub_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.asub.s.d(
-
- v16u8_r = __builtin_msa_asub_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.asub.u.b(
- v8u16_r = __builtin_msa_asub_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.asub.u.h(
- v4u32_r = __builtin_msa_asub_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.asub.u.w(
- v2u64_r = __builtin_msa_asub_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.asub.u.d(
-
- v16i8_r = __builtin_msa_ave_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ave.s.b(
- v8i16_r = __builtin_msa_ave_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ave.s.h(
- v4i32_r = __builtin_msa_ave_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ave.s.w(
- v2i64_r = __builtin_msa_ave_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ave.s.d(
-
- v16u8_r = __builtin_msa_ave_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.ave.u.b(
- v8u16_r = __builtin_msa_ave_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.ave.u.h(
- v4u32_r = __builtin_msa_ave_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.ave.u.w(
- v2u64_r = __builtin_msa_ave_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.ave.u.d(
-
- v16i8_r = __builtin_msa_aver_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.aver.s.b(
- v8i16_r = __builtin_msa_aver_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.aver.s.h(
- v4i32_r = __builtin_msa_aver_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.aver.s.w(
- v2i64_r = __builtin_msa_aver_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.aver.s.d(
-
- v16u8_r = __builtin_msa_aver_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.aver.u.b(
- v8u16_r = __builtin_msa_aver_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.aver.u.h(
- v4u32_r = __builtin_msa_aver_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.aver.u.w(
- v2u64_r = __builtin_msa_aver_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.aver.u.d(
-
- v16i8_r = __builtin_msa_bclr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bclr.b(
- v8i16_r = __builtin_msa_bclr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bclr.h(
- v4i32_r = __builtin_msa_bclr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bclr.w(
- v2i64_r = __builtin_msa_bclr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bclr.d(
-
- v16i8_r = __builtin_msa_bclri_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bclri.b(
- v8i16_r = __builtin_msa_bclri_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.bclri.h(
- v4i32_r = __builtin_msa_bclri_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bclri.w(
- v2i64_r = __builtin_msa_bclri_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bclri.d(
-
- v16i8_r = __builtin_msa_binsl_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.binsl.b(
- v8i16_r = __builtin_msa_binsl_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.binsl.h(
- v4i32_r = __builtin_msa_binsl_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.binsl.w(
- v2i64_r = __builtin_msa_binsl_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.binsl.d(
-
- v16i8_r = __builtin_msa_binsli_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.binsli.b(
- v8i16_r = __builtin_msa_binsli_h(v8i16_r, v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.binsli.h(
- v4i32_r = __builtin_msa_binsli_w(v4i32_r, v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.binsli.w(
- v2i64_r = __builtin_msa_binsli_d(v2i64_r, v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.binsli.d(
-
- v16i8_r = __builtin_msa_binsr_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.binsr.b(
- v8i16_r = __builtin_msa_binsr_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.binsr.h(
- v4i32_r = __builtin_msa_binsr_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.binsr.w(
- v2i64_r = __builtin_msa_binsr_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.binsr.d(
-
- v16i8_r = __builtin_msa_binsri_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.binsri.b(
- v8i16_r = __builtin_msa_binsri_h(v8i16_r, v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.binsri.h(
- v4i32_r = __builtin_msa_binsri_w(v4i32_r, v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.binsri.w(
- v2i64_r = __builtin_msa_binsri_d(v2i64_r, v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.binsri.d(
-
- v16i8_r = __builtin_msa_bmnz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
- v8i16_r = __builtin_msa_bmnz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
- v4i32_r = __builtin_msa_bmnz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
- v2i64_r = __builtin_msa_bmnz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
-
- v16i8_r = __builtin_msa_bmnzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmnzi.b(
-
- v16i8_r = __builtin_msa_bmz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
- v8i16_r = __builtin_msa_bmz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
- v4i32_r = __builtin_msa_bmz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
- v2i64_r = __builtin_msa_bmz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
-
- v16i8_r = __builtin_msa_bmzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmzi.b(
-
- v16i8_r = __builtin_msa_bneg_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bneg.b(
- v8i16_r = __builtin_msa_bneg_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bneg.h(
- v4i32_r = __builtin_msa_bneg_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bneg.w(
- v2i64_r = __builtin_msa_bneg_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bneg.d(
-
- v16i8_r = __builtin_msa_bnegi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bnegi.b(
- v8i16_r = __builtin_msa_bnegi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.bnegi.h(
- v4i32_r = __builtin_msa_bnegi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bnegi.w(
- v2i64_r = __builtin_msa_bnegi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bnegi.d(
-
- int_r = __builtin_msa_bnz_b(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.b(
- int_r = __builtin_msa_bnz_h(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.h(
- int_r = __builtin_msa_bnz_w(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.w(
- int_r = __builtin_msa_bnz_d(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.d(
-
- int_r = __builtin_msa_bnz_v(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.v(
-
- v16i8_r = __builtin_msa_bsel_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
- v8i16_r = __builtin_msa_bsel_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
- v4i32_r = __builtin_msa_bsel_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
- v2i64_r = __builtin_msa_bsel_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
-
- v16i8_r = __builtin_msa_bseli_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bseli.b(
-
- v16i8_r = __builtin_msa_bset_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bset.b(
- v8i16_r = __builtin_msa_bset_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bset.h(
- v4i32_r = __builtin_msa_bset_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bset.w(
- v2i64_r = __builtin_msa_bset_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bset.d(
-
- v16i8_r = __builtin_msa_bseti_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bseti.b(
- v8i16_r = __builtin_msa_bseti_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.bseti.h(
- v4i32_r = __builtin_msa_bseti_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bseti.w(
- v2i64_r = __builtin_msa_bseti_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bseti.d(
-
- int_r = __builtin_msa_bz_b(v16i8_a); // CHECK: call i32 @llvm.mips.bz.b(
- int_r = __builtin_msa_bz_h(v16i8_a); // CHECK: call i32 @llvm.mips.bz.h(
- int_r = __builtin_msa_bz_w(v16i8_a); // CHECK: call i32 @llvm.mips.bz.w(
- int_r = __builtin_msa_bz_d(v16i8_a); // CHECK: call i32 @llvm.mips.bz.d(
-
- int_r = __builtin_msa_bz_v(v16i8_a); // CHECK: call i32 @llvm.mips.bz.v(
-
- v16i8_r = __builtin_msa_ceq_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ceq.b(
- v8i16_r = __builtin_msa_ceq_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ceq.h(
- v4i32_r = __builtin_msa_ceq_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ceq.w(
- v2i64_r = __builtin_msa_ceq_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ceq.d(
-
- v16i8_r = __builtin_msa_ceqi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.ceqi.b(
- v8i16_r = __builtin_msa_ceqi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.ceqi.h(
- v4i32_r = __builtin_msa_ceqi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.ceqi.w(
- v2i64_r = __builtin_msa_ceqi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.ceqi.d(
-
- int_r = __builtin_msa_cfcmsa(1); // CHECK: call i32 @llvm.mips.cfcmsa(
-
- v16i8_r = __builtin_msa_cle_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.cle.s.b(
- v8i16_r = __builtin_msa_cle_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.cle.s.h(
- v4i32_r = __builtin_msa_cle_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.cle.s.w(
- v2i64_r = __builtin_msa_cle_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.cle.s.d(
-
- v16u8_r = __builtin_msa_cle_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.cle.u.b(
- v8u16_r = __builtin_msa_cle_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.cle.u.h(
- v4u32_r = __builtin_msa_cle_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.cle.u.w(
- v2u64_r = __builtin_msa_cle_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.cle.u.d(
-
- v16i8_r = __builtin_msa_clei_s_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clei.s.b(
- v8i16_r = __builtin_msa_clei_s_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clei.s.h(
- v4i32_r = __builtin_msa_clei_s_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clei.s.w(
- v2i64_r = __builtin_msa_clei_s_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clei.s.d(
-
- v16u8_r = __builtin_msa_clei_u_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clei.u.b(
- v8u16_r = __builtin_msa_clei_u_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clei.u.h(
- v4u32_r = __builtin_msa_clei_u_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clei.u.w(
- v2u64_r = __builtin_msa_clei_u_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clei.u.d(
-
- v16i8_r = __builtin_msa_clt_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.clt.s.b(
- v8i16_r = __builtin_msa_clt_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.clt.s.h(
- v4i32_r = __builtin_msa_clt_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.clt.s.w(
- v2i64_r = __builtin_msa_clt_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.clt.s.d(
-
- v16u8_r = __builtin_msa_clt_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.clt.u.b(
- v8u16_r = __builtin_msa_clt_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.clt.u.h(
- v4u32_r = __builtin_msa_clt_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.clt.u.w(
- v2u64_r = __builtin_msa_clt_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.clt.u.d(
-
- v16i8_r = __builtin_msa_clti_s_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clti.s.b(
- v8i16_r = __builtin_msa_clti_s_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clti.s.h(
- v4i32_r = __builtin_msa_clti_s_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clti.s.w(
- v2i64_r = __builtin_msa_clti_s_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clti.s.d(
-
- v16u8_r = __builtin_msa_clti_u_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clti.u.b(
- v8u16_r = __builtin_msa_clti_u_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clti.u.h(
- v4u32_r = __builtin_msa_clti_u_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clti.u.w(
- v2u64_r = __builtin_msa_clti_u_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clti.u.d(
-
- int_r = __builtin_msa_copy_s_b(v16i8_a, 1); // CHECK: call i32 @llvm.mips.copy.s.b(
- int_r = __builtin_msa_copy_s_h(v8i16_a, 1); // CHECK: call i32 @llvm.mips.copy.s.h(
- int_r = __builtin_msa_copy_s_w(v4i32_a, 1); // CHECK: call i32 @llvm.mips.copy.s.w(
- ll_r = __builtin_msa_copy_s_d(v2i64_a, 1); // CHECK: call i64 @llvm.mips.copy.s.d(
-
- int_r = __builtin_msa_copy_u_b(v16u8_a, 1); // CHECK: call i32 @llvm.mips.copy.u.b(
- int_r = __builtin_msa_copy_u_h(v8u16_a, 1); // CHECK: call i32 @llvm.mips.copy.u.h(
- int_r = __builtin_msa_copy_u_w(v4u32_a, 1); // CHECK: call i32 @llvm.mips.copy.u.w(
- ll_r = __builtin_msa_copy_u_d(v2i64_a, 1); // CHECK: call i64 @llvm.mips.copy.u.d(
+ v16i8_r = __msa_add_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.add.a.b(
+ v8i16_r = __msa_add_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.add.a.h(
+ v4i32_r = __msa_add_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.add.a.w(
+ v2i64_r = __msa_add_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.add.a.d(
+
+ v16i8_r = __msa_adds_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.adds.a.b(
+ v8i16_r = __msa_adds_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.adds.a.h(
+ v4i32_r = __msa_adds_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.adds.a.w(
+ v2i64_r = __msa_adds_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.adds.a.d(
+
+ v16i8_r = __msa_adds_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.adds.s.b(
+ v8i16_r = __msa_adds_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.adds.s.h(
+ v4i32_r = __msa_adds_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.adds.s.w(
+ v2i64_r = __msa_adds_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.adds.s.d(
+
+ v16u8_r = __msa_adds_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.adds.u.b(
+ v8u16_r = __msa_adds_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.adds.u.h(
+ v4u32_r = __msa_adds_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.adds.u.w(
+ v2u64_r = __msa_adds_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.adds.u.d(
+
+ v16i8_r = __msa_addv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.addv.b(
+ v8i16_r = __msa_addv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.addv.h(
+ v4i32_r = __msa_addv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.addv.w(
+ v2i64_r = __msa_addv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.addv.d(
+
+ v16u8_r = __msa_addv_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.addv.b(
+ v8u16_r = __msa_addv_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.addv.h(
+ v4u32_r = __msa_addv_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.addv.w(
+ v2u64_r = __msa_addv_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.addv.d(
+
+ v16i8_r = __msa_addvi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.addvi.b(
+ v8i16_r = __msa_addvi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.addvi.h(
+ v4i32_r = __msa_addvi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.addvi.w(
+ v2i64_r = __msa_addvi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.addvi.d(
+
+ v16u8_r = __msa_addvi_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.addvi.b(
+ v8u16_r = __msa_addvi_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.addvi.h(
+ v4u32_r = __msa_addvi_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.addvi.w(
+ v2u64_r = __msa_addvi_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.addvi.d(
+
+ v16i8_r = __msa_and_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
+ v8i16_r = __msa_and_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
+ v4i32_r = __msa_and_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
+ v2i64_r = __msa_and_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.and.v(
+
+ v16i8_r = __msa_andi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v8i16_r = __msa_andi_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v4i32_r = __msa_andi_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v2i64_r = __msa_andi_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+
+ v16u8_r = __msa_andi_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v8u16_r = __msa_andi_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v4u32_r = __msa_andi_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+ v2u64_r = __msa_andi_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.andi.b(
+
+ v16i8_r = __msa_asub_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.asub.s.b(
+ v8i16_r = __msa_asub_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.asub.s.h(
+ v4i32_r = __msa_asub_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.asub.s.w(
+ v2i64_r = __msa_asub_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.asub.s.d(
+
+ v16u8_r = __msa_asub_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.asub.u.b(
+ v8u16_r = __msa_asub_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.asub.u.h(
+ v4u32_r = __msa_asub_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.asub.u.w(
+ v2u64_r = __msa_asub_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.asub.u.d(
+
+ v16i8_r = __msa_ave_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ave.s.b(
+ v8i16_r = __msa_ave_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ave.s.h(
+ v4i32_r = __msa_ave_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ave.s.w(
+ v2i64_r = __msa_ave_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ave.s.d(
+
+ v16u8_r = __msa_ave_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.ave.u.b(
+ v8u16_r = __msa_ave_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.ave.u.h(
+ v4u32_r = __msa_ave_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.ave.u.w(
+ v2u64_r = __msa_ave_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.ave.u.d(
+
+ v16i8_r = __msa_aver_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.aver.s.b(
+ v8i16_r = __msa_aver_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.aver.s.h(
+ v4i32_r = __msa_aver_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.aver.s.w(
+ v2i64_r = __msa_aver_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.aver.s.d(
+
+ v16u8_r = __msa_aver_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.aver.u.b(
+ v8u16_r = __msa_aver_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.aver.u.h(
+ v4u32_r = __msa_aver_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.aver.u.w(
+ v2u64_r = __msa_aver_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.aver.u.d(
+
+ v16i8_r = __msa_bclr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bclr.b(
+ v8i16_r = __msa_bclr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bclr.h(
+ v4i32_r = __msa_bclr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bclr.w(
+ v2i64_r = __msa_bclr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bclr.d(
+
+ v16i8_r = __msa_bclri_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.bclri.b(
+ v8i16_r = __msa_bclri_h(v8i16_a, 8); // CHECK: call <8 x i16> @llvm.mips.bclri.h(
+ v4i32_r = __msa_bclri_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bclri.w(
+ v2i64_r = __msa_bclri_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bclri.d(
+
+ v16i8_r = __msa_binsl_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.binsl.b(
+ v8i16_r = __msa_binsl_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.binsl.h(
+ v4i32_r = __msa_binsl_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.binsl.w(
+ v2i64_r = __msa_binsl_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.binsl.d(
+
+ v16i8_r = __msa_binsli_b(v16i8_r, v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.binsli.b(
+ v8i16_r = __msa_binsli_h(v8i16_r, v8i16_a, 8); // CHECK: call <8 x i16> @llvm.mips.binsli.h(
+ v4i32_r = __msa_binsli_w(v4i32_r, v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.binsli.w(
+ v2i64_r = __msa_binsli_d(v2i64_r, v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.binsli.d(
+
+ v16i8_r = __msa_binsr_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.binsr.b(
+ v8i16_r = __msa_binsr_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.binsr.h(
+ v4i32_r = __msa_binsr_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.binsr.w(
+ v2i64_r = __msa_binsr_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.binsr.d(
+
+ v16i8_r = __msa_binsri_b(v16i8_r, v16i8_a, 5); // CHECK: call <16 x i8> @llvm.mips.binsri.b(
+ v8i16_r = __msa_binsri_h(v8i16_r, v8i16_a, 15); // CHECK: call <8 x i16> @llvm.mips.binsri.h(
+ v4i32_r = __msa_binsri_w(v4i32_r, v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.binsri.w(
+ v2i64_r = __msa_binsri_d(v2i64_r, v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.binsri.d(
+
+ v16i8_r = __msa_bmnz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+ v8i16_r = __msa_bmnz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+ v4i32_r = __msa_bmnz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+ v2i64_r = __msa_bmnz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmnz.v(
+
+ v16i8_r = __msa_bmnzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmnzi.b(
+
+ v16i8_r = __msa_bmz_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+ v8i16_r = __msa_bmz_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+ v4i32_r = __msa_bmz_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+ v2i64_r = __msa_bmz_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bmz.v(
+
+ v16i8_r = __msa_bmzi_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bmzi.b(
+
+ v16i8_r = __msa_bneg_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bneg.b(
+ v8i16_r = __msa_bneg_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bneg.h(
+ v4i32_r = __msa_bneg_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bneg.w(
+ v2i64_r = __msa_bneg_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bneg.d(
+
+ v16i8_r = __msa_bnegi_b(v16i8_a, 6); // CHECK: call <16 x i8> @llvm.mips.bnegi.b(
+ v8i16_r = __msa_bnegi_h(v8i16_a, 14); // CHECK: call <8 x i16> @llvm.mips.bnegi.h(
+ v4i32_r = __msa_bnegi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bnegi.w(
+ v2i64_r = __msa_bnegi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bnegi.d(
+
+ int_r = __msa_test_bnz_b(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.b(
+ int_r = __msa_test_bnz_h(v8i16_a); // CHECK: call i32 @llvm.mips.bnz.h(
+ int_r = __msa_test_bnz_w(v4i32_a); // CHECK: call i32 @llvm.mips.bnz.w(
+ int_r = __msa_test_bnz_d(v2i64_a); // CHECK: call i32 @llvm.mips.bnz.d(
+
+ int_r = __msa_test_bnz_v(v16i8_a); // CHECK: call i32 @llvm.mips.bnz.v(
+
+ v16i8_r = __msa_bsel_v(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+ v8i16_r = __msa_bsel_v(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+ v4i32_r = __msa_bsel_v(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+ v2i64_r = __msa_bsel_v(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.bsel.v(
+
+ v16i8_r = __msa_bseli_b(v16i8_r, v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.bseli.b(
+
+ v16i8_r = __msa_bset_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.bset.b(
+ v8i16_r = __msa_bset_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.bset.h(
+ v4i32_r = __msa_bset_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.bset.w(
+ v2i64_r = __msa_bset_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.bset.d(
+
+ v16i8_r = __msa_bseti_b(v16i8_a, 5); // CHECK: call <16 x i8> @llvm.mips.bseti.b(
+ v8i16_r = __msa_bseti_h(v8i16_a, 15); // CHECK: call <8 x i16> @llvm.mips.bseti.h(
+ v4i32_r = __msa_bseti_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.bseti.w(
+ v2i64_r = __msa_bseti_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.bseti.d(
+
+ int_r = __msa_test_bz_b(v16i8_a); // CHECK: call i32 @llvm.mips.bz.b(
+ int_r = __msa_test_bz_h(v8i16_a); // CHECK: call i32 @llvm.mips.bz.h(
+ int_r = __msa_test_bz_w(v4i32_a); // CHECK: call i32 @llvm.mips.bz.w(
+ int_r = __msa_test_bz_d(v2i64_a); // CHECK: call i32 @llvm.mips.bz.d(
+
+ int_r = __msa_test_bz_v(v16i8_a); // CHECK: call i32 @llvm.mips.bz.v(
+
+ v16i8_r = __msa_ceq_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ceq.b(
+ v8i16_r = __msa_ceq_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ceq.h(
+ v4i32_r = __msa_ceq_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ceq.w(
+ v2i64_r = __msa_ceq_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ceq.d(
+
+ v16i8_r = __msa_ceqi_b(v16i8_a, -3); // CHECK: call <16 x i8> @llvm.mips.ceqi.b(
+ v8i16_r = __msa_ceqi_h(v8i16_a, -12); // CHECK: call <8 x i16> @llvm.mips.ceqi.h(
+ v4i32_r = __msa_ceqi_w(v4i32_a, 14); // CHECK: call <4 x i32> @llvm.mips.ceqi.w(
+ v2i64_r = __msa_ceqi_d(v2i64_a, 15); // CHECK: call <2 x i64> @llvm.mips.ceqi.d(
+
+ int_r = __msa_cfcmsa(1); // CHECK: call i32 @llvm.mips.cfcmsa(
+
+ v16i8_r = __msa_cle_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.cle.s.b(
+ v8i16_r = __msa_cle_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.cle.s.h(
+ v4i32_r = __msa_cle_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.cle.s.w(
+ v2i64_r = __msa_cle_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.cle.s.d(
+
+ v16u8_r = __msa_cle_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.cle.u.b(
+ v8u16_r = __msa_cle_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.cle.u.h(
+ v4u32_r = __msa_cle_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.cle.u.w(
+ v2u64_r = __msa_cle_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.cle.u.d(
+
+ v16i8_r = __msa_clei_s_b(v16i8_a, 12); // CHECK: call <16 x i8> @llvm.mips.clei.s.b(
+ v8i16_r = __msa_clei_s_h(v8i16_a, 13); // CHECK: call <8 x i16> @llvm.mips.clei.s.h(
+ v4i32_r = __msa_clei_s_w(v4i32_a, 14); // CHECK: call <4 x i32> @llvm.mips.clei.s.w(
+ v2i64_r = __msa_clei_s_d(v2i64_a, 15); // CHECK: call <2 x i64> @llvm.mips.clei.s.d(
+
+ v16u8_r = __msa_clei_u_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clei.u.b(
+ v8u16_r = __msa_clei_u_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clei.u.h(
+ v4u32_r = __msa_clei_u_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clei.u.w(
+ v2u64_r = __msa_clei_u_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clei.u.d(
+
+ v16i8_r = __msa_clt_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.clt.s.b(
+ v8i16_r = __msa_clt_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.clt.s.h(
+ v4i32_r = __msa_clt_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.clt.s.w(
+ v2i64_r = __msa_clt_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.clt.s.d(
+
+ v16u8_r = __msa_clt_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.clt.u.b(
+ v8u16_r = __msa_clt_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.clt.u.h(
+ v4u32_r = __msa_clt_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.clt.u.w(
+ v2u64_r = __msa_clt_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.clt.u.d(
+
+ v16i8_r = __msa_clti_s_b(v16i8_a, 15); // CHECK: call <16 x i8> @llvm.mips.clti.s.b(
+ v8i16_r = __msa_clti_s_h(v8i16_a, 15); // CHECK: call <8 x i16> @llvm.mips.clti.s.h(
+ v4i32_r = __msa_clti_s_w(v4i32_a, 15); // CHECK: call <4 x i32> @llvm.mips.clti.s.w(
+ v2i64_r = __msa_clti_s_d(v2i64_a, 15); // CHECK: call <2 x i64> @llvm.mips.clti.s.d(
+
+ v16u8_r = __msa_clti_u_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.clti.u.b(
+ v8u16_r = __msa_clti_u_h(v8u16_a, 25); // CHECK: call <8 x i16> @llvm.mips.clti.u.h(
+ v4u32_r = __msa_clti_u_w(v4u32_a, 25); // CHECK: call <4 x i32> @llvm.mips.clti.u.w(
+ v2u64_r = __msa_clti_u_d(v2u64_a, 25); // CHECK: call <2 x i64> @llvm.mips.clti.u.d(
+
+ int_r = __msa_copy_s_b(v16i8_a, 1); // CHECK: call i32 @llvm.mips.copy.s.b(
+ int_r = __msa_copy_s_h(v8i16_a, 1); // CHECK: call i32 @llvm.mips.copy.s.h(
+ int_r = __msa_copy_s_w(v4i32_a, 1); // CHECK: call i32 @llvm.mips.copy.s.w(
+ ll_r = __msa_copy_s_d(v2i64_a, 1); // CHECK: call i64 @llvm.mips.copy.s.d(
+
+ int_r = __msa_copy_u_b(v16u8_a, 1); // CHECK: call i32 @llvm.mips.copy.u.b(
+ int_r = __msa_copy_u_h(v8u16_a, 1); // CHECK: call i32 @llvm.mips.copy.u.h(
+ int_r = __msa_copy_u_w(v4u32_a, 1); // CHECK: call i32 @llvm.mips.copy.u.w(
+ ll_r = __msa_copy_u_d(v2u64_a, 1); // CHECK: call i64 @llvm.mips.copy.u.d(
__builtin_msa_ctcmsa(1, int_a); // CHECK: call void @llvm.mips.ctcmsa(
- v16i8_r = __builtin_msa_div_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.div.s.b(
- v8i16_r = __builtin_msa_div_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.div.s.h(
- v4i32_r = __builtin_msa_div_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.div.s.w(
- v2i64_r = __builtin_msa_div_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.div.s.d(
+ v16i8_r = __msa_div_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.div.s.b(
+ v8i16_r = __msa_div_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.div.s.h(
+ v4i32_r = __msa_div_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.div.s.w(
+ v2i64_r = __msa_div_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.div.s.d(
- v16u8_r = __builtin_msa_div_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.div.u.b(
- v8u16_r = __builtin_msa_div_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.div.u.h(
- v4u32_r = __builtin_msa_div_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.div.u.w(
- v2u64_r = __builtin_msa_div_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.div.u.d(
+ v16u8_r = __msa_div_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.div.u.b(
+ v8u16_r = __msa_div_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.div.u.h(
+ v4u32_r = __msa_div_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.div.u.w(
+ v2u64_r = __msa_div_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.div.u.d(
- v8i16_r = __builtin_msa_dotp_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dotp.s.h(
- v4i32_r = __builtin_msa_dotp_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dotp.s.w(
- v2i64_r = __builtin_msa_dotp_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dotp.s.d(
+ v8i16_r = __msa_dotp_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dotp.s.h(
+ v4i32_r = __msa_dotp_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dotp.s.w(
+ v2i64_r = __msa_dotp_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dotp.s.d(
- v8u16_r = __builtin_msa_dotp_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dotp.u.h(
- v4u32_r = __builtin_msa_dotp_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dotp.u.w(
- v2u64_r = __builtin_msa_dotp_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dotp.u.d(
+ v8u16_r = __msa_dotp_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dotp.u.h(
+ v4u32_r = __msa_dotp_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dotp.u.w(
+ v2u64_r = __msa_dotp_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dotp.u.d(
- v8i16_r = __builtin_msa_dpadd_s_h(v8i16_r, v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dpadd.s.h(
- v4i32_r = __builtin_msa_dpadd_s_w(v4i32_r, v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dpadd.s.w(
- v2i64_r = __builtin_msa_dpadd_s_d(v2i64_r, v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dpadd.s.d(
+ v8i16_r = __msa_dpadd_s_h(v8i16_r, v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dpadd.s.h(
+ v4i32_r = __msa_dpadd_s_w(v4i32_r, v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dpadd.s.w(
+ v2i64_r = __msa_dpadd_s_d(v2i64_r, v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dpadd.s.d(
- v8u16_r = __builtin_msa_dpadd_u_h(v8u16_r, v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dpadd.u.h(
- v4u32_r = __builtin_msa_dpadd_u_w(v4u32_r, v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dpadd.u.w(
- v2u64_r = __builtin_msa_dpadd_u_d(v2u64_r, v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dpadd.u.d(
+ v8u16_r = __msa_dpadd_u_h(v8u16_r, v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dpadd.u.h(
+ v4u32_r = __msa_dpadd_u_w(v4u32_r, v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dpadd.u.w(
+ v2u64_r = __msa_dpadd_u_d(v2u64_r, v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dpadd.u.d(
- v8i16_r = __builtin_msa_dpsub_s_h(v8i16_r, v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dpsub.s.h(
- v4i32_r = __builtin_msa_dpsub_s_w(v4i32_r, v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dpsub.s.w(
- v2i64_r = __builtin_msa_dpsub_s_d(v2i64_r, v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dpsub.s.d(
+ v8i16_r = __msa_dpsub_s_h(v8i16_r, v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.dpsub.s.h(
+ v4i32_r = __msa_dpsub_s_w(v4i32_r, v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.dpsub.s.w(
+ v2i64_r = __msa_dpsub_s_d(v2i64_r, v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.dpsub.s.d(
- v8u16_r = __builtin_msa_dpsub_u_h(v8u16_r, v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dpsub.u.h(
- v4u32_r = __builtin_msa_dpsub_u_w(v4u32_r, v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dpsub.u.w(
- v2u64_r = __builtin_msa_dpsub_u_d(v2u64_r, v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dpsub.u.d(
+ v8u16_r = __msa_dpsub_u_h(v8u16_r, v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.dpsub.u.h(
+ v4u32_r = __msa_dpsub_u_w(v4u32_r, v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.dpsub.u.w(
+ v2u64_r = __msa_dpsub_u_d(v2u64_r, v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.dpsub.u.d(
- v4f32_r = __builtin_msa_fadd_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fadd.w(
- v2f64_r = __builtin_msa_fadd_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fadd.d(
+ v4f32_r = __msa_fadd_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fadd.w(
+ v2f64_r = __msa_fadd_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fadd.d(
- v4i32_r = __builtin_msa_fcaf_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcaf.w(
- v2i64_r = __builtin_msa_fcaf_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcaf.d(
+ v4i32_r = __msa_fcaf_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcaf.w(
+ v2i64_r = __msa_fcaf_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcaf.d(
- v4i32_r = __builtin_msa_fceq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fceq.w(
- v2i64_r = __builtin_msa_fceq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fceq.d(
+ v4i32_r = __msa_fceq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fceq.w(
+ v2i64_r = __msa_fceq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fceq.d(
- v4i32_r = __builtin_msa_fclass_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.fclass.w(
- v2i64_r = __builtin_msa_fclass_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.fclass.d(
+ v4i32_r = __msa_fclass_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.fclass.w(
+ v2i64_r = __msa_fclass_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.fclass.d(
- v4i32_r = __builtin_msa_fcle_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcle.w(
- v2i64_r = __builtin_msa_fcle_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcle.d(
+ v4i32_r = __msa_fcle_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcle.w(
+ v2i64_r = __msa_fcle_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcle.d(
- v4i32_r = __builtin_msa_fclt_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fclt.w(
- v2i64_r = __builtin_msa_fclt_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fclt.d(
+ v4i32_r = __msa_fclt_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fclt.w(
+ v2i64_r = __msa_fclt_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fclt.d(
- v4i32_r = __builtin_msa_fcne_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcne.w(
- v2i64_r = __builtin_msa_fcne_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcne.d(
+ v4i32_r = __msa_fcne_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcne.w(
+ v2i64_r = __msa_fcne_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcne.d(
- v4i32_r = __builtin_msa_fcor_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcor.w(
- v2i64_r = __builtin_msa_fcor_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcor.d(
+ v4i32_r = __msa_fcor_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcor.w(
+ v2i64_r = __msa_fcor_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcor.d(
- v4i32_r = __builtin_msa_fcueq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcueq.w(
- v2i64_r = __builtin_msa_fcueq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcueq.d(
+ v4i32_r = __msa_fcueq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcueq.w(
+ v2i64_r = __msa_fcueq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcueq.d(
- v4i32_r = __builtin_msa_fcule_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcule.w(
- v2i64_r = __builtin_msa_fcule_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcule.d(
+ v4i32_r = __msa_fcule_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcule.w(
+ v2i64_r = __msa_fcule_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcule.d(
- v4i32_r = __builtin_msa_fcult_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcult.w(
- v2i64_r = __builtin_msa_fcult_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcult.d(
+ v4i32_r = __msa_fcult_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcult.w(
+ v2i64_r = __msa_fcult_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcult.d(
- v4i32_r = __builtin_msa_fcun_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcun.w(
- v2i64_r = __builtin_msa_fcun_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcun.d(
+ v4i32_r = __msa_fcun_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcun.w(
+ v2i64_r = __msa_fcun_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcun.d(
- v4i32_r = __builtin_msa_fcune_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcune.w(
- v2i64_r = __builtin_msa_fcune_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcune.d(
+ v4i32_r = __msa_fcune_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fcune.w(
+ v2i64_r = __msa_fcune_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fcune.d(
- v4f32_r = __builtin_msa_fdiv_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fdiv.w(
- v2f64_r = __builtin_msa_fdiv_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fdiv.d(
+ v4f32_r = __msa_fdiv_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fdiv.w(
+ v2f64_r = __msa_fdiv_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fdiv.d(
- v8f16_r = __builtin_msa_fexdo_h(v4f32_a, v4f32_b); // CHECK: call <8 x half> @llvm.mips.fexdo.h(
- v4f32_r = __builtin_msa_fexdo_w(v2f64_a, v2f64_b); // CHECK: call <4 x float> @llvm.mips.fexdo.w(
+ v8f16_r = __msa_fexdo_h(v4f32_a, v4f32_b); // CHECK: call <8 x half> @llvm.mips.fexdo.h(
+ v4f32_r = __msa_fexdo_w(v2f64_a, v2f64_b); // CHECK: call <4 x float> @llvm.mips.fexdo.w(
- v4f32_r = __builtin_msa_fexp2_w(v4f32_a, v4i32_b); // CHECK: call <4 x float> @llvm.mips.fexp2.w(
- v2f64_r = __builtin_msa_fexp2_d(v2f64_a, v2i64_b); // CHECK: call <2 x double> @llvm.mips.fexp2.d(
+ v4f32_r = __msa_fexp2_w(v4f32_a, v4i32_b); // CHECK: call <4 x float> @llvm.mips.fexp2.w(
+ v2f64_r = __msa_fexp2_d(v2f64_a, v2i64_b); // CHECK: call <2 x double> @llvm.mips.fexp2.d(
- v4f32_r = __builtin_msa_fexupl_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.fexupl.w(
- v2f64_r = __builtin_msa_fexupl_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.fexupl.d(
+ v4f32_r = __msa_fexupl_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.fexupl.w(
+ v2f64_r = __msa_fexupl_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.fexupl.d(
- v4f32_r = __builtin_msa_fexupr_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.fexupr.w(
- v2f64_r = __builtin_msa_fexupr_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.fexupr.d(
+ v4f32_r = __msa_fexupr_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.fexupr.w(
+ v2f64_r = __msa_fexupr_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.fexupr.d(
- v4f32_r = __builtin_msa_ffint_s_w(v4i32_a); // CHECK: call <4 x float> @llvm.mips.ffint.s.w(
- v2f64_r = __builtin_msa_ffint_s_d(v2i64_a); // CHECK: call <2 x double> @llvm.mips.ffint.s.d(
+ v4f32_r = __msa_ffint_s_w(v4i32_a); // CHECK: call <4 x float> @llvm.mips.ffint.s.w(
+ v2f64_r = __msa_ffint_s_d(v2i64_a); // CHECK: call <2 x double> @llvm.mips.ffint.s.d(
- v4f32_r = __builtin_msa_ffint_u_w(v4i32_a); // CHECK: call <4 x float> @llvm.mips.ffint.u.w(
- v2f64_r = __builtin_msa_ffint_u_d(v2i64_a); // CHECK: call <2 x double> @llvm.mips.ffint.u.d(
+ v4f32_r = __msa_ffint_u_w(v4i32_a); // CHECK: call <4 x float> @llvm.mips.ffint.u.w(
+ v2f64_r = __msa_ffint_u_d(v2i64_a); // CHECK: call <2 x double> @llvm.mips.ffint.u.d(
- v4f32_r = __builtin_msa_ffql_w(v8i16_a); // CHECK: call <4 x float> @llvm.mips.ffql.w(
- v2f64_r = __builtin_msa_ffql_d(v4i32_a); // CHECK: call <2 x double> @llvm.mips.ffql.d(
+ v4f32_r = __msa_ffql_w(v8i16_a); // CHECK: call <4 x float> @llvm.mips.ffql.w(
+ v2f64_r = __msa_ffql_d(v4i32_a); // CHECK: call <2 x double> @llvm.mips.ffql.d(
- v4f32_r = __builtin_msa_ffqr_w(v8i16_a); // CHECK: call <4 x float> @llvm.mips.ffqr.w(
- v2f64_r = __builtin_msa_ffqr_d(v4i32_a); // CHECK: call <2 x double> @llvm.mips.ffqr.d(
+ v4f32_r = __msa_ffqr_w(v8i16_a); // CHECK: call <4 x float> @llvm.mips.ffqr.w(
+ v2f64_r = __msa_ffqr_d(v4i32_a); // CHECK: call <2 x double> @llvm.mips.ffqr.d(
- v16i8_r = __builtin_msa_fill_b(3); // CHECK: call <16 x i8> @llvm.mips.fill.b(
- v8i16_r = __builtin_msa_fill_h(3); // CHECK: call <8 x i16> @llvm.mips.fill.h(
- v4i32_r = __builtin_msa_fill_w(3); // CHECK: call <4 x i32> @llvm.mips.fill.w(
- v2i64_r = __builtin_msa_fill_d(3); // CHECK: call <2 x i64> @llvm.mips.fill.d(
+ v16i8_r = __msa_fill_b(3); // CHECK: call <16 x i8> @llvm.mips.fill.b(
+ v8i16_r = __msa_fill_h(3); // CHECK: call <8 x i16> @llvm.mips.fill.h(
+ v4i32_r = __msa_fill_w(3); // CHECK: call <4 x i32> @llvm.mips.fill.w(
+ v2i64_r = __msa_fill_d(3); // CHECK: call <2 x i64> @llvm.mips.fill.d(
- v4f32_r = __builtin_msa_flog2_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.flog2.w(
- v2f64_r = __builtin_msa_flog2_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.flog2.d(
+ v4f32_r = __msa_flog2_w(v4f32_a); // CHECK: call <4 x float> @llvm.mips.flog2.w(
+ v2f64_r = __msa_flog2_d(v2f64_a); // CHECK: call <2 x double> @llvm.mips.flog2.d(
- v4f32_r = __builtin_msa_fmadd_w(v8f16_r, v8f16_a, v8f16_b); // CHECK: call <4 x float> @llvm.mips.fmadd.w(
- v2f64_r = __builtin_msa_fmadd_d(v4f32_r, v4f32_a, v4f32_b); // CHECK: call <2 x double> @llvm.mips.fmadd.d(
+ v4f32_r = __msa_fmadd_w(v4f32_r, v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmadd.w(
+ v2f64_r = __msa_fmadd_d(v2f64_r, v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmadd.d(
- v4f32_r = __builtin_msa_fmax_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmax.w(
- v2f64_r = __builtin_msa_fmax_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmax.d(
+ v4f32_r = __msa_fmax_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmax.w(
+ v2f64_r = __msa_fmax_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmax.d(
- v4f32_r = __builtin_msa_fmax_a_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmax.a.w(
- v2f64_r = __builtin_msa_fmax_a_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmax.a.d(
+ v4f32_r = __msa_fmax_a_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmax.a.w(
+ v2f64_r = __msa_fmax_a_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmax.a.d(
- v4f32_r = __builtin_msa_fmin_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmin.w(
- v2f64_r = __builtin_msa_fmin_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmin.d(
+ v4f32_r = __msa_fmin_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmin.w(
+ v2f64_r = __msa_fmin_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmin.d(
- v4f32_r = __builtin_msa_fmin_a_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmin.a.w(
- v2f64_r = __builtin_msa_fmin_a_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmin.a.d(
+ v4f32_r = __msa_fmin_a_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmin.a.w(
+ v2f64_r = __msa_fmin_a_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmin.a.d(
- v4f32_r = __builtin_msa_fmsub_w(v8f16_r, v8f16_a, v8f16_b); // CHECK: call <4 x float> @llvm.mips.fmsub.w(
- v2f64_r = __builtin_msa_fmsub_d(v4f32_r, v4f32_a, v4f32_b); // CHECK: call <2 x double> @llvm.mips.fmsub.d(
+ v4f32_r = __msa_fmsub_w(v4f32_r, v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmsub.w(
+ v2f64_r = __msa_fmsub_d(v2f64_r, v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmsub.d(
- v4f32_r = __builtin_msa_fmul_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmul.w(
- v2f64_r = __builtin_msa_fmul_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmul.d(
+ v4f32_r = __msa_fmul_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fmul.w(
+ v2f64_r = __msa_fmul_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fmul.d(
- v4f32_r = __builtin_msa_frint_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.frint.w(
- v2f64_r = __builtin_msa_frint_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.frint.d(
+ v4f32_r = __msa_frint_w(v4f32_a); // CHECK: call <4 x float> @llvm.mips.frint.w(
+ v2f64_r = __msa_frint_d(v2f64_a); // CHECK: call <2 x double> @llvm.mips.frint.d(
- v4f32_r = __builtin_msa_frcp_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.frcp.w(
- v2f64_r = __builtin_msa_frcp_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.frcp.d(
+ v4f32_r = __msa_frcp_w(v4f32_a); // CHECK: call <4 x float> @llvm.mips.frcp.w(
+ v2f64_r = __msa_frcp_d(v2f64_a); // CHECK: call <2 x double> @llvm.mips.frcp.d(
- v4f32_r = __builtin_msa_frsqrt_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.frsqrt.w(
- v2f64_r = __builtin_msa_frsqrt_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.frsqrt.d(
+ v4f32_r = __msa_frsqrt_w(v4f32_a); // CHECK: call <4 x float> @llvm.mips.frsqrt.w(
+ v2f64_r = __msa_frsqrt_d(v2f64_a); // CHECK: call <2 x double> @llvm.mips.frsqrt.d(
- v4i32_r = __builtin_msa_fseq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fseq.w(
- v2i64_r = __builtin_msa_fseq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fseq.d(
+ v4i32_r = __msa_fseq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fseq.w(
+ v2i64_r = __msa_fseq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fseq.d(
- v4i32_r = __builtin_msa_fsaf_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsaf.w(
- v2i64_r = __builtin_msa_fsaf_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsaf.d(
+ v4i32_r = __msa_fsaf_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsaf.w(
+ v2i64_r = __msa_fsaf_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsaf.d(
- v4i32_r = __builtin_msa_fsle_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsle.w(
- v2i64_r = __builtin_msa_fsle_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsle.d(
+ v4i32_r = __msa_fsle_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsle.w(
+ v2i64_r = __msa_fsle_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsle.d(
- v4i32_r = __builtin_msa_fslt_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fslt.w(
- v2i64_r = __builtin_msa_fslt_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fslt.d(
+ v4i32_r = __msa_fslt_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fslt.w(
+ v2i64_r = __msa_fslt_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fslt.d(
- v4i32_r = __builtin_msa_fsne_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsne.w(
- v2i64_r = __builtin_msa_fsne_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsne.d(
+ v4i32_r = __msa_fsne_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsne.w(
+ v2i64_r = __msa_fsne_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsne.d(
- v4i32_r = __builtin_msa_fsor_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsor.w(
- v2i64_r = __builtin_msa_fsor_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsor.d(
+ v4i32_r = __msa_fsor_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsor.w(
+ v2i64_r = __msa_fsor_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsor.d(
- v4f32_r = __builtin_msa_fsqrt_w(v8f16_a); // CHECK: call <4 x float> @llvm.mips.fsqrt.w(
- v2f64_r = __builtin_msa_fsqrt_d(v4f32_a); // CHECK: call <2 x double> @llvm.mips.fsqrt.d(
+ v4f32_r = __msa_fsqrt_w(v4f32_a); // CHECK: call <4 x float> @llvm.mips.fsqrt.w(
+ v2f64_r = __msa_fsqrt_d(v2f64_a); // CHECK: call <2 x double> @llvm.mips.fsqrt.d(
- v4f32_r = __builtin_msa_fsub_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fsub.w(
- v2f64_r = __builtin_msa_fsub_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fsub.d(
+ v4f32_r = __msa_fsub_w(v4f32_a, v4f32_b); // CHECK: call <4 x float> @llvm.mips.fsub.w(
+ v2f64_r = __msa_fsub_d(v2f64_a, v2f64_b); // CHECK: call <2 x double> @llvm.mips.fsub.d(
- v4i32_r = __builtin_msa_fsueq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsueq.w(
- v2i64_r = __builtin_msa_fsueq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsueq.d(
+ v4i32_r = __msa_fsueq_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsueq.w(
+ v2i64_r = __msa_fsueq_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsueq.d(
- v4i32_r = __builtin_msa_fsule_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsule.w(
- v2i64_r = __builtin_msa_fsule_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsule.d(
+ v4i32_r = __msa_fsule_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsule.w(
+ v2i64_r = __msa_fsule_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsule.d(
- v4i32_r = __builtin_msa_fsult_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsult.w(
- v2i64_r = __builtin_msa_fsult_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsult.d(
+ v4i32_r = __msa_fsult_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsult.w(
+ v2i64_r = __msa_fsult_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsult.d(
- v4i32_r = __builtin_msa_fsun_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsun.w(
- v2i64_r = __builtin_msa_fsun_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsun.d(
+ v4i32_r = __msa_fsun_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsun.w(
+ v2i64_r = __msa_fsun_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsun.d(
- v4i32_r = __builtin_msa_fsune_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsune.w(
- v2i64_r = __builtin_msa_fsune_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsune.d(
+ v4i32_r = __msa_fsune_w(v4f32_a, v4f32_b); // CHECK: call <4 x i32> @llvm.mips.fsune.w(
+ v2i64_r = __msa_fsune_d(v2f64_a, v2f64_b); // CHECK: call <2 x i64> @llvm.mips.fsune.d(
- v4i32_r = __builtin_msa_ftint_s_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftint.s.w(
- v2i64_r = __builtin_msa_ftint_s_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftint.s.d(
+ v4i32_r = __msa_ftint_s_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftint.s.w(
+ v2i64_r = __msa_ftint_s_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftint.s.d(
- v4i32_r = __builtin_msa_ftint_u_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftint.u.w(
- v2i64_r = __builtin_msa_ftint_u_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftint.u.d(
+ v4i32_r = __msa_ftint_u_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftint.u.w(
+ v2i64_r = __msa_ftint_u_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftint.u.d(
- v8i16_r = __builtin_msa_ftq_h(v4f32_a, v4f32_b); // CHECK: call <8 x i16> @llvm.mips.ftq.h(
- v4i32_r = __builtin_msa_ftq_w(v2f64_a, v2f64_b); // CHECK: call <4 x i32> @llvm.mips.ftq.w(
+ v8i16_r = __msa_ftq_h(v4f32_a, v4f32_b); // CHECK: call <8 x i16> @llvm.mips.ftq.h(
+ v4i32_r = __msa_ftq_w(v2f64_a, v2f64_b); // CHECK: call <4 x i32> @llvm.mips.ftq.w(
- v4i32_r = __builtin_msa_ftrunc_s_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftrunc.s.w(
- v2i64_r = __builtin_msa_ftrunc_s_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftrunc.s.d(
+ v4i32_r = __msa_ftrunc_s_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftrunc.s.w(
+ v2i64_r = __msa_ftrunc_s_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftrunc.s.d(
- v4i32_r = __builtin_msa_ftrunc_u_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftrunc.u.w(
- v2i64_r = __builtin_msa_ftrunc_u_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftrunc.u.d(
+ v4i32_r = __msa_ftrunc_u_w(v4f32_a); // CHECK: call <4 x i32> @llvm.mips.ftrunc.u.w(
+ v2i64_r = __msa_ftrunc_u_d(v2f64_a); // CHECK: call <2 x i64> @llvm.mips.ftrunc.u.d(
- v8i16_r = __builtin_msa_hadd_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.hadd.s.h(
- v4i32_r = __builtin_msa_hadd_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.hadd.s.w(
- v2i64_r = __builtin_msa_hadd_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.hadd.s.d(
+ v8i16_r = __msa_hadd_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.hadd.s.h(
+ v4i32_r = __msa_hadd_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.hadd.s.w(
+ v2i64_r = __msa_hadd_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.hadd.s.d(
- v8u16_r = __builtin_msa_hadd_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.hadd.u.h(
- v4u32_r = __builtin_msa_hadd_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.hadd.u.w(
- v2u64_r = __builtin_msa_hadd_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.hadd.u.d(
+ v8u16_r = __msa_hadd_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.hadd.u.h(
+ v4u32_r = __msa_hadd_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.hadd.u.w(
+ v2u64_r = __msa_hadd_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.hadd.u.d(
- v8i16_r = __builtin_msa_hsub_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.hsub.s.h(
- v4i32_r = __builtin_msa_hsub_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.hsub.s.w(
- v2i64_r = __builtin_msa_hsub_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.hsub.s.d(
+ v8i16_r = __msa_hsub_s_h(v16i8_a, v16i8_b); // CHECK: call <8 x i16> @llvm.mips.hsub.s.h(
+ v4i32_r = __msa_hsub_s_w(v8i16_a, v8i16_b); // CHECK: call <4 x i32> @llvm.mips.hsub.s.w(
+ v2i64_r = __msa_hsub_s_d(v4i32_a, v4i32_b); // CHECK: call <2 x i64> @llvm.mips.hsub.s.d(
- v8u16_r = __builtin_msa_hsub_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.hsub.u.h(
- v4u32_r = __builtin_msa_hsub_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.hsub.u.w(
- v2u64_r = __builtin_msa_hsub_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.hsub.u.d(
+ v8u16_r = __msa_hsub_u_h(v16u8_a, v16u8_b); // CHECK: call <8 x i16> @llvm.mips.hsub.u.h(
+ v4u32_r = __msa_hsub_u_w(v8u16_a, v8u16_b); // CHECK: call <4 x i32> @llvm.mips.hsub.u.w(
+ v2u64_r = __msa_hsub_u_d(v4u32_a, v4u32_b); // CHECK: call <2 x i64> @llvm.mips.hsub.u.d(
- v16i8_r = __builtin_msa_ilvev_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvev.b(
- v8i16_r = __builtin_msa_ilvev_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvev.h(
- v4i32_r = __builtin_msa_ilvev_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvev.w(
- v2i64_r = __builtin_msa_ilvev_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvev.d(
+ v16i8_r = __msa_ilvev_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvev.b(
+ v8i16_r = __msa_ilvev_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvev.h(
+ v4i32_r = __msa_ilvev_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvev.w(
+ v2i64_r = __msa_ilvev_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvev.d(
- v16i8_r = __builtin_msa_ilvl_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvl.b(
- v8i16_r = __builtin_msa_ilvl_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvl.h(
- v4i32_r = __builtin_msa_ilvl_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvl.w(
- v2i64_r = __builtin_msa_ilvl_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvl.d(
+ v16i8_r = __msa_ilvl_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvl.b(
+ v8i16_r = __msa_ilvl_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvl.h(
+ v4i32_r = __msa_ilvl_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvl.w(
+ v2i64_r = __msa_ilvl_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvl.d(
- v16i8_r = __builtin_msa_ilvod_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvod.b(
- v8i16_r = __builtin_msa_ilvod_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvod.h(
- v4i32_r = __builtin_msa_ilvod_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvod.w(
- v2i64_r = __builtin_msa_ilvod_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvod.d(
+ v16i8_r = __msa_ilvod_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvod.b(
+ v8i16_r = __msa_ilvod_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvod.h(
+ v4i32_r = __msa_ilvod_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvod.w(
+ v2i64_r = __msa_ilvod_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvod.d(
- v16i8_r = __builtin_msa_ilvr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvr.b(
- v8i16_r = __builtin_msa_ilvr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvr.h(
- v4i32_r = __builtin_msa_ilvr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvr.w(
- v2i64_r = __builtin_msa_ilvr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvr.d(
+ v16i8_r = __msa_ilvr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.ilvr.b(
+ v8i16_r = __msa_ilvr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.ilvr.h(
+ v4i32_r = __msa_ilvr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.ilvr.w(
+ v2i64_r = __msa_ilvr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.ilvr.d(
- v16i8_r = __builtin_msa_insert_b(v16i8_r, 1, 25); // CHECK: call <16 x i8> @llvm.mips.insert.b(
- v8i16_r = __builtin_msa_insert_h(v8i16_r, 1, 25); // CHECK: call <8 x i16> @llvm.mips.insert.h(
- v4i32_r = __builtin_msa_insert_w(v4i32_r, 1, 25); // CHECK: call <4 x i32> @llvm.mips.insert.w(
- v2i64_r = __builtin_msa_insert_d(v2i64_r, 1, 25); // CHECK: call <2 x i64> @llvm.mips.insert.d(
+ v16i8_r = __msa_insert_b(v16i8_r, 1, 25); // CHECK: call <16 x i8> @llvm.mips.insert.b(
+ v8i16_r = __msa_insert_h(v8i16_r, 1, 25); // CHECK: call <8 x i16> @llvm.mips.insert.h(
+ v4i32_r = __msa_insert_w(v4i32_r, 1, 25); // CHECK: call <4 x i32> @llvm.mips.insert.w(
+ v2i64_r = __msa_insert_d(v2i64_r, 1, 25); // CHECK: call <2 x i64> @llvm.mips.insert.d(
- v16i8_r = __builtin_msa_insve_b(v16i8_r, 1, v16i8_a); // CHECK: call <16 x i8> @llvm.mips.insve.b(
- v8i16_r = __builtin_msa_insve_h(v8i16_r, 1, v8i16_a); // CHECK: call <8 x i16> @llvm.mips.insve.h(
- v4i32_r = __builtin_msa_insve_w(v4i32_r, 1, v4i32_a); // CHECK: call <4 x i32> @llvm.mips.insve.w(
- v2i64_r = __builtin_msa_insve_d(v2i64_r, 1, v2i64_a); // CHECK: call <2 x i64> @llvm.mips.insve.d(
+ v16i8_r = __msa_insve_b(v16i8_r, 1, v16i8_a); // CHECK: call <16 x i8> @llvm.mips.insve.b(
+ v8i16_r = __msa_insve_h(v8i16_r, 1, v8i16_a); // CHECK: call <8 x i16> @llvm.mips.insve.h(
+ v4i32_r = __msa_insve_w(v4i32_r, 1, v4i32_a); // CHECK: call <4 x i32> @llvm.mips.insve.w(
+ v2i64_r = __msa_insve_d(v2i64_r, 1, v2i64_a); // CHECK: call <2 x i64> @llvm.mips.insve.d(
- v16i8_r = __builtin_msa_ld_b(&v16i8_a, 1); // CHECK: call <16 x i8> @llvm.mips.ld.b(
- v8i16_r = __builtin_msa_ld_h(&v8i16_a, 2); // CHECK: call <8 x i16> @llvm.mips.ld.h(
- v4i32_r = __builtin_msa_ld_w(&v4i32_a, 4); // CHECK: call <4 x i32> @llvm.mips.ld.w(
- v2i64_r = __builtin_msa_ld_d(&v2i64_a, 8); // CHECK: call <2 x i64> @llvm.mips.ld.d(
+ v16i8_r = __msa_ld_b(&v16i8_a, 16); // CHECK: call <16 x i8> @llvm.mips.ld.b(
+ v8i16_r = __msa_ld_h(&v8i16_a, 32); // CHECK: call <8 x i16> @llvm.mips.ld.h(
+ v4i32_r = __msa_ld_w(&v4i32_a, 48); // CHECK: call <4 x i32> @llvm.mips.ld.w(
+ v2i64_r = __msa_ld_d(&v2i64_a, 96); // CHECK: call <2 x i64> @llvm.mips.ld.d(
- v16i8_r = __builtin_msa_ldi_b(3); // CHECK: call <16 x i8> @llvm.mips.ldi.b(
- v8i16_r = __builtin_msa_ldi_h(3); // CHECK: call <8 x i16> @llvm.mips.ldi.h(
- v4i32_r = __builtin_msa_ldi_w(3); // CHECK: call <4 x i32> @llvm.mips.ldi.w(
- v2i64_r = __builtin_msa_ldi_d(3); // CHECK: call <2 x i64> @llvm.mips.ldi.d(
+ v16i8_r = __msa_ldi_b(3); // CHECK: call <16 x i8> @llvm.mips.ldi.b(
+ v8i16_r = __msa_ldi_h(3); // CHECK: call <8 x i16> @llvm.mips.ldi.h(
+ v4i32_r = __msa_ldi_w(3); // CHECK: call <4 x i32> @llvm.mips.ldi.w(
+ v2i64_r = __msa_ldi_d(3); // CHECK: call <2 x i64> @llvm.mips.ldi.d(
- v8i16_r = __builtin_msa_madd_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.madd.q.h(
- v4i32_r = __builtin_msa_madd_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.madd.q.w(
-
- v8i16_r = __builtin_msa_maddr_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.maddr.q.h(
- v4i32_r = __builtin_msa_maddr_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.maddr.q.w(
-
- v16i8_r = __builtin_msa_maddv_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.maddv.b(
- v8i16_r = __builtin_msa_maddv_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.maddv.h(
- v4i32_r = __builtin_msa_maddv_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.maddv.w(
- v2i64_r = __builtin_msa_maddv_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.maddv.d(
-
- v16i8_r = __builtin_msa_max_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.max.a.b(
- v8i16_r = __builtin_msa_max_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.max.a.h(
- v4i32_r = __builtin_msa_max_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.max.a.w(
- v2i64_r = __builtin_msa_max_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.max.a.d(
-
- v16i8_r = __builtin_msa_max_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.max.s.b(
- v8i16_r = __builtin_msa_max_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.max.s.h(
- v4i32_r = __builtin_msa_max_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.max.s.w(
- v2i64_r = __builtin_msa_max_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.max.s.d(
-
- v16u8_r = __builtin_msa_max_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.max.u.b(
- v8u16_r = __builtin_msa_max_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.max.u.h(
- v4u32_r = __builtin_msa_max_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.max.u.w(
- v2u64_r = __builtin_msa_max_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.max.u.d(
-
- v16i8_r = __builtin_msa_maxi_s_b(v16i8_a, 2); // CHECK: call <16 x i8> @llvm.mips.maxi.s.b(
- v8i16_r = __builtin_msa_maxi_s_h(v8i16_a, 2); // CHECK: call <8 x i16> @llvm.mips.maxi.s.h(
- v4i32_r = __builtin_msa_maxi_s_w(v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.maxi.s.w(
- v2i64_r = __builtin_msa_maxi_s_d(v2i64_a, 2); // CHECK: call <2 x i64> @llvm.mips.maxi.s.d(
-
- v16u8_r = __builtin_msa_maxi_u_b(v16u8_a, 2); // CHECK: call <16 x i8> @llvm.mips.maxi.u.b(
- v8u16_r = __builtin_msa_maxi_u_h(v8u16_a, 2); // CHECK: call <8 x i16> @llvm.mips.maxi.u.h(
- v4u32_r = __builtin_msa_maxi_u_w(v4u32_a, 2); // CHECK: call <4 x i32> @llvm.mips.maxi.u.w(
- v2u64_r = __builtin_msa_maxi_u_d(v2u64_a, 2); // CHECK: call <2 x i64> @llvm.mips.maxi.u.d(
-
- v16i8_r = __builtin_msa_min_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.min.a.b(
- v8i16_r = __builtin_msa_min_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.min.a.h(
- v4i32_r = __builtin_msa_min_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.min.a.w(
- v2i64_r = __builtin_msa_min_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.min.a.d(
-
- v16i8_r = __builtin_msa_min_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.min.s.b(
- v8i16_r = __builtin_msa_min_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.min.s.h(
- v4i32_r = __builtin_msa_min_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.min.s.w(
- v2i64_r = __builtin_msa_min_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.min.s.d(
-
- v16u8_r = __builtin_msa_min_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.min.u.b(
- v8u16_r = __builtin_msa_min_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.min.u.h(
- v4u32_r = __builtin_msa_min_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.min.u.w(
- v2u64_r = __builtin_msa_min_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.min.u.d(
-
- v16i8_r = __builtin_msa_mini_s_b(v16i8_a, 2); // CHECK: call <16 x i8> @llvm.mips.mini.s.b(
- v8i16_r = __builtin_msa_mini_s_h(v8i16_a, 2); // CHECK: call <8 x i16> @llvm.mips.mini.s.h(
- v4i32_r = __builtin_msa_mini_s_w(v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.mini.s.w(
- v2i64_r = __builtin_msa_mini_s_d(v2i64_a, 2); // CHECK: call <2 x i64> @llvm.mips.mini.s.d(
-
- v16u8_r = __builtin_msa_mini_u_b(v16u8_a, 2); // CHECK: call <16 x i8> @llvm.mips.mini.u.b(
- v8u16_r = __builtin_msa_mini_u_h(v8u16_a, 2); // CHECK: call <8 x i16> @llvm.mips.mini.u.h(
- v4u32_r = __builtin_msa_mini_u_w(v4u32_a, 2); // CHECK: call <4 x i32> @llvm.mips.mini.u.w(
- v2u64_r = __builtin_msa_mini_u_d(v2u64_a, 2); // CHECK: call <2 x i64> @llvm.mips.mini.u.d(
-
- v16i8_r = __builtin_msa_mod_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.mod.s.b(
- v8i16_r = __builtin_msa_mod_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mod.s.h(
- v4i32_r = __builtin_msa_mod_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mod.s.w(
- v2i64_r = __builtin_msa_mod_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.mod.s.d(
-
- v16u8_r = __builtin_msa_mod_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.mod.u.b(
- v8u16_r = __builtin_msa_mod_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.mod.u.h(
- v4u32_r = __builtin_msa_mod_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.mod.u.w(
- v2u64_r = __builtin_msa_mod_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.mod.u.d(
-
- v16i8_r = __builtin_msa_move_v(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.move.v(
-
- v8i16_r = __builtin_msa_msub_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msub.q.h(
- v4i32_r = __builtin_msa_msub_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msub.q.w(
-
- v8i16_r = __builtin_msa_msubr_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msubr.q.h(
- v4i32_r = __builtin_msa_msubr_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msubr.q.w(
-
- v16i8_r = __builtin_msa_msubv_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.msubv.b(
- v8i16_r = __builtin_msa_msubv_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msubv.h(
- v4i32_r = __builtin_msa_msubv_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msubv.w(
- v2i64_r = __builtin_msa_msubv_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.msubv.d(
-
- v8i16_r = __builtin_msa_mul_q_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mul.q.h(
- v4i32_r = __builtin_msa_mul_q_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mul.q.w(
-
- v8i16_r = __builtin_msa_mulr_q_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mulr.q.h(
- v4i32_r = __builtin_msa_mulr_q_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mulr.q.w(
-
- v16i8_r = __builtin_msa_mulv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.mulv.b(
- v8i16_r = __builtin_msa_mulv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mulv.h(
- v4i32_r = __builtin_msa_mulv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mulv.w(
- v2i64_r = __builtin_msa_mulv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.mulv.d(
-
- v16i8_r = __builtin_msa_nloc_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.nloc.b(
- v8i16_r = __builtin_msa_nloc_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.nloc.h(
- v4i32_r = __builtin_msa_nloc_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.nloc.w(
- v2i64_r = __builtin_msa_nloc_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.nloc.d(
-
- v16i8_r = __builtin_msa_nlzc_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.nlzc.b(
- v8i16_r = __builtin_msa_nlzc_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.nlzc.h(
- v4i32_r = __builtin_msa_nlzc_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.nlzc.w(
- v2i64_r = __builtin_msa_nlzc_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.nlzc.d(
-
- v16i8_r = __builtin_msa_nor_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
- v8i16_r = __builtin_msa_nor_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
- v4i32_r = __builtin_msa_nor_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
- v2i64_r = __builtin_msa_nor_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
-
- v16i8_r = __builtin_msa_nori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v8i16_r = __builtin_msa_nori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v4i32_r = __builtin_msa_nori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v2i64_r = __builtin_msa_nori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
-
- v16u8_r = __builtin_msa_nori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v8u16_r = __builtin_msa_nori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v4u32_r = __builtin_msa_nori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
- v2u64_r = __builtin_msa_nori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
-
- v16i8_r = __builtin_msa_or_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
- v8i16_r = __builtin_msa_or_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
- v4i32_r = __builtin_msa_or_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
- v2i64_r = __builtin_msa_or_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
-
- v16i8_r = __builtin_msa_ori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v8i16_r = __builtin_msa_ori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v4i32_r = __builtin_msa_ori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v2i64_r = __builtin_msa_ori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
-
- v16u8_r = __builtin_msa_ori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v8u16_r = __builtin_msa_ori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v4u32_r = __builtin_msa_ori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
- v2u64_r = __builtin_msa_ori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
-
- v16i8_r = __builtin_msa_pckev_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.pckev.b(
- v8i16_r = __builtin_msa_pckev_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.pckev.h(
- v4i32_r = __builtin_msa_pckev_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.pckev.w(
- v2i64_r = __builtin_msa_pckev_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.pckev.d(
-
- v16i8_r = __builtin_msa_pckod_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.pckod.b(
- v8i16_r = __builtin_msa_pckod_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.pckod.h(
- v4i32_r = __builtin_msa_pckod_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.pckod.w(
- v2i64_r = __builtin_msa_pckod_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.pckod.d(
-
- v16i8_r = __builtin_msa_pcnt_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.pcnt.b(
- v8i16_r = __builtin_msa_pcnt_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.pcnt.h(
- v4i32_r = __builtin_msa_pcnt_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.pcnt.w(
- v2i64_r = __builtin_msa_pcnt_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.pcnt.d(
-
- v16i8_r = __builtin_msa_sat_s_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.sat.s.b(
- v8i16_r = __builtin_msa_sat_s_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sat.s.h(
- v4i32_r = __builtin_msa_sat_s_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sat.s.w(
- v2i64_r = __builtin_msa_sat_s_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.sat.s.d(
-
- v16i8_r = __builtin_msa_sat_u_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.sat.u.b(
- v8i16_r = __builtin_msa_sat_u_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sat.u.h(
- v4i32_r = __builtin_msa_sat_u_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sat.u.w(
- v2i64_r = __builtin_msa_sat_u_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.sat.u.d(
-
- v16i8_r = __builtin_msa_shf_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.shf.b(
- v8i16_r = __builtin_msa_shf_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.shf.h(
- v4i32_r = __builtin_msa_shf_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.shf.w(
-
- v16i8_r = __builtin_msa_sld_b(v16i8_r, v16i8_a, 10); // CHECK: call <16 x i8> @llvm.mips.sld.b(
- v8i16_r = __builtin_msa_sld_h(v8i16_r, v8i16_a, 10); // CHECK: call <8 x i16> @llvm.mips.sld.h(
- v4i32_r = __builtin_msa_sld_w(v4i32_r, v4i32_a, 10); // CHECK: call <4 x i32> @llvm.mips.sld.w(
- v2i64_r = __builtin_msa_sld_d(v2i64_r, v2i64_a, 10); // CHECK: call <2 x i64> @llvm.mips.sld.d(
-
- v16i8_r = __builtin_msa_sldi_b(v16i8_r, v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.sldi.b(
- v8i16_r = __builtin_msa_sldi_h(v8i16_r, v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sldi.h(
- v4i32_r = __builtin_msa_sldi_w(v4i32_r, v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sldi.w(
- v2i64_r = __builtin_msa_sldi_d(v2i64_r, v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.sldi.d(
-
- v16i8_r = __builtin_msa_sll_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.sll.b(
- v8i16_r = __builtin_msa_sll_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.sll.h(
- v4i32_r = __builtin_msa_sll_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.sll.w(
- v2i64_r = __builtin_msa_sll_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.sll.d(
-
- v16i8_r = __builtin_msa_slli_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.slli.b(
- v8i16_r = __builtin_msa_slli_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.slli.h(
- v4i32_r = __builtin_msa_slli_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.slli.w(
- v2i64_r = __builtin_msa_slli_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.slli.d(
-
- v16i8_r = __builtin_msa_splat_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.splat.b(
- v8i16_r = __builtin_msa_splat_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.splat.h(
- v4i32_r = __builtin_msa_splat_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.splat.w(
- v2i64_r = __builtin_msa_splat_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.splat.d(
-
- v16i8_r = __builtin_msa_splati_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.splati.b(
- v8i16_r = __builtin_msa_splati_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.splati.h(
- v4i32_r = __builtin_msa_splati_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.splati.w(
- v2i64_r = __builtin_msa_splati_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.splati.d(
-
- v16i8_r = __builtin_msa_sra_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.sra.b(
- v8i16_r = __builtin_msa_sra_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.sra.h(
- v4i32_r = __builtin_msa_sra_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.sra.w(
- v2i64_r = __builtin_msa_sra_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.sra.d(
-
- v16i8_r = __builtin_msa_srai_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srai.b(
- v8i16_r = __builtin_msa_srai_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srai.h(
- v4i32_r = __builtin_msa_srai_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srai.w(
- v2i64_r = __builtin_msa_srai_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srai.d(
-
- v16i8_r = __builtin_msa_srar_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srar.b(
- v8i16_r = __builtin_msa_srar_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srar.h(
- v4i32_r = __builtin_msa_srar_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srar.w(
- v2i64_r = __builtin_msa_srar_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srar.d(
-
- v16i8_r = __builtin_msa_srari_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srari.b(
- v8i16_r = __builtin_msa_srari_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srari.h(
- v4i32_r = __builtin_msa_srari_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srari.w(
- v2i64_r = __builtin_msa_srari_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srari.d(
-
- v16i8_r = __builtin_msa_srl_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srl.b(
- v8i16_r = __builtin_msa_srl_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srl.h(
- v4i32_r = __builtin_msa_srl_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srl.w(
- v2i64_r = __builtin_msa_srl_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srl.d(
-
- v16i8_r = __builtin_msa_srli_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srli.b(
- v8i16_r = __builtin_msa_srli_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srli.h(
- v4i32_r = __builtin_msa_srli_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srli.w(
- v2i64_r = __builtin_msa_srli_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srli.d(
-
- v16i8_r = __builtin_msa_srlr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srlr.b(
- v8i16_r = __builtin_msa_srlr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srlr.h(
- v4i32_r = __builtin_msa_srlr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srlr.w(
- v2i64_r = __builtin_msa_srlr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srlr.d(
-
- v16i8_r = __builtin_msa_srlri_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srlri.b(
- v8i16_r = __builtin_msa_srlri_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srlri.h(
- v4i32_r = __builtin_msa_srlri_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srlri.w(
- v2i64_r = __builtin_msa_srlri_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srlri.d(
-
- __builtin_msa_st_b(v16i8_b, &v16i8_a, 1); // CHECK: call void @llvm.mips.st.b(
- __builtin_msa_st_h(v8i16_b, &v8i16_a, 2); // CHECK: call void @llvm.mips.st.h(
- __builtin_msa_st_w(v4i32_b, &v4i32_a, 4); // CHECK: call void @llvm.mips.st.w(
- __builtin_msa_st_d(v2i64_b, &v2i64_a, 8); // CHECK: call void @llvm.mips.st.d(
-
- v16i8_r = __builtin_msa_subs_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subs.s.b(
- v8i16_r = __builtin_msa_subs_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subs.s.h(
- v4i32_r = __builtin_msa_subs_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subs.s.w(
- v2i64_r = __builtin_msa_subs_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subs.s.d(
-
- v16u8_r = __builtin_msa_subs_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.subs.u.b(
- v8u16_r = __builtin_msa_subs_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.subs.u.h(
- v4u32_r = __builtin_msa_subs_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.subs.u.w(
- v2u64_r = __builtin_msa_subs_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.subs.u.d(
-
- v16u8_r = __builtin_msa_subsus_u_b(v16u8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subsus.u.b(
- v8u16_r = __builtin_msa_subsus_u_h(v8u16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subsus.u.h(
- v4u32_r = __builtin_msa_subsus_u_w(v4u32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subsus.u.w(
- v2u64_r = __builtin_msa_subsus_u_d(v2u64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subsus.u.d(
-
- v16i8_r = __builtin_msa_subsuu_s_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.subsuu.s.b(
- v8i16_r = __builtin_msa_subsuu_s_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.subsuu.s.h(
- v4i32_r = __builtin_msa_subsuu_s_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.subsuu.s.w(
- v2i64_r = __builtin_msa_subsuu_s_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.subsuu.s.d(
-
- v16i8_r = __builtin_msa_subv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subv.b(
- v8i16_r = __builtin_msa_subv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subv.h(
- v4i32_r = __builtin_msa_subv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subv.w(
- v2i64_r = __builtin_msa_subv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subv.d(
-
- v16i8_r = __builtin_msa_subvi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.subvi.b(
- v8i16_r = __builtin_msa_subvi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.subvi.h(
- v4i32_r = __builtin_msa_subvi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.subvi.w(
- v2i64_r = __builtin_msa_subvi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.subvi.d(
-
- v16i8_r = __builtin_msa_vshf_b(v16i8_a, v16i8_b, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.vshf.b(
- v8i16_r = __builtin_msa_vshf_h(v8i16_a, v8i16_b, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.vshf.h(
- v4i32_r = __builtin_msa_vshf_w(v4i32_a, v4i32_b, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.vshf.w(
- v2i64_r = __builtin_msa_vshf_d(v2i64_a, v2i64_b, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.vshf.d(
-
- v16i8_r = __builtin_msa_xor_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
- v8i16_r = __builtin_msa_xor_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
- v4i32_r = __builtin_msa_xor_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
- v2i64_r = __builtin_msa_xor_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
-
- v16i8_r = __builtin_msa_xori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v8i16_r = __builtin_msa_xori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v4i32_r = __builtin_msa_xori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v2i64_r = __builtin_msa_xori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
-
- v16u8_r = __builtin_msa_xori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v8u16_r = __builtin_msa_xori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v4u32_r = __builtin_msa_xori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
- v2u64_r = __builtin_msa_xori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v8i16_r = __msa_madd_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.madd.q.h(
+ v4i32_r = __msa_madd_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.madd.q.w(
+
+ v8i16_r = __msa_maddr_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.maddr.q.h(
+ v4i32_r = __msa_maddr_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.maddr.q.w(
+
+ v16i8_r = __msa_maddv_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.maddv.b(
+ v8i16_r = __msa_maddv_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.maddv.h(
+ v4i32_r = __msa_maddv_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.maddv.w(
+ v2i64_r = __msa_maddv_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.maddv.d(
+
+ v16i8_r = __msa_max_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.max.a.b(
+ v8i16_r = __msa_max_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.max.a.h(
+ v4i32_r = __msa_max_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.max.a.w(
+ v2i64_r = __msa_max_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.max.a.d(
+
+ v16i8_r = __msa_max_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.max.s.b(
+ v8i16_r = __msa_max_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.max.s.h(
+ v4i32_r = __msa_max_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.max.s.w(
+ v2i64_r = __msa_max_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.max.s.d(
+
+ v16u8_r = __msa_max_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.max.u.b(
+ v8u16_r = __msa_max_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.max.u.h(
+ v4u32_r = __msa_max_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.max.u.w(
+ v2u64_r = __msa_max_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.max.u.d(
+
+ v16i8_r = __msa_maxi_s_b(v16i8_a, 2); // CHECK: call <16 x i8> @llvm.mips.maxi.s.b(
+ v8i16_r = __msa_maxi_s_h(v8i16_a, 2); // CHECK: call <8 x i16> @llvm.mips.maxi.s.h(
+ v4i32_r = __msa_maxi_s_w(v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.maxi.s.w(
+ v2i64_r = __msa_maxi_s_d(v2i64_a, 2); // CHECK: call <2 x i64> @llvm.mips.maxi.s.d(
+
+ v16u8_r = __msa_maxi_u_b(v16u8_a, 2); // CHECK: call <16 x i8> @llvm.mips.maxi.u.b(
+ v8u16_r = __msa_maxi_u_h(v8u16_a, 2); // CHECK: call <8 x i16> @llvm.mips.maxi.u.h(
+ v4u32_r = __msa_maxi_u_w(v4u32_a, 2); // CHECK: call <4 x i32> @llvm.mips.maxi.u.w(
+ v2u64_r = __msa_maxi_u_d(v2u64_a, 2); // CHECK: call <2 x i64> @llvm.mips.maxi.u.d(
+
+ v16i8_r = __msa_min_a_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.min.a.b(
+ v8i16_r = __msa_min_a_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.min.a.h(
+ v4i32_r = __msa_min_a_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.min.a.w(
+ v2i64_r = __msa_min_a_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.min.a.d(
+
+ v16i8_r = __msa_min_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.min.s.b(
+ v8i16_r = __msa_min_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.min.s.h(
+ v4i32_r = __msa_min_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.min.s.w(
+ v2i64_r = __msa_min_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.min.s.d(
+
+ v16u8_r = __msa_min_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.min.u.b(
+ v8u16_r = __msa_min_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.min.u.h(
+ v4u32_r = __msa_min_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.min.u.w(
+ v2u64_r = __msa_min_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.min.u.d(
+
+ v16i8_r = __msa_mini_s_b(v16i8_a, 2); // CHECK: call <16 x i8> @llvm.mips.mini.s.b(
+ v8i16_r = __msa_mini_s_h(v8i16_a, 2); // CHECK: call <8 x i16> @llvm.mips.mini.s.h(
+ v4i32_r = __msa_mini_s_w(v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.mini.s.w(
+ v2i64_r = __msa_mini_s_d(v2i64_a, 2); // CHECK: call <2 x i64> @llvm.mips.mini.s.d(
+
+ v16u8_r = __msa_mini_u_b(v16u8_a, 2); // CHECK: call <16 x i8> @llvm.mips.mini.u.b(
+ v8u16_r = __msa_mini_u_h(v8u16_a, 2); // CHECK: call <8 x i16> @llvm.mips.mini.u.h(
+ v4u32_r = __msa_mini_u_w(v4u32_a, 2); // CHECK: call <4 x i32> @llvm.mips.mini.u.w(
+ v2u64_r = __msa_mini_u_d(v2u64_a, 2); // CHECK: call <2 x i64> @llvm.mips.mini.u.d(
+
+ v16i8_r = __msa_mod_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.mod.s.b(
+ v8i16_r = __msa_mod_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mod.s.h(
+ v4i32_r = __msa_mod_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mod.s.w(
+ v2i64_r = __msa_mod_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.mod.s.d(
+
+ v16u8_r = __msa_mod_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.mod.u.b(
+ v8u16_r = __msa_mod_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.mod.u.h(
+ v4u32_r = __msa_mod_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.mod.u.w(
+ v2u64_r = __msa_mod_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.mod.u.d(
+
+ v16i8_r = __msa_move_v(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.move.v(
+
+ v8i16_r = __msa_msub_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msub.q.h(
+ v4i32_r = __msa_msub_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msub.q.w(
+
+ v8i16_r = __msa_msubr_q_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msubr.q.h(
+ v4i32_r = __msa_msubr_q_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msubr.q.w(
+
+ v16i8_r = __msa_msubv_b(v16i8_r, v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.msubv.b(
+ v8i16_r = __msa_msubv_h(v8i16_r, v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.msubv.h(
+ v4i32_r = __msa_msubv_w(v4i32_r, v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.msubv.w(
+ v2i64_r = __msa_msubv_d(v2i64_r, v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.msubv.d(
+
+ v8i16_r = __msa_mul_q_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mul.q.h(
+ v4i32_r = __msa_mul_q_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mul.q.w(
+
+ v8i16_r = __msa_mulr_q_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mulr.q.h(
+ v4i32_r = __msa_mulr_q_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mulr.q.w(
+
+ v16i8_r = __msa_mulv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.mulv.b(
+ v8i16_r = __msa_mulv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.mulv.h(
+ v4i32_r = __msa_mulv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.mulv.w(
+ v2i64_r = __msa_mulv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.mulv.d(
+
+ v16i8_r = __msa_nloc_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.nloc.b(
+ v8i16_r = __msa_nloc_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.nloc.h(
+ v4i32_r = __msa_nloc_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.nloc.w(
+ v2i64_r = __msa_nloc_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.nloc.d(
+
+ v16i8_r = __msa_nlzc_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.nlzc.b(
+ v8i16_r = __msa_nlzc_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.nlzc.h(
+ v4i32_r = __msa_nlzc_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.nlzc.w(
+ v2i64_r = __msa_nlzc_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.nlzc.d(
+
+ v16i8_r = __msa_nor_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
+ v8i16_r = __msa_nor_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
+ v4i32_r = __msa_nor_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
+ v2i64_r = __msa_nor_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.nor.v(
+
+ v16i8_r = __msa_nori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v8i16_r = __msa_nori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v4i32_r = __msa_nori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v2i64_r = __msa_nori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+
+ v16u8_r = __msa_nori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v8u16_r = __msa_nori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v4u32_r = __msa_nori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+ v2u64_r = __msa_nori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.nori.b(
+
+ v16i8_r = __msa_or_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
+ v8i16_r = __msa_or_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
+ v4i32_r = __msa_or_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
+ v2i64_r = __msa_or_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.or.v(
+
+ v16i8_r = __msa_ori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v8i16_r = __msa_ori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v4i32_r = __msa_ori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v2i64_r = __msa_ori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+
+ v16u8_r = __msa_ori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v8u16_r = __msa_ori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v4u32_r = __msa_ori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+ v2u64_r = __msa_ori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.ori.b(
+
+ v16i8_r = __msa_pckev_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.pckev.b(
+ v8i16_r = __msa_pckev_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.pckev.h(
+ v4i32_r = __msa_pckev_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.pckev.w(
+ v2i64_r = __msa_pckev_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.pckev.d(
+
+ v16i8_r = __msa_pckod_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.pckod.b(
+ v8i16_r = __msa_pckod_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.pckod.h(
+ v4i32_r = __msa_pckod_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.pckod.w(
+ v2i64_r = __msa_pckod_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.pckod.d(
+
+ v16i8_r = __msa_pcnt_b(v16i8_a); // CHECK: call <16 x i8> @llvm.mips.pcnt.b(
+ v8i16_r = __msa_pcnt_h(v8i16_a); // CHECK: call <8 x i16> @llvm.mips.pcnt.h(
+ v4i32_r = __msa_pcnt_w(v4i32_a); // CHECK: call <4 x i32> @llvm.mips.pcnt.w(
+ v2i64_r = __msa_pcnt_d(v2i64_a); // CHECK: call <2 x i64> @llvm.mips.pcnt.d(
+
+ v16i8_r = __msa_sat_s_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.sat.s.b(
+ v8i16_r = __msa_sat_s_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sat.s.h(
+ v4i32_r = __msa_sat_s_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sat.s.w(
+ v2i64_r = __msa_sat_s_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.sat.s.d(
+
+ v16i8_r = __msa_sat_u_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.sat.u.b(
+ v8i16_r = __msa_sat_u_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sat.u.h(
+ v4i32_r = __msa_sat_u_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sat.u.w(
+ v2i64_r = __msa_sat_u_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.sat.u.d(
+
+ v16i8_r = __msa_shf_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.shf.b(
+ v8i16_r = __msa_shf_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.shf.h(
+ v4i32_r = __msa_shf_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.shf.w(
+
+ v16i8_r = __msa_sld_b(v16i8_r, v16i8_a, 7); // CHECK: call <16 x i8> @llvm.mips.sld.b(
+ v8i16_r = __msa_sld_h(v8i16_r, v8i16_a, 5); // CHECK: call <8 x i16> @llvm.mips.sld.h(
+ v4i32_r = __msa_sld_w(v4i32_r, v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.sld.w(
+ v2i64_r = __msa_sld_d(v2i64_r, v2i64_a, 1); // CHECK: call <2 x i64> @llvm.mips.sld.d(
+
+ v16i8_r = __msa_sldi_b(v16i8_r, v16i8_a, 7); // CHECK: call <16 x i8> @llvm.mips.sldi.b(
+ v8i16_r = __msa_sldi_h(v8i16_r, v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.sldi.h(
+ v4i32_r = __msa_sldi_w(v4i32_r, v4i32_a, 2); // CHECK: call <4 x i32> @llvm.mips.sldi.w(
+ v2i64_r = __msa_sldi_d(v2i64_r, v2i64_a, 1); // CHECK: call <2 x i64> @llvm.mips.sldi.d(
+
+ v16i8_r = __msa_sll_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.sll.b(
+ v8i16_r = __msa_sll_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.sll.h(
+ v4i32_r = __msa_sll_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.sll.w(
+ v2i64_r = __msa_sll_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.sll.d(
+
+ v16i8_r = __msa_slli_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.slli.b(
+ v8i16_r = __msa_slli_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.slli.h(
+ v4i32_r = __msa_slli_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.slli.w(
+ v2i64_r = __msa_slli_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.slli.d(
+
+ v16i8_r = __msa_splat_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.splat.b(
+ v8i16_r = __msa_splat_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.splat.h(
+ v4i32_r = __msa_splat_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.splat.w(
+ v2i64_r = __msa_splat_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.splat.d(
+
+ v16i8_r = __msa_splati_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.splati.b(
+ v8i16_r = __msa_splati_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.splati.h(
+ v4i32_r = __msa_splati_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.splati.w(
+ v2i64_r = __msa_splati_d(v2i64_a, 1); // CHECK: call <2 x i64> @llvm.mips.splati.d(
+
+ v16i8_r = __msa_sra_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.sra.b(
+ v8i16_r = __msa_sra_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.sra.h(
+ v4i32_r = __msa_sra_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.sra.w(
+ v2i64_r = __msa_sra_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.sra.d(
+
+ v16i8_r = __msa_srai_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srai.b(
+ v8i16_r = __msa_srai_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srai.h(
+ v4i32_r = __msa_srai_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srai.w(
+ v2i64_r = __msa_srai_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srai.d(
+
+ v16i8_r = __msa_srar_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srar.b(
+ v8i16_r = __msa_srar_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srar.h(
+ v4i32_r = __msa_srar_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srar.w(
+ v2i64_r = __msa_srar_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srar.d(
+
+ v16i8_r = __msa_srari_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srari.b(
+ v8i16_r = __msa_srari_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srari.h(
+ v4i32_r = __msa_srari_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srari.w(
+ v2i64_r = __msa_srari_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srari.d(
+
+ v16i8_r = __msa_srl_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srl.b(
+ v8i16_r = __msa_srl_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srl.h(
+ v4i32_r = __msa_srl_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srl.w(
+ v2i64_r = __msa_srl_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srl.d(
+
+ v16i8_r = __msa_srli_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srli.b(
+ v8i16_r = __msa_srli_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srli.h(
+ v4i32_r = __msa_srli_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srli.w(
+ v2i64_r = __msa_srli_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srli.d(
+
+ v16i8_r = __msa_srlr_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.srlr.b(
+ v8i16_r = __msa_srlr_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.srlr.h(
+ v4i32_r = __msa_srlr_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.srlr.w(
+ v2i64_r = __msa_srlr_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.srlr.d(
+
+ v16i8_r = __msa_srlri_b(v16i8_a, 3); // CHECK: call <16 x i8> @llvm.mips.srlri.b(
+ v8i16_r = __msa_srlri_h(v8i16_a, 3); // CHECK: call <8 x i16> @llvm.mips.srlri.h(
+ v4i32_r = __msa_srlri_w(v4i32_a, 3); // CHECK: call <4 x i32> @llvm.mips.srlri.w(
+ v2i64_r = __msa_srlri_d(v2i64_a, 3); // CHECK: call <2 x i64> @llvm.mips.srlri.d(
+
+ __msa_st_b(v16i8_b, &v16i8_a, 16); // CHECK: call void @llvm.mips.st.b(
+ __msa_st_h(v8i16_b, &v8i16_a, 32); // CHECK: call void @llvm.mips.st.h(
+ __msa_st_w(v4i32_b, &v4i32_a, 48); // CHECK: call void @llvm.mips.st.w(
+ __msa_st_d(v2i64_b, &v2i64_a, 96); // CHECK: call void @llvm.mips.st.d(
+
+ v16i8_r = __msa_subs_s_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subs.s.b(
+ v8i16_r = __msa_subs_s_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subs.s.h(
+ v4i32_r = __msa_subs_s_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subs.s.w(
+ v2i64_r = __msa_subs_s_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subs.s.d(
+
+ v16u8_r = __msa_subs_u_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.subs.u.b(
+ v8u16_r = __msa_subs_u_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.subs.u.h(
+ v4u32_r = __msa_subs_u_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.subs.u.w(
+ v2u64_r = __msa_subs_u_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.subs.u.d(
+
+ v16u8_r = __msa_subsus_u_b(v16u8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subsus.u.b(
+ v8u16_r = __msa_subsus_u_h(v8u16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subsus.u.h(
+ v4u32_r = __msa_subsus_u_w(v4u32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subsus.u.w(
+ v2u64_r = __msa_subsus_u_d(v2u64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subsus.u.d(
+
+ v16i8_r = __msa_subsuu_s_b(v16u8_a, v16u8_b); // CHECK: call <16 x i8> @llvm.mips.subsuu.s.b(
+ v8i16_r = __msa_subsuu_s_h(v8u16_a, v8u16_b); // CHECK: call <8 x i16> @llvm.mips.subsuu.s.h(
+ v4i32_r = __msa_subsuu_s_w(v4u32_a, v4u32_b); // CHECK: call <4 x i32> @llvm.mips.subsuu.s.w(
+ v2i64_r = __msa_subsuu_s_d(v2u64_a, v2u64_b); // CHECK: call <2 x i64> @llvm.mips.subsuu.s.d(
+
+ v16i8_r = __msa_subv_b(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.subv.b(
+ v8i16_r = __msa_subv_h(v8i16_a, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.subv.h(
+ v4i32_r = __msa_subv_w(v4i32_a, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.subv.w(
+ v2i64_r = __msa_subv_d(v2i64_a, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.subv.d(
+
+ v16i8_r = __msa_subvi_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.subvi.b(
+ v8i16_r = __msa_subvi_h(v8i16_a, 25); // CHECK: call <8 x i16> @llvm.mips.subvi.h(
+ v4i32_r = __msa_subvi_w(v4i32_a, 25); // CHECK: call <4 x i32> @llvm.mips.subvi.w(
+ v2i64_r = __msa_subvi_d(v2i64_a, 25); // CHECK: call <2 x i64> @llvm.mips.subvi.d(
+
+ v16i8_r = __msa_vshf_b(v16i8_a, v16i8_b, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.vshf.b(
+ v8i16_r = __msa_vshf_h(v8i16_a, v8i16_b, v8i16_b); // CHECK: call <8 x i16> @llvm.mips.vshf.h(
+ v4i32_r = __msa_vshf_w(v4i32_a, v4i32_b, v4i32_b); // CHECK: call <4 x i32> @llvm.mips.vshf.w(
+ v2i64_r = __msa_vshf_d(v2i64_a, v2i64_b, v2i64_b); // CHECK: call <2 x i64> @llvm.mips.vshf.d(
+
+ v16i8_r = __msa_xor_v(v16i8_a, v16i8_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
+ v8i16_r = __msa_xor_v(v8i16_a, v8i16_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
+ v4i32_r = __msa_xor_v(v4i32_a, v4i32_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
+ v2i64_r = __msa_xor_v(v2i64_a, v2i64_b); // CHECK: call <16 x i8> @llvm.mips.xor.v(
+
+ v16i8_r = __msa_xori_b(v16i8_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v8i16_r = __msa_xori_b(v8i16_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v4i32_r = __msa_xori_b(v4i32_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v2i64_r = __msa_xori_b(v2i64_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+
+ v16u8_r = __msa_xori_b(v16u8_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v8u16_r = __msa_xori_b(v8u16_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v4u32_r = __msa_xori_b(v4u32_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
+ v2u64_r = __msa_xori_b(v2u64_a, 25); // CHECK: call <16 x i8> @llvm.mips.xori.b(
}
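
Note on the hunk above: the test churn is mechanical. Each __builtin_msa_* call becomes the corresponding __msa_* spelling supplied by the new lib/Headers/msa.h, and a few immediates are adjusted to satisfy the stricter Sema checks (for example, __msa_splati_d now uses 1, the top of its two-element lane-index range, and the __msa_st_* byte offsets move to values that remain multiples of the element size, consistent with the new SemaBuiltinConstantArgMultiple check). Below is a minimal sketch of how such a wrapper plausibly forwards to the builtins; the typedefs and macro bodies are assumptions for illustration, not msa.h's verbatim contents.

    // Hypothetical wrapper in the spirit of msa.h; the exact definitions are
    // assumed rather than copied from the patch.
    typedef signed char v16i8 __attribute__((vector_size(16), aligned(16)));
    typedef long long v2i64 __attribute__((vector_size(16), aligned(16)));

    // Each __msa_* form forwards to the underlying builtin. The immediate
    // operand must stay an integer constant expression so Sema can check its
    // range (and, for load/store offsets, that it is a suitable multiple).
    #define __msa_srli_b(a, imm) __builtin_msa_srli_b((v16i8)(a), (imm))
    #define __msa_splati_d(a, imm) __builtin_msa_splati_d((v2i64)(a), (imm))

Defining these as macros rather than inline functions would preserve the constant-expression nature of the immediate argument, which the Sema range and multiple checks depend on.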
diff --git a/test/OpenMP/cancel_codegen.cpp b/test/OpenMP/cancel_codegen.cpp
index 059a8d3901f4..a09214c9319c 100644
--- a/test/OpenMP/cancel_codegen.cpp
+++ b/test/OpenMP/cancel_codegen.cpp
@@ -92,7 +92,7 @@ for (int i = 0; i < argc; ++i) {
}
// CHECK: call void (%ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(
int r = 0;
-#pragma omp parallel for reduction(+:r)
+#pragma omp parallel for reduction(+: r)
for (int i = 0; i < argc; ++i) {
#pragma omp cancel for
r += i;
diff --git a/test/SemaCXX/cxx11-crashes.cpp b/test/SemaCXX/cxx11-crashes.cpp
index 97c959454c35..7c455eecd5fa 100644
--- a/test/SemaCXX/cxx11-crashes.cpp
+++ b/test/SemaCXX/cxx11-crashes.cpp
@@ -91,3 +91,15 @@ void test(int some_number) { // expected-note {{'some_number' declared here}}
Foo(lambda);
}
}
+
+namespace pr29091 {
+ struct X{ X(const X &x); };
+ struct Y: X { using X::X; };
+ bool foo() { return __has_nothrow_constructor(Y); }
+ bool bar() { return __has_nothrow_copy(Y); }
+
+ struct A { template <typename T> A(); };
+ struct B : A { using A::A; };
+ bool baz() { return __has_nothrow_constructor(B); }
+ bool qux() { return __has_nothrow_copy(B); }
+}
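
The pr29091 additions above are regression tests: evaluating the GNU type-trait primitives __has_nothrow_constructor and __has_nothrow_copy on a class that inherits its constructors through a using-declaration previously crashed the compiler. A short usage sketch, illustrative only and not part of the patch; the class names here are hypothetical.

    // With the fix, these trait queries compile and yield a value instead of
    // crashing the compiler on the inheriting-constructor declaration.
    struct Base { Base(const Base &); };
    struct Derived : Base { using Base::Base; };

    bool ctor_ok = __has_nothrow_constructor(Derived);
    bool copy_ok = __has_nothrow_copy(Derived);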