Diffstat (limited to 'test/CodeGen')
-rw-r--r--test/CodeGen/AArch64/adc.ll8
-rw-r--r--test/CodeGen/AArch64/addsub-shifted.ll10
-rw-r--r--test/CodeGen/AArch64/addsub.ll10
-rw-r--r--test/CodeGen/AArch64/addsub_ext.ll8
-rw-r--r--test/CodeGen/AArch64/adrp-relocation.ll27
-rw-r--r--test/CodeGen/AArch64/alloca.ll43
-rw-r--r--test/CodeGen/AArch64/analyze-branch.ll20
-rw-r--r--test/CodeGen/AArch64/atomic-ops-not-barriers.ll2
-rw-r--r--test/CodeGen/AArch64/atomic-ops.ll116
-rw-r--r--test/CodeGen/AArch64/basic-pic.ll28
-rw-r--r--test/CodeGen/AArch64/bitfield-insert-0.ll2
-rw-r--r--test/CodeGen/AArch64/bitfield-insert.ll20
-rw-r--r--test/CodeGen/AArch64/bitfield.ll22
-rw-r--r--test/CodeGen/AArch64/blockaddress.ll2
-rw-r--r--test/CodeGen/AArch64/breg.ll2
-rw-r--r--test/CodeGen/AArch64/callee-save.ll2
-rw-r--r--test/CodeGen/AArch64/code-model-large-abs.ll10
-rw-r--r--test/CodeGen/AArch64/compare-branch.ll4
-rw-r--r--test/CodeGen/AArch64/complex-copy-noneon.ll21
-rw-r--r--test/CodeGen/AArch64/cond-sel.ll25
-rw-r--r--test/CodeGen/AArch64/directcond.ll16
-rw-r--r--test/CodeGen/AArch64/dp-3source.ll36
-rw-r--r--test/CodeGen/AArch64/dp1.ll28
-rw-r--r--test/CodeGen/AArch64/dp2.ll30
-rw-r--r--test/CodeGen/AArch64/elf-extern.ll17
-rw-r--r--test/CodeGen/AArch64/extern-weak.ll2
-rw-r--r--test/CodeGen/AArch64/extract.ll10
-rw-r--r--test/CodeGen/AArch64/fastcc-reserved.ll4
-rw-r--r--test/CodeGen/AArch64/fastcc.ll12
-rw-r--r--test/CodeGen/AArch64/fcmp.ll4
-rw-r--r--test/CodeGen/AArch64/fcvt-fixed.ll8
-rw-r--r--test/CodeGen/AArch64/fcvt-int.ll56
-rw-r--r--test/CodeGen/AArch64/flags-multiuse.ll2
-rw-r--r--test/CodeGen/AArch64/floatdp_1source.ll6
-rw-r--r--test/CodeGen/AArch64/floatdp_2source.ll4
-rw-r--r--test/CodeGen/AArch64/fp-cond-sel.ll2
-rw-r--r--test/CodeGen/AArch64/fp-dp3.ll59
-rw-r--r--test/CodeGen/AArch64/fp128-folding.ll4
-rw-r--r--test/CodeGen/AArch64/fp128.ll47
-rw-r--r--test/CodeGen/AArch64/fpimm.ll14
-rw-r--r--test/CodeGen/AArch64/frameaddr.ll20
-rw-r--r--test/CodeGen/AArch64/func-argpassing.ll52
-rw-r--r--test/CodeGen/AArch64/func-calls.ll30
-rw-r--r--test/CodeGen/AArch64/global-alignment.ll10
-rw-r--r--test/CodeGen/AArch64/got-abuse.ll2
-rw-r--r--test/CodeGen/AArch64/i128-align.ll6
-rw-r--r--test/CodeGen/AArch64/illegal-float-ops.ll46
-rw-r--r--test/CodeGen/AArch64/init-array.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badI.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badK.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badK2.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints-badL.ll2
-rw-r--r--test/CodeGen/AArch64/inline-asm-constraints.ll48
-rw-r--r--test/CodeGen/AArch64/inline-asm-modifiers.ll64
-rw-r--r--test/CodeGen/AArch64/jump-table.ll17
-rw-r--r--test/CodeGen/AArch64/large-consts.ll13
-rw-r--r--test/CodeGen/AArch64/large-frame.ll11
-rw-r--r--test/CodeGen/AArch64/ldst-regoffset.ll33
-rw-r--r--test/CodeGen/AArch64/ldst-unscaledimm.ll15
-rw-r--r--test/CodeGen/AArch64/ldst-unsignedimm.ll15
-rw-r--r--test/CodeGen/AArch64/lit.local.cfg2
-rw-r--r--test/CodeGen/AArch64/literal_pools.ll20
-rw-r--r--test/CodeGen/AArch64/local_vars.ll4
-rw-r--r--test/CodeGen/AArch64/logical-imm.ll8
-rw-r--r--test/CodeGen/AArch64/logical_shifted_reg.ll6
-rw-r--r--test/CodeGen/AArch64/logical_shifted_reg.s208
-rw-r--r--test/CodeGen/AArch64/movw-consts.ll36
-rw-r--r--test/CodeGen/AArch64/movw-shift-encoding.ll14
-rw-r--r--test/CodeGen/AArch64/neon-2velem-high.ll331
-rw-r--r--test/CodeGen/AArch64/neon-2velem.ll2550
-rw-r--r--test/CodeGen/AArch64/neon-3vdiff.ll1806
-rw-r--r--test/CodeGen/AArch64/neon-aba-abd.ll236
-rw-r--r--test/CodeGen/AArch64/neon-across.ll476
-rw-r--r--test/CodeGen/AArch64/neon-add-pairwise.ll92
-rw-r--r--test/CodeGen/AArch64/neon-add-sub.ll237
-rw-r--r--test/CodeGen/AArch64/neon-bitcast.ll574
-rw-r--r--test/CodeGen/AArch64/neon-bitwise-instructions.ll594
-rw-r--r--test/CodeGen/AArch64/neon-bsl.ll222
-rw-r--r--test/CodeGen/AArch64/neon-compare-instructions.ll1926
-rw-r--r--test/CodeGen/AArch64/neon-copy.ll615
-rw-r--r--test/CodeGen/AArch64/neon-crypto.ll149
-rw-r--r--test/CodeGen/AArch64/neon-diagnostics.ll24
-rw-r--r--test/CodeGen/AArch64/neon-extract.ll190
-rw-r--r--test/CodeGen/AArch64/neon-facge-facgt.ll56
-rw-r--r--test/CodeGen/AArch64/neon-fma.ll112
-rw-r--r--test/CodeGen/AArch64/neon-frsqrt-frecp.ll54
-rw-r--r--test/CodeGen/AArch64/neon-halving-add-sub.ll207
-rw-r--r--test/CodeGen/AArch64/neon-max-min-pairwise.ll310
-rw-r--r--test/CodeGen/AArch64/neon-max-min.ll310
-rw-r--r--test/CodeGen/AArch64/neon-misc-scalar.ll60
-rw-r--r--test/CodeGen/AArch64/neon-misc.ll1799
-rw-r--r--test/CodeGen/AArch64/neon-mla-mls.ll88
-rw-r--r--test/CodeGen/AArch64/neon-mov.ll217
-rw-r--r--test/CodeGen/AArch64/neon-mul-div.ll181
-rw-r--r--test/CodeGen/AArch64/neon-perm.ll1693
-rw-r--r--test/CodeGen/AArch64/neon-rounding-halving-add.ll105
-rw-r--r--test/CodeGen/AArch64/neon-rounding-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-saturating-add-sub.ll241
-rw-r--r--test/CodeGen/AArch64/neon-saturating-rounding-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-saturating-shift.ll121
-rw-r--r--test/CodeGen/AArch64/neon-scalar-abs.ll61
-rw-r--r--test/CodeGen/AArch64/neon-scalar-add-sub.ll50
-rw-r--r--test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll108
-rw-r--r--test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll124
-rw-r--r--test/CodeGen/AArch64/neon-scalar-compare.ll343
-rw-r--r--test/CodeGen/AArch64/neon-scalar-copy.ll88
-rw-r--r--test/CodeGen/AArch64/neon-scalar-cvt.ll137
-rw-r--r--test/CodeGen/AArch64/neon-scalar-extract-narrow.ll104
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fabd.ll26
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fcvt.ll255
-rw-r--r--test/CodeGen/AArch64/neon-scalar-fp-compare.ll328
-rw-r--r--test/CodeGen/AArch64/neon-scalar-mul.ll143
-rw-r--r--test/CodeGen/AArch64/neon-scalar-neg.ll61
-rw-r--r--test/CodeGen/AArch64/neon-scalar-recip.ll116
-rw-r--r--test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll247
-rw-r--r--test/CodeGen/AArch64/neon-scalar-rounding-shift.ll39
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll242
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll94
-rw-r--r--test/CodeGen/AArch64/neon-scalar-saturating-shift.ll88
-rw-r--r--test/CodeGen/AArch64/neon-scalar-shift-imm.ll531
-rw-r--r--test/CodeGen/AArch64/neon-scalar-shift.ll38
-rw-r--r--test/CodeGen/AArch64/neon-shift-left-long.ll193
-rw-r--r--test/CodeGen/AArch64/neon-shift.ll171
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll2314
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst-one.ll2113
-rw-r--r--test/CodeGen/AArch64/neon-simd-ldst.ll164
-rw-r--r--test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll354
-rw-r--r--test/CodeGen/AArch64/neon-simd-post-ldst-one.ll319
-rw-r--r--test/CodeGen/AArch64/neon-simd-shift.ll1556
-rw-r--r--test/CodeGen/AArch64/neon-simd-tbl.ll828
-rw-r--r--test/CodeGen/AArch64/neon-simd-vget.ll225
-rw-r--r--test/CodeGen/AArch64/pic-eh-stubs.ll2
-rw-r--r--test/CodeGen/AArch64/regress-bitcast-formals.ll2
-rw-r--r--test/CodeGen/AArch64/regress-fp128-livein.ll17
-rw-r--r--test/CodeGen/AArch64/regress-tail-livereg.ll4
-rw-r--r--test/CodeGen/AArch64/regress-tblgen-chains.ll2
-rw-r--r--test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll17
-rw-r--r--test/CodeGen/AArch64/returnaddr.ll21
-rw-r--r--test/CodeGen/AArch64/setcc-takes-i32.ll4
-rw-r--r--test/CodeGen/AArch64/sibling-call.ll20
-rw-r--r--test/CodeGen/AArch64/sincos-expansion.ll8
-rw-r--r--test/CodeGen/AArch64/tail-call.ll14
-rw-r--r--test/CodeGen/AArch64/tls-dynamic-together.ll2
-rw-r--r--test/CodeGen/AArch64/tls-dynamics.ll46
-rw-r--r--test/CodeGen/AArch64/tls-execs.ll12
-rw-r--r--test/CodeGen/AArch64/tst-br.ll2
-rw-r--r--test/CodeGen/AArch64/variadic.ll78
-rw-r--r--test/CodeGen/AArch64/zero-reg.ll6
-rw-r--r--test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll2
-rw-r--r--test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll2
-rw-r--r--test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll2
-rw-r--r--test/CodeGen/ARM/2009-10-16-Scope.ll12
-rw-r--r--test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll2
-rw-r--r--test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll22
-rw-r--r--test/CodeGen/ARM/2010-05-18-PostIndexBug.ll4
-rw-r--r--test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll19
-rw-r--r--test/CodeGen/ARM/2010-08-04-StackVariable.ll50
-rw-r--r--test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll290
-rw-r--r--test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll48
-rw-r--r--test/CodeGen/ARM/2010-11-29-PrologueBug.ll4
-rw-r--r--test/CodeGen/ARM/2010-11-30-reloc-movt.ll27
-rw-r--r--test/CodeGen/ARM/2010-12-07-PEIBug.ll2
-rw-r--r--test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll29
-rw-r--r--test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll13
-rw-r--r--test/CodeGen/ARM/2011-03-23-PeepholeBug.ll2
-rw-r--r--test/CodeGen/ARM/2011-04-07-schediv.ll2
-rw-r--r--test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll9
-rw-r--r--test/CodeGen/ARM/2011-04-26-SchedTweak.ll2
-rw-r--r--test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll27
-rw-r--r--test/CodeGen/ARM/2011-08-25-ldmia_ret.ll2
-rw-r--r--test/CodeGen/ARM/2011-10-26-memset-inline.ll4
-rw-r--r--test/CodeGen/ARM/2011-10-26-memset-with-neon.ll4
-rw-r--r--test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll4
-rw-r--r--test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll2
-rw-r--r--test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll22
-rw-r--r--test/CodeGen/ARM/2012-03-26-FoldImmBug.ll2
-rw-r--r--test/CodeGen/ARM/2012-05-04-vmov.ll5
-rw-r--r--test/CodeGen/ARM/2012-08-09-neon-extload.ll12
-rw-r--r--test/CodeGen/ARM/2012-08-23-legalize-vmull.ll18
-rw-r--r--test/CodeGen/ARM/2012-08-30-select.ll11
-rw-r--r--test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll2
-rw-r--r--test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll2
-rw-r--r--test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll21
-rw-r--r--test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll2
-rw-r--r--test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll2
-rw-r--r--test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll4
-rw-r--r--test/CodeGen/ARM/2012-11-14-subs_carry.ll2
-rw-r--r--test/CodeGen/ARM/2013-01-21-PR14992.ll4
-rw-r--r--test/CodeGen/ARM/2013-02-27-expand-vfma.ll6
-rw-r--r--test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll24
-rw-r--r--test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll4
-rw-r--r--test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll2
-rw-r--r--test/CodeGen/ARM/2013-05-05-IfConvertBug.ll88
-rw-r--r--test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll64
-rw-r--r--test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll31
-rw-r--r--test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll25
-rw-r--r--test/CodeGen/ARM/2013-05-13-DAGCombiner-undef-mask.ll10
-rw-r--r--test/CodeGen/ARM/2013-05-31-char-shift-crash.ll21
-rw-r--r--test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll30
-rw-r--r--test/CodeGen/ARM/2013-07-29-vector-or-combine.ll32
-rw-r--r--test/CodeGen/ARM/2013-10-11-select-stalls.ll16
-rw-r--r--test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll16
-rw-r--r--test/CodeGen/ARM/a15-SD-dep.ll24
-rw-r--r--test/CodeGen/ARM/a15-mla.ll26
-rw-r--r--test/CodeGen/ARM/a15-partial-update.ll4
-rw-r--r--test/CodeGen/ARM/addrspacecast.ll7
-rw-r--r--test/CodeGen/ARM/aliases.ll27
-rw-r--r--test/CodeGen/ARM/alloc-no-stack-realign.ll50
-rw-r--r--test/CodeGen/ARM/arguments.ll12
-rw-r--r--test/CodeGen/ARM/arm-and-tst-peephole.ll23
-rw-r--r--test/CodeGen/ARM/arm-frameaddr.ll4
-rw-r--r--test/CodeGen/ARM/arm-modifier.ll8
-rw-r--r--test/CodeGen/ARM/arm-returnaddr.ll4
-rw-r--r--test/CodeGen/ARM/atomic-64bit.ll170
-rw-r--r--test/CodeGen/ARM/atomic-cmp.ll4
-rw-r--r--test/CodeGen/ARM/atomic-load-store.ll29
-rw-r--r--test/CodeGen/ARM/atomic-op.ll25
-rw-r--r--test/CodeGen/ARM/atomic-ops-v8.ll1344
-rw-r--r--test/CodeGen/ARM/atomicrmw_minmax.ll4
-rw-r--r--test/CodeGen/ARM/avoid-cpsr-rmw.ll6
-rw-r--r--test/CodeGen/ARM/bfc.ll6
-rw-r--r--test/CodeGen/ARM/bfi.ll4
-rw-r--r--test/CodeGen/ARM/bswap-inline-asm.ll2
-rw-r--r--test/CodeGen/ARM/build-attributes-encoding.s85
-rw-r--r--test/CodeGen/ARM/byval_load_align.ll27
-rw-r--r--test/CodeGen/ARM/call-noret-minsize.ll8
-rw-r--r--test/CodeGen/ARM/call-noret.ll8
-rw-r--r--test/CodeGen/ARM/call-tc.ll47
-rw-r--r--test/CodeGen/ARM/call_nolink.ll4
-rw-r--r--test/CodeGen/ARM/carry.ll10
-rw-r--r--test/CodeGen/ARM/coalesce-dbgvalue.ll111
-rw-r--r--test/CodeGen/ARM/code-placement.ll4
-rw-r--r--test/CodeGen/ARM/constantfp.ll68
-rw-r--r--test/CodeGen/ARM/copy-paired-reg.ll17
-rw-r--r--test/CodeGen/ARM/crash-greedy-v6.ll20
-rw-r--r--test/CodeGen/ARM/crash-shufflevector.ll2
-rw-r--r--test/CodeGen/ARM/ctz.ll2
-rw-r--r--test/CodeGen/ARM/dagcombine-anyexttozeroext.ll2
-rw-r--r--test/CodeGen/ARM/dagcombine-concatvector.ll4
-rw-r--r--test/CodeGen/ARM/darwin-eabi.ll24
-rw-r--r--test/CodeGen/ARM/data-in-code-annotations.ll4
-rw-r--r--test/CodeGen/ARM/debug-info-arg.ll12
-rw-r--r--test/CodeGen/ARM/debug-info-blocks.ll68
-rw-r--r--test/CodeGen/ARM/debug-info-branch-folding.ll21
-rw-r--r--test/CodeGen/ARM/debug-info-d16-reg.ll35
-rw-r--r--test/CodeGen/ARM/debug-info-qreg.ll34
-rw-r--r--test/CodeGen/ARM/debug-info-s16-reg.ll33
-rw-r--r--test/CodeGen/ARM/debug-info-sreg2.ll17
-rw-r--r--test/CodeGen/ARM/div.ll23
-rw-r--r--test/CodeGen/ARM/divmod-eabi.ll202
-rw-r--r--test/CodeGen/ARM/divmod.ll20
-rw-r--r--test/CodeGen/ARM/domain-conv-vmovs.ll14
-rw-r--r--test/CodeGen/ARM/eh-dispcont.ll8
-rw-r--r--test/CodeGen/ARM/ehabi-filters.ll2
-rw-r--r--test/CodeGen/ARM/ehabi-mc-cantunwind.ll14
-rw-r--r--test/CodeGen/ARM/ehabi-mc-compact-pr0.ll49
-rw-r--r--test/CodeGen/ARM/ehabi-mc-compact-pr1.ll62
-rw-r--r--test/CodeGen/ARM/ehabi-mc-section-group.ll88
-rw-r--r--test/CodeGen/ARM/ehabi-mc-section.ll71
-rw-r--r--test/CodeGen/ARM/ehabi-mc-sh_link.ll58
-rw-r--r--test/CodeGen/ARM/ehabi-mc.ll71
-rw-r--r--test/CodeGen/ARM/ehabi.ll298
-rw-r--r--test/CodeGen/ARM/emit-big-cst.ll18
-rw-r--r--test/CodeGen/ARM/extload-knownzero.ll2
-rw-r--r--test/CodeGen/ARM/fabs-neon.ll4
-rw-r--r--test/CodeGen/ARM/fabss.ll10
-rw-r--r--test/CodeGen/ARM/fadds.ll12
-rw-r--r--test/CodeGen/ARM/fast-isel-GEP-coalesce.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-align.ll144
-rw-r--r--test/CodeGen/ARM/fast-isel-binary.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-br-const.ll17
-rw-r--r--test/CodeGen/ARM/fast-isel-br-phi.ll2
-rw-r--r--test/CodeGen/ARM/fast-isel-call-multi-reg-return.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-call.ll178
-rw-r--r--test/CodeGen/ARM/fast-isel-cmp-imm.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-conversion.ll13
-rw-r--r--test/CodeGen/ARM/fast-isel-crash.ll3
-rw-r--r--test/CodeGen/ARM/fast-isel-crash2.ll3
-rw-r--r--test/CodeGen/ARM/fast-isel-deadcode.ll2
-rw-r--r--test/CodeGen/ARM/fast-isel-ext.ll137
-rw-r--r--test/CodeGen/ARM/fast-isel-fold.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-frameaddr.ll30
-rw-r--r--test/CodeGen/ARM/fast-isel-icmp.ll13
-rw-r--r--test/CodeGen/ARM/fast-isel-indirectbr.ll1
-rw-r--r--test/CodeGen/ARM/fast-isel-intrinsic.ll100
-rw-r--r--test/CodeGen/ARM/fast-isel-ldr-str-arm.ll2
-rw-r--r--test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll43
-rw-r--r--test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll3
-rw-r--r--test/CodeGen/ARM/fast-isel-load-store-verify.ll70
-rw-r--r--test/CodeGen/ARM/fast-isel-mvn.ll5
-rw-r--r--test/CodeGen/ARM/fast-isel-pic.ll26
-rw-r--r--test/CodeGen/ARM/fast-isel-pred.ll1
-rw-r--r--test/CodeGen/ARM/fast-isel-redefinition.ll2
-rw-r--r--test/CodeGen/ARM/fast-isel-ret.ll7
-rw-r--r--test/CodeGen/ARM/fast-isel-select.ll17
-rw-r--r--test/CodeGen/ARM/fast-isel-shifter.ll3
-rw-r--r--test/CodeGen/ARM/fast-isel-static.ll8
-rw-r--r--test/CodeGen/ARM/fast-isel-vararg.ll47
-rw-r--r--test/CodeGen/ARM/fast-isel.ll172
-rw-r--r--test/CodeGen/ARM/fast-tail-call.ll16
-rw-r--r--test/CodeGen/ARM/fastisel-gep-promote-before-add.ll18
-rw-r--r--test/CodeGen/ARM/fcopysign.ll12
-rw-r--r--test/CodeGen/ARM/fdivs.ll10
-rw-r--r--test/CodeGen/ARM/fmacs.ll30
-rw-r--r--test/CodeGen/ARM/fmscs.ll12
-rw-r--r--test/CodeGen/ARM/fmuls.ll12
-rw-r--r--test/CodeGen/ARM/fnegs.ll24
-rw-r--r--test/CodeGen/ARM/fnmacs.ll12
-rw-r--r--test/CodeGen/ARM/fnmscs.ll32
-rw-r--r--test/CodeGen/ARM/fold-stack-adjust.ll164
-rw-r--r--test/CodeGen/ARM/fp.ll16
-rw-r--r--test/CodeGen/ARM/fp16.ll4
-rw-r--r--test/CodeGen/ARM/fp_convert.ll16
-rw-r--r--test/CodeGen/ARM/fparith.ll24
-rw-r--r--test/CodeGen/ARM/fpcmp-opt.ll11
-rw-r--r--test/CodeGen/ARM/fpcmp.ll14
-rw-r--r--test/CodeGen/ARM/fpcmp_ueq.ll2
-rw-r--r--test/CodeGen/ARM/fpconsts.ll8
-rw-r--r--test/CodeGen/ARM/fpconv.ll40
-rw-r--r--test/CodeGen/ARM/fpmem.ll10
-rw-r--r--test/CodeGen/ARM/fptoint.ll2
-rw-r--r--test/CodeGen/ARM/fusedMAC.ll24
-rw-r--r--test/CodeGen/ARM/globals.ll2
-rw-r--r--test/CodeGen/ARM/hidden-vis-2.ll2
-rw-r--r--test/CodeGen/ARM/hidden-vis.ll8
-rw-r--r--test/CodeGen/ARM/ifconv-kills.ll30
-rw-r--r--test/CodeGen/ARM/ifconv-regmask.ll35
-rw-r--r--test/CodeGen/ARM/ifcvt1.ll4
-rw-r--r--test/CodeGen/ARM/ifcvt10.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt11.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt12.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt2.ll4
-rw-r--r--test/CodeGen/ARM/ifcvt3.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt4.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt5.ll4
-rw-r--r--test/CodeGen/ARM/indirect-reg-input.ll2
-rw-r--r--test/CodeGen/ARM/indirectbr-2.ll2
-rw-r--r--test/CodeGen/ARM/indirectbr-3.ll32
-rw-r--r--test/CodeGen/ARM/indirectbr.ll16
-rw-r--r--test/CodeGen/ARM/inlineasm-64bit.ll64
-rw-r--r--test/CodeGen/ARM/inlineasm4.ll4
-rw-r--r--test/CodeGen/ARM/interrupt-attr.ll130
-rw-r--r--test/CodeGen/ARM/intrinsics-crypto.ll57
-rw-r--r--test/CodeGen/ARM/intrinsics-v8.ll19
-rw-r--r--test/CodeGen/ARM/ldm.ll12
-rw-r--r--test/CodeGen/ARM/ldr.ll14
-rw-r--r--test/CodeGen/ARM/ldr_post.ll4
-rw-r--r--test/CodeGen/ARM/ldr_pre.ll4
-rw-r--r--test/CodeGen/ARM/ldrd.ll32
-rw-r--r--test/CodeGen/ARM/ldst-f32-2-i32.ll2
-rw-r--r--test/CodeGen/ARM/ldstrex.ll139
-rw-r--r--test/CodeGen/ARM/ldstrexd.ll33
-rw-r--r--test/CodeGen/ARM/lit.local.cfg2
-rw-r--r--test/CodeGen/ARM/load-address-masked.ll14
-rw-r--r--test/CodeGen/ARM/load-global.ll2
-rw-r--r--test/CodeGen/ARM/load_i1_select.ll2
-rw-r--r--test/CodeGen/ARM/long.ll22
-rw-r--r--test/CodeGen/ARM/longMAC.ll8
-rw-r--r--test/CodeGen/ARM/long_shift.ll8
-rw-r--r--test/CodeGen/ARM/lsr-icmp-imm.ll2
-rw-r--r--test/CodeGen/ARM/lsr-unfolded-offset.ll3
-rw-r--r--test/CodeGen/ARM/machine-cse-cmp.ll6
-rw-r--r--test/CodeGen/ARM/machine-licm.ll8
-rw-r--r--test/CodeGen/ARM/memcpy-inline.ll14
-rw-r--r--test/CodeGen/ARM/memset-inline.ll4
-rw-r--r--test/CodeGen/ARM/misched-copy-arm.ll51
-rw-r--r--test/CodeGen/ARM/mls.ll8
-rw-r--r--test/CodeGen/ARM/movt.ll4
-rw-r--r--test/CodeGen/ARM/mul_const.ll20
-rw-r--r--test/CodeGen/ARM/mulhi.ll18
-rw-r--r--test/CodeGen/ARM/mvn.ll2
-rw-r--r--test/CodeGen/ARM/neon-spfp.ll60
-rw-r--r--test/CodeGen/ARM/neon_minmax.ll20
-rw-r--r--test/CodeGen/ARM/neon_spill.ll11
-rw-r--r--test/CodeGen/ARM/neon_vabs.ll20
-rw-r--r--test/CodeGen/ARM/no-fpu.ll33
-rw-r--r--test/CodeGen/ARM/noreturn.ll50
-rw-r--r--test/CodeGen/ARM/optselect-regclass.ll23
-rw-r--r--test/CodeGen/ARM/pack.ll25
-rw-r--r--test/CodeGen/ARM/peephole-bitcast.ll2
-rw-r--r--test/CodeGen/ARM/pic.ll23
-rw-r--r--test/CodeGen/ARM/popcnt.ll36
-rw-r--r--test/CodeGen/ARM/prefetch-thumb.ll22
-rw-r--r--test/CodeGen/ARM/prefetch.ll22
-rw-r--r--test/CodeGen/ARM/private.ll2
-rw-r--r--test/CodeGen/ARM/readcyclecounter.ll24
-rw-r--r--test/CodeGen/ARM/reg_sequence.ll25
-rw-r--r--test/CodeGen/ARM/ret_sret_vector.ll2
-rw-r--r--test/CodeGen/ARM/returned-ext.ll28
-rw-r--r--test/CodeGen/ARM/returned-trunc-tail-calls.ll111
-rw-r--r--test/CodeGen/ARM/rev.ll4
-rw-r--r--test/CodeGen/ARM/sbfx.ll10
-rw-r--r--test/CodeGen/ARM/section-name.ll25
-rw-r--r--test/CodeGen/ARM/select-imm.ll68
-rw-r--r--test/CodeGen/ARM/select-undef.ll7
-rw-r--r--test/CodeGen/ARM/select.ll28
-rw-r--r--test/CodeGen/ARM/select_xform.ll48
-rw-r--r--test/CodeGen/ARM/setcc-sentinals.ll14
-rw-r--r--test/CodeGen/ARM/shifter_operand.ll16
-rw-r--r--test/CodeGen/ARM/sincos.ll38
-rw-r--r--test/CodeGen/ARM/spill-q.ll2
-rw-r--r--test/CodeGen/ARM/stack-protector-bmovpcb_call.ll32
-rw-r--r--test/CodeGen/ARM/str_post.ll4
-rw-r--r--test/CodeGen/ARM/struct-byval-frame-index.ll219
-rw-r--r--test/CodeGen/ARM/struct_byval.ll53
-rw-r--r--test/CodeGen/ARM/struct_byval_arm_t1_t2.ll1523
-rw-r--r--test/CodeGen/ARM/sub-cmp-peephole.ll70
-rw-r--r--test/CodeGen/ARM/swift-atomics.ll45
-rw-r--r--test/CodeGen/ARM/swift-vldm.ll29
-rw-r--r--test/CodeGen/ARM/tail-dup.ll2
-rw-r--r--test/CodeGen/ARM/tail-opts.ll2
-rw-r--r--test/CodeGen/ARM/test-sharedidx.ll2
-rw-r--r--test/CodeGen/ARM/this-return.ll24
-rw-r--r--test/CodeGen/ARM/thumb1-varalloc.ll2
-rw-r--r--test/CodeGen/ARM/thumb2-it-block.ll5
-rw-r--r--test/CodeGen/ARM/tls-models.ll32
-rw-r--r--test/CodeGen/ARM/tls2.ll8
-rw-r--r--test/CodeGen/ARM/trap.ll12
-rw-r--r--test/CodeGen/ARM/twoaddrinstr.ll2
-rw-r--r--test/CodeGen/ARM/umulo-32.ll8
-rw-r--r--test/CodeGen/ARM/unaligned_load_store.ll16
-rw-r--r--test/CodeGen/ARM/unaligned_load_store_vector.ll54
-rw-r--r--test/CodeGen/ARM/undef-sext.ll2
-rw-r--r--test/CodeGen/ARM/unwind-init.ll18
-rw-r--r--test/CodeGen/ARM/v1-constant-fold.ll2
-rw-r--r--test/CodeGen/ARM/va_arg.ll4
-rw-r--r--test/CodeGen/ARM/vaba.ll36
-rw-r--r--test/CodeGen/ARM/vabd.ll40
-rw-r--r--test/CodeGen/ARM/vabs.ll28
-rw-r--r--test/CodeGen/ARM/vadd.ll108
-rw-r--r--test/CodeGen/ARM/vbits.ll114
-rw-r--r--test/CodeGen/ARM/vbsl-constant.ll16
-rw-r--r--test/CodeGen/ARM/vbsl.ll40
-rw-r--r--test/CodeGen/ARM/vceq.ll18
-rw-r--r--test/CodeGen/ARM/vcge.ll38
-rw-r--r--test/CodeGen/ARM/vcgt.ll38
-rw-r--r--test/CodeGen/ARM/vcnt.ll28
-rw-r--r--test/CodeGen/ARM/vcvt-cost.ll20
-rw-r--r--test/CodeGen/ARM/vcvt-v8.ll145
-rw-r--r--test/CodeGen/ARM/vcvt.ll77
-rw-r--r--test/CodeGen/ARM/vdiv_combine.ll41
-rw-r--r--test/CodeGen/ARM/vdup.ll64
-rw-r--r--test/CodeGen/ARM/vector-DAGCombine.ll90
-rw-r--r--test/CodeGen/ARM/vector-extend-narrow.ll10
-rw-r--r--test/CodeGen/ARM/vext.ll59
-rw-r--r--test/CodeGen/ARM/vfcmp.ll22
-rw-r--r--test/CodeGen/ARM/vfp.ll24
-rw-r--r--test/CodeGen/ARM/vget_lane.ll34
-rw-r--r--test/CodeGen/ARM/vhadd.ll48
-rw-r--r--test/CodeGen/ARM/vhsub.ll24
-rw-r--r--test/CodeGen/ARM/vicmp.ll20
-rw-r--r--test/CodeGen/ARM/vld1.ll26
-rw-r--r--test/CodeGen/ARM/vld2.ll22
-rw-r--r--test/CodeGen/ARM/vld3.ll22
-rw-r--r--test/CodeGen/ARM/vld4.ll22
-rw-r--r--test/CodeGen/ARM/vlddup.ll28
-rw-r--r--test/CodeGen/ARM/vldlane.ll68
-rw-r--r--test/CodeGen/ARM/vldm-liveness.ll40
-rw-r--r--test/CodeGen/ARM/vldm-sched-a9.ll71
-rw-r--r--test/CodeGen/ARM/vminmax.ll56
-rw-r--r--test/CodeGen/ARM/vminmaxnm.ll88
-rw-r--r--test/CodeGen/ARM/vmla.ll28
-rw-r--r--test/CodeGen/ARM/vmls.ll28
-rw-r--r--test/CodeGen/ARM/vmov.ll104
-rw-r--r--test/CodeGen/ARM/vmul.ll89
-rw-r--r--test/CodeGen/ARM/vneg.ll28
-rw-r--r--test/CodeGen/ARM/vpadal.ll24
-rw-r--r--test/CodeGen/ARM/vpadd.ll32
-rw-r--r--test/CodeGen/ARM/vpminmax.ll28
-rw-r--r--test/CodeGen/ARM/vqadd.ll32
-rw-r--r--test/CodeGen/ARM/vqdmul.ll124
-rw-r--r--test/CodeGen/ARM/vqshl.ll112
-rw-r--r--test/CodeGen/ARM/vqshrn.ll36
-rw-r--r--test/CodeGen/ARM/vqsub.ll32
-rw-r--r--test/CodeGen/ARM/vrec.ll24
-rw-r--r--test/CodeGen/ARM/vrev.ll36
-rw-r--r--test/CodeGen/ARM/vsel.ll309
-rw-r--r--test/CodeGen/ARM/vselect_imax.ll58
-rw-r--r--test/CodeGen/ARM/vshift.ll96
-rw-r--r--test/CodeGen/ARM/vshiftins.ll32
-rw-r--r--test/CodeGen/ARM/vshl.ll144
-rw-r--r--test/CodeGen/ARM/vshll.ll18
-rw-r--r--test/CodeGen/ARM/vshrn.ll12
-rw-r--r--test/CodeGen/ARM/vsra.ll64
-rw-r--r--test/CodeGen/ARM/vst1.ll24
-rw-r--r--test/CodeGen/ARM/vst2.ll26
-rw-r--r--test/CodeGen/ARM/vst3.ll22
-rw-r--r--test/CodeGen/ARM/vst4.ll22
-rw-r--r--test/CodeGen/ARM/vstlane.ll72
-rw-r--r--test/CodeGen/ARM/vsub.ll96
-rw-r--r--test/CodeGen/ARM/vtbl.ll16
-rw-r--r--test/CodeGen/ARM/vtrn.ll20
-rw-r--r--test/CodeGen/ARM/vuzp.ll16
-rw-r--r--test/CodeGen/ARM/vzip.ll16
-rw-r--r--test/CodeGen/ARM/warn-stack.ll24
-rw-r--r--test/CodeGen/CPP/lit.local.cfg2
-rw-r--r--test/CodeGen/Generic/2009-03-17-LSR-APInt.ll56
-rw-r--r--test/CodeGen/Generic/crash.ll4
-rw-r--r--test/CodeGen/Generic/dbg_value.ll3
-rw-r--r--test/CodeGen/Generic/lit.local.cfg1
-rw-r--r--test/CodeGen/Hexagon/BranchPredict.ll76
-rw-r--r--test/CodeGen/Hexagon/adde.ll2
-rw-r--r--test/CodeGen/Hexagon/args.ll9
-rw-r--r--test/CodeGen/Hexagon/combine_ir.ll15
-rw-r--r--test/CodeGen/Hexagon/extload-combine.ll80
-rw-r--r--test/CodeGen/Hexagon/hwloop-dbg.ll27
-rw-r--r--test/CodeGen/Hexagon/i16_VarArg.ll2
-rw-r--r--test/CodeGen/Hexagon/i1_VarArg.ll2
-rw-r--r--test/CodeGen/Hexagon/i8_VarArg.ll2
-rw-r--r--test/CodeGen/Hexagon/indirect-br.ll2
-rw-r--r--test/CodeGen/Hexagon/lit.local.cfg2
-rw-r--r--test/CodeGen/Hexagon/memops.ll509
-rw-r--r--test/CodeGen/Hexagon/packetize_cond_inst.ll32
-rw-r--r--test/CodeGen/Hexagon/pred-gp.ll28
-rw-r--r--test/CodeGen/Hexagon/pred-instrs.ll30
-rw-r--r--test/CodeGen/Hexagon/split-const32-const64.ll26
-rw-r--r--test/CodeGen/Hexagon/sube.ll2
-rw-r--r--test/CodeGen/Hexagon/tail-call-trunc.ll28
-rw-r--r--test/CodeGen/Hexagon/tfr-to-combine.ll35
-rw-r--r--test/CodeGen/Hexagon/union-1.ll8
-rw-r--r--test/CodeGen/Hexagon/zextloadi1.ll2
-rw-r--r--test/CodeGen/Inputs/DbgValueOtherTargets.ll12
-rw-r--r--test/CodeGen/MBlaze/DbgValueOtherTargets.test1
-rw-r--r--test/CodeGen/MBlaze/brind.ll72
-rw-r--r--test/CodeGen/MBlaze/callind.ll80
-rw-r--r--test/CodeGen/MBlaze/cc.ll266
-rw-r--r--test/CodeGen/MBlaze/div.ll75
-rw-r--r--test/CodeGen/MBlaze/fpu.ll66
-rw-r--r--test/CodeGen/MBlaze/fsl.ll319
-rw-r--r--test/CodeGen/MBlaze/imm.ll70
-rw-r--r--test/CodeGen/MBlaze/intr.ll48
-rw-r--r--test/CodeGen/MBlaze/jumptable.ll79
-rw-r--r--test/CodeGen/MBlaze/lit.local.cfg6
-rw-r--r--test/CodeGen/MBlaze/loop.ll44
-rw-r--r--test/CodeGen/MBlaze/mul.ll51
-rw-r--r--test/CodeGen/MBlaze/mul64.ll23
-rw-r--r--test/CodeGen/MBlaze/select.ll15
-rw-r--r--test/CodeGen/MBlaze/shift.ll115
-rw-r--r--test/CodeGen/MBlaze/svol.ll80
-rw-r--r--test/CodeGen/MSP430/2009-11-05-8BitLibcalls.ll2
-rw-r--r--test/CodeGen/MSP430/AddrMode-bis-rx.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-bis-xr.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-mov-rx.ll14
-rw-r--r--test/CodeGen/MSP430/AddrMode-mov-xr.ll14
-rw-r--r--test/CodeGen/MSP430/Inst16mi.ll10
-rw-r--r--test/CodeGen/MSP430/Inst16mm.ll12
-rw-r--r--test/CodeGen/MSP430/Inst16mr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst16ri.ll10
-rw-r--r--test/CodeGen/MSP430/Inst16rm.ll10
-rw-r--r--test/CodeGen/MSP430/Inst16rr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst8mi.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8mm.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8mr.ll12
-rw-r--r--test/CodeGen/MSP430/Inst8ri.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8rm.ll10
-rw-r--r--test/CodeGen/MSP430/Inst8rr.ll12
-rw-r--r--test/CodeGen/MSP430/bit.ll32
-rw-r--r--test/CodeGen/MSP430/byval.ll4
-rw-r--r--test/CodeGen/MSP430/cc_args.ll118
-rw-r--r--test/CodeGen/MSP430/cc_ret.ll61
-rw-r--r--test/CodeGen/MSP430/fp.ll2
-rw-r--r--test/CodeGen/MSP430/indirectbr2.ll2
-rw-r--r--test/CodeGen/MSP430/jumptable.ll54
-rw-r--r--test/CodeGen/MSP430/lit.local.cfg2
-rw-r--r--test/CodeGen/MSP430/postinc.ll10
-rw-r--r--test/CodeGen/MSP430/setcc.ll34
-rw-r--r--test/CodeGen/MSP430/shifts.ll10
-rw-r--r--test/CodeGen/MSP430/transient-stack-alignment.ll17
-rw-r--r--test/CodeGen/MSP430/vararg.ll6
-rw-r--r--test/CodeGen/Mips/2008-07-16-SignExtInReg.ll1
-rw-r--r--test/CodeGen/Mips/2008-08-01-AsmInline.ll18
-rw-r--r--test/CodeGen/Mips/2013-11-18-fp64-const0.ll31
-rw-r--r--test/CodeGen/Mips/align16.ll8
-rw-r--r--test/CodeGen/Mips/alloca16.ll4
-rw-r--r--test/CodeGen/Mips/atomic.ll495
-rw-r--r--test/CodeGen/Mips/atomicops.ll4
-rw-r--r--test/CodeGen/Mips/beqzc.ll20
-rw-r--r--test/CodeGen/Mips/beqzc1.ll24
-rw-r--r--test/CodeGen/Mips/biggot.ll4
-rw-r--r--test/CodeGen/Mips/blez_bgez.ll36
-rw-r--r--test/CodeGen/Mips/blockaddr.ll10
-rw-r--r--test/CodeGen/Mips/brdelayslot.ll31
-rw-r--r--test/CodeGen/Mips/brsize3.ll33
-rw-r--r--test/CodeGen/Mips/brsize3a.ll26
-rw-r--r--test/CodeGen/Mips/bswap.ll7
-rw-r--r--test/CodeGen/Mips/buildpairextractelementf64.ll23
-rw-r--r--test/CodeGen/Mips/check-noat.ll2
-rwxr-xr-xtest/CodeGen/Mips/cmov.ll44
-rw-r--r--test/CodeGen/Mips/cmplarge.ll38
-rw-r--r--test/CodeGen/Mips/const-mult.ll49
-rw-r--r--test/CodeGen/Mips/const1.ll35
-rw-r--r--test/CodeGen/Mips/const4a.ll180
-rw-r--r--test/CodeGen/Mips/const6.ll164
-rw-r--r--test/CodeGen/Mips/const6a.ll29
-rw-r--r--test/CodeGen/Mips/ctlz.ll27
-rw-r--r--test/CodeGen/Mips/disable-tail-merge.ll33
-rw-r--r--test/CodeGen/Mips/divrem.ll48
-rw-r--r--test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll80
-rw-r--r--test/CodeGen/Mips/dsp-patterns.ll46
-rw-r--r--test/CodeGen/Mips/dsp-vec-load-store.ll11
-rw-r--r--test/CodeGen/Mips/eh-return64.ll12
-rw-r--r--test/CodeGen/Mips/emit-big-cst.ll17
-rw-r--r--test/CodeGen/Mips/ex2.ll11
-rw-r--r--test/CodeGen/Mips/extins.ll9
-rw-r--r--test/CodeGen/Mips/f16abs.ll37
-rw-r--r--test/CodeGen/Mips/fcopysign-f32-f64.ll35
-rw-r--r--test/CodeGen/Mips/fixdfsf.ll18
-rw-r--r--test/CodeGen/Mips/fp16instrinsmc.ll391
-rw-r--r--test/CodeGen/Mips/fp16mix.ll92
-rw-r--r--test/CodeGen/Mips/fpneeded.ll2
-rw-r--r--test/CodeGen/Mips/fpnotneeded.ll2
-rw-r--r--test/CodeGen/Mips/fptr2.ll20
-rw-r--r--test/CodeGen/Mips/frame-address.ll7
-rw-r--r--test/CodeGen/Mips/helloworld.ll26
-rw-r--r--test/CodeGen/Mips/hf16call32.ll1030
-rw-r--r--test/CodeGen/Mips/hf16call32_body.ll294
-rw-r--r--test/CodeGen/Mips/hf1_body.ll21
-rw-r--r--test/CodeGen/Mips/hfptrcall.ll125
-rw-r--r--test/CodeGen/Mips/i32k.ll19
-rw-r--r--test/CodeGen/Mips/i64arg.ll32
-rw-r--r--test/CodeGen/Mips/inlineasm-operand-code.ll24
-rw-r--r--test/CodeGen/Mips/int-to-float-conversion.ll48
-rw-r--r--test/CodeGen/Mips/largefr1.ll41
-rw-r--r--test/CodeGen/Mips/largeimmprinting.ll4
-rw-r--r--test/CodeGen/Mips/lazy-binding.ll41
-rw-r--r--test/CodeGen/Mips/lit.local.cfg2
-rw-r--r--test/CodeGen/Mips/longbranch.ll8
-rw-r--r--test/CodeGen/Mips/mips16_32_1.ll2
-rw-r--r--test/CodeGen/Mips/mips16_32_10.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_3.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_4.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_5.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_6.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_7.ll6
-rw-r--r--test/CodeGen/Mips/mips16_32_8.ll8
-rw-r--r--test/CodeGen/Mips/mips16_32_9.ll6
-rw-r--r--test/CodeGen/Mips/mips16_fpret.ll76
-rw-r--r--test/CodeGen/Mips/mips16fpe.ll56
-rw-r--r--test/CodeGen/Mips/mips64-f128.ll146
-rw-r--r--test/CodeGen/Mips/mips64-libcall.ll4
-rw-r--r--test/CodeGen/Mips/mips64instrs.ll32
-rw-r--r--test/CodeGen/Mips/misha.ll4
-rw-r--r--test/CodeGen/Mips/mno-ldc1-sdc1.ll93
-rw-r--r--test/CodeGen/Mips/msa/2r.ll257
-rw-r--r--test/CodeGen/Mips/msa/2r_vector_scalar.ll87
-rw-r--r--test/CodeGen/Mips/msa/2rf.ll323
-rw-r--r--test/CodeGen/Mips/msa/2rf_exup.ll82
-rw-r--r--test/CodeGen/Mips/msa/2rf_float_int.ll90
-rw-r--r--test/CodeGen/Mips/msa/2rf_fq.ll82
-rw-r--r--test/CodeGen/Mips/msa/2rf_int_float.ll217
-rw-r--r--test/CodeGen/Mips/msa/2rf_tq.ll50
-rw-r--r--test/CodeGen/Mips/msa/3r-a.ll1191
-rw-r--r--test/CodeGen/Mips/msa/3r-b.ll494
-rw-r--r--test/CodeGen/Mips/msa/3r-c.ll446
-rw-r--r--test/CodeGen/Mips/msa/3r-d.ll478
-rw-r--r--test/CodeGen/Mips/msa/3r-i.ll358
-rw-r--r--test/CodeGen/Mips/msa/3r-m.ll862
-rw-r--r--test/CodeGen/Mips/msa/3r-p.ll182
-rw-r--r--test/CodeGen/Mips/msa/3r-s.ll1353
-rw-r--r--test/CodeGen/Mips/msa/3r-v.ll105
-rw-r--r--test/CodeGen/Mips/msa/3r_4r.ll206
-rw-r--r--test/CodeGen/Mips/msa/3r_4r_widen.ll307
-rw-r--r--test/CodeGen/Mips/msa/3r_splat.ll94
-rw-r--r--test/CodeGen/Mips/msa/3rf.ll485
-rw-r--r--test/CodeGen/Mips/msa/3rf_4rf.ll106
-rw-r--r--test/CodeGen/Mips/msa/3rf_4rf_q.ll206
-rw-r--r--test/CodeGen/Mips/msa/3rf_exdo.ll50
-rw-r--r--test/CodeGen/Mips/msa/3rf_float_int.ll50
-rw-r--r--test/CodeGen/Mips/msa/3rf_int_float.ll974
-rw-r--r--test/CodeGen/Mips/msa/3rf_q.ll94
-rw-r--r--test/CodeGen/Mips/msa/arithmetic.ll726
-rw-r--r--test/CodeGen/Mips/msa/arithmetic_float.ll456
-rw-r--r--test/CodeGen/Mips/msa/basic_operations.ll481
-rw-r--r--test/CodeGen/Mips/msa/basic_operations_float.ll207
-rw-r--r--test/CodeGen/Mips/msa/bit.ll537
-rw-r--r--test/CodeGen/Mips/msa/bitcast.ll1210
-rw-r--r--test/CodeGen/Mips/msa/bitwise.ll1639
-rw-r--r--test/CodeGen/Mips/msa/compare.ll2079
-rw-r--r--test/CodeGen/Mips/msa/compare_float.ll663
-rw-r--r--test/CodeGen/Mips/msa/elm_copy.ll162
-rw-r--r--test/CodeGen/Mips/msa/elm_cxcmsa.ll168
-rw-r--r--test/CodeGen/Mips/msa/elm_insv.ll192
-rw-r--r--test/CodeGen/Mips/msa/elm_move.ll25
-rw-r--r--test/CodeGen/Mips/msa/elm_shift_slide.ll158
-rw-r--r--test/CodeGen/Mips/msa/endian.ll107
-rw-r--r--test/CodeGen/Mips/msa/frameindex.ll85
-rw-r--r--test/CodeGen/Mips/msa/i10.ll89
-rw-r--r--test/CodeGen/Mips/msa/i5-a.ll82
-rw-r--r--test/CodeGen/Mips/msa/i5-b.ll439
-rw-r--r--test/CodeGen/Mips/msa/i5-c.ll386
-rw-r--r--test/CodeGen/Mips/msa/i5-m.ll310
-rw-r--r--test/CodeGen/Mips/msa/i5-s.ll82
-rw-r--r--test/CodeGen/Mips/msa/i5_ld_st.ll150
-rw-r--r--test/CodeGen/Mips/msa/i8.ll211
-rw-r--r--test/CodeGen/Mips/msa/inline-asm.ll34
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll134
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll138
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s2090927243-simplified.ll31
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s2501752154-simplified.ll27
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll141
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll149
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll143
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll152
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll33
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s525530439.ll139
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-s997348632.ll143
-rw-r--r--test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll23
-rw-r--r--test/CodeGen/Mips/msa/shift-dagcombine.ll70
-rw-r--r--test/CodeGen/Mips/msa/shuffle.ll803
-rw-r--r--test/CodeGen/Mips/msa/special.ll26
-rw-r--r--test/CodeGen/Mips/msa/spill.ll601
-rw-r--r--test/CodeGen/Mips/msa/vec.ll946
-rw-r--r--test/CodeGen/Mips/msa/vecs10.ll47
-rw-r--r--test/CodeGen/Mips/nomips16.ll38
-rw-r--r--test/CodeGen/Mips/o32_cc.ll242
-rw-r--r--test/CodeGen/Mips/o32_cc_byval.ll62
-rw-r--r--test/CodeGen/Mips/o32_cc_vararg.ll20
-rw-r--r--test/CodeGen/Mips/optimize-fp-math.ll32
-rw-r--r--test/CodeGen/Mips/powif64_16.ll26
-rw-r--r--test/CodeGen/Mips/private.ll4
-rw-r--r--test/CodeGen/Mips/ra-allocatable.ll367
-rw-r--r--test/CodeGen/Mips/return-vector.ll42
-rw-r--r--test/CodeGen/Mips/rotate.ll5
-rw-r--r--test/CodeGen/Mips/sel1c.ll21
-rw-r--r--test/CodeGen/Mips/sel2c.ll21
-rw-r--r--test/CodeGen/Mips/selectcc.ll16
-rw-r--r--test/CodeGen/Mips/selnek.ll2
-rw-r--r--test/CodeGen/Mips/setcc-se.ll155
-rw-r--r--test/CodeGen/Mips/simplebr.ll37
-rw-r--r--test/CodeGen/Mips/sint-fp-store_pattern.ll52
-rw-r--r--test/CodeGen/Mips/stack-alignment.ll13
-rw-r--r--test/CodeGen/Mips/stackcoloring.ll39
-rw-r--r--test/CodeGen/Mips/stchar.ll4
-rw-r--r--test/CodeGen/Mips/tailcall.ll13
-rw-r--r--test/CodeGen/Mips/tls-alias.ll4
-rw-r--r--test/CodeGen/Mips/tls-models.ll32
-rw-r--r--test/CodeGen/Mips/tls.ll34
-rw-r--r--test/CodeGen/Mips/tnaked.ll8
-rw-r--r--test/CodeGen/Mips/trap.ll11
-rw-r--r--test/CodeGen/Mips/trap1.ll13
-rw-r--r--test/CodeGen/Mips/unalignedload.ll20
-rw-r--r--test/CodeGen/NVPTX/add-128bit.ll19
-rw-r--r--test/CodeGen/NVPTX/bug17709.ll26
-rw-r--r--test/CodeGen/NVPTX/callchain.ll10
-rw-r--r--test/CodeGen/NVPTX/compare-int.ll60
-rw-r--r--test/CodeGen/NVPTX/constant-vectors.ll6
-rw-r--r--test/CodeGen/NVPTX/convert-int-sm20.ll8
-rw-r--r--test/CodeGen/NVPTX/ctlz.ll44
-rw-r--r--test/CodeGen/NVPTX/ctpop.ll25
-rw-r--r--test/CodeGen/NVPTX/cttz.ll45
-rw-r--r--test/CodeGen/NVPTX/fast-math.ll43
-rw-r--r--test/CodeGen/NVPTX/fp-literals.ll18
-rw-r--r--test/CodeGen/NVPTX/generic-to-nvvm.ll3
-rw-r--r--test/CodeGen/NVPTX/i1-global.ll4
-rw-r--r--test/CodeGen/NVPTX/i1-int-to-fp.ll37
-rw-r--r--test/CodeGen/NVPTX/i1-param.ll3
-rw-r--r--test/CodeGen/NVPTX/i8-param.ll23
-rw-r--r--test/CodeGen/NVPTX/implicit-def.ll9
-rw-r--r--test/CodeGen/NVPTX/inline-asm.ll9
-rw-r--r--test/CodeGen/NVPTX/intrinsic-old.ll66
-rw-r--r--test/CodeGen/NVPTX/intrinsics.ll4
-rw-r--r--test/CodeGen/NVPTX/ld-addrspace.ll24
-rw-r--r--test/CodeGen/NVPTX/ld-generic.ll8
-rw-r--r--test/CodeGen/NVPTX/ldu-i8.ll14
-rw-r--r--test/CodeGen/NVPTX/ldu-reg-plus-offset.ll21
-rw-r--r--test/CodeGen/NVPTX/lit.local.cfg2
-rw-r--r--test/CodeGen/NVPTX/load-sext-i1.ll14
-rw-r--r--test/CodeGen/NVPTX/local-stack-frame.ll18
-rw-r--r--test/CodeGen/NVPTX/module-inline-asm.ll10
-rw-r--r--test/CodeGen/NVPTX/pr13291-i1-store.ll20
-rw-r--r--test/CodeGen/NVPTX/pr16278.ll10
-rw-r--r--test/CodeGen/NVPTX/pr17529.ll38
-rw-r--r--test/CodeGen/NVPTX/refl1.ll4
-rw-r--r--test/CodeGen/NVPTX/rsqrt.ll13
-rw-r--r--test/CodeGen/NVPTX/sext-in-reg.ll111
-rw-r--r--test/CodeGen/NVPTX/sext-params.ll16
-rw-r--r--test/CodeGen/NVPTX/st-addrspace.ll12
-rw-r--r--test/CodeGen/NVPTX/st-generic.ll4
-rw-r--r--test/CodeGen/NVPTX/vec-param-load.ll13
-rw-r--r--test/CodeGen/NVPTX/vec8.ll13
-rw-r--r--test/CodeGen/NVPTX/vector-args.ll16
-rw-r--r--test/CodeGen/NVPTX/vector-stores.ll30
-rw-r--r--test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll16
-rw-r--r--test/CodeGen/PowerPC/2009-09-18-carrybit.ll2
-rw-r--r--test/CodeGen/PowerPC/2010-02-12-saveCR.ll2
-rw-r--r--test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll2
-rw-r--r--test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll2
-rw-r--r--test/CodeGen/PowerPC/2013-05-15-preinc-fold.ll33
-rw-r--r--test/CodeGen/PowerPC/2013-07-01-PHIElimBug.ll28
-rw-r--r--test/CodeGen/PowerPC/Frames-alloca.ll14
-rw-r--r--test/CodeGen/PowerPC/addc.ll6
-rw-r--r--test/CodeGen/PowerPC/addrfuncstr.ll27
-rw-r--r--test/CodeGen/PowerPC/altivec-ord.ll17
-rw-r--r--test/CodeGen/PowerPC/anon_aggr.ll108
-rw-r--r--test/CodeGen/PowerPC/ashr-neg1.ll18
-rw-r--r--test/CodeGen/PowerPC/asm-dialect.ll18
-rw-r--r--test/CodeGen/PowerPC/asym-regclass-copy.ll2
-rw-r--r--test/CodeGen/PowerPC/atomic-1.ll6
-rw-r--r--test/CodeGen/PowerPC/atomic-2.ll6
-rw-r--r--test/CodeGen/PowerPC/bdzlr.ll11
-rw-r--r--test/CodeGen/PowerPC/bv-pres-v8i1.ll39
-rw-r--r--test/CodeGen/PowerPC/bv-widen-undef.ll23
-rw-r--r--test/CodeGen/PowerPC/complex-return.ll4
-rw-r--r--test/CodeGen/PowerPC/copysignl.ll67
-rw-r--r--test/CodeGen/PowerPC/cr-spills.ll107
-rw-r--r--test/CodeGen/PowerPC/crsave.ll27
-rw-r--r--test/CodeGen/PowerPC/ctr-cleanup.ll2
-rw-r--r--test/CodeGen/PowerPC/ctrloop-asm.ll38
-rw-r--r--test/CodeGen/PowerPC/ctrloop-cpsgn.ll28
-rw-r--r--test/CodeGen/PowerPC/ctrloop-fp64.ll60
-rw-r--r--test/CodeGen/PowerPC/ctrloop-i64.ll93
-rw-r--r--test/CodeGen/PowerPC/ctrloop-large-ec.ll23
-rw-r--r--test/CodeGen/PowerPC/ctrloop-le.ll441
-rw-r--r--test/CodeGen/PowerPC/ctrloop-lt.ll438
-rw-r--r--test/CodeGen/PowerPC/ctrloop-ne.ll449
-rw-r--r--test/CodeGen/PowerPC/ctrloops.ll6
-rw-r--r--test/CodeGen/PowerPC/dbg.ll24
-rw-r--r--test/CodeGen/PowerPC/dyn-alloca-aligned.ll35
-rw-r--r--test/CodeGen/PowerPC/emptystruct.ll8
-rw-r--r--test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll48
-rw-r--r--test/CodeGen/PowerPC/fast-isel-binary.ll137
-rw-r--r--test/CodeGen/PowerPC/fast-isel-br-const.ll43
-rw-r--r--test/CodeGen/PowerPC/fast-isel-call.ll132
-rw-r--r--test/CodeGen/PowerPC/fast-isel-cmp-imm.ll289
-rw-r--r--test/CodeGen/PowerPC/fast-isel-conversion.ll305
-rw-r--r--test/CodeGen/PowerPC/fast-isel-crash.ll23
-rw-r--r--test/CodeGen/PowerPC/fast-isel-ext.ll75
-rw-r--r--test/CodeGen/PowerPC/fast-isel-fold.ll129
-rw-r--r--test/CodeGen/PowerPC/fast-isel-indirectbr.ll15
-rw-r--r--test/CodeGen/PowerPC/fast-isel-load-store.ll202
-rw-r--r--test/CodeGen/PowerPC/fast-isel-redefinition.ll10
-rw-r--r--test/CodeGen/PowerPC/fast-isel-ret.ll142
-rw-r--r--test/CodeGen/PowerPC/fast-isel-shifter.ll50
-rw-r--r--test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll17
-rw-r--r--test/CodeGen/PowerPC/fcpsgn.ll52
-rw-r--r--test/CodeGen/PowerPC/floatPSA.ll2
-rw-r--r--test/CodeGen/PowerPC/fma.ll16
-rw-r--r--test/CodeGen/PowerPC/frameaddr.ll4
-rw-r--r--test/CodeGen/PowerPC/glob-comp-aa-crash.ll139
-rw-r--r--test/CodeGen/PowerPC/hello-reloc.s84
-rw-r--r--test/CodeGen/PowerPC/i64_fp_round.ll2
-rw-r--r--test/CodeGen/PowerPC/indirectbr.ll6
-rw-r--r--test/CodeGen/PowerPC/inlineasm-i64-reg.ll108
-rw-r--r--test/CodeGen/PowerPC/isel-rc-nox0.ll46
-rw-r--r--test/CodeGen/PowerPC/jaggedstructs.ll28
-rw-r--r--test/CodeGen/PowerPC/lit.local.cfg2
-rw-r--r--test/CodeGen/PowerPC/mcm-1.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-10.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-11.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-12.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-2.ll12
-rw-r--r--test/CodeGen/PowerPC/mcm-3.ll12
-rw-r--r--test/CodeGen/PowerPC/mcm-4.ll12
-rw-r--r--test/CodeGen/PowerPC/mcm-5.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-6.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-7.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-8.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-9.ll5
-rw-r--r--test/CodeGen/PowerPC/mcm-default.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-obj-2.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-obj.ll8
-rw-r--r--test/CodeGen/PowerPC/misched-inorder-latency.ll4
-rw-r--r--test/CodeGen/PowerPC/mulli64.ll16
-rw-r--r--test/CodeGen/PowerPC/negctr.ll9
-rw-r--r--test/CodeGen/PowerPC/optcmp.ll39
-rw-r--r--test/CodeGen/PowerPC/ppc32-vacopy.ll24
-rw-r--r--test/CodeGen/PowerPC/ppc64-align-long-double.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc64-calls.ll10
-rw-r--r--test/CodeGen/PowerPC/ppc64-toc.ll8
-rw-r--r--test/CodeGen/PowerPC/pr13891.ll2
-rw-r--r--test/CodeGen/PowerPC/pr15031.ll26
-rw-r--r--test/CodeGen/PowerPC/pr16556-2.ll41
-rw-r--r--test/CodeGen/PowerPC/pr16556.ll20
-rw-r--r--test/CodeGen/PowerPC/pr16573.ll11
-rw-r--r--test/CodeGen/PowerPC/pr17168.ll521
-rw-r--r--test/CodeGen/PowerPC/pr17354.ll39
-rw-r--r--test/CodeGen/PowerPC/recipest.ll37
-rw-r--r--test/CodeGen/PowerPC/reg-names.ll17
-rw-r--r--test/CodeGen/PowerPC/reloc-align.ll34
-rw-r--r--test/CodeGen/PowerPC/remap-crash.ll57
-rw-r--r--test/CodeGen/PowerPC/rlwimi-and.ll43
-rw-r--r--test/CodeGen/PowerPC/rounding-ops.ll91
-rw-r--r--test/CodeGen/PowerPC/rs-undef-use.ll48
-rw-r--r--test/CodeGen/PowerPC/set0-v8i16.ll18
-rw-r--r--test/CodeGen/PowerPC/sj-ctr-loop.ll50
-rw-r--r--test/CodeGen/PowerPC/sjlj.ll66
-rw-r--r--test/CodeGen/PowerPC/stack-protector.ll5
-rw-r--r--test/CodeGen/PowerPC/stack-realign.ll147
-rw-r--r--test/CodeGen/PowerPC/std-unal-fi.ll119
-rw-r--r--test/CodeGen/PowerPC/store-update.ll56
-rw-r--r--test/CodeGen/PowerPC/structsinmem.ll2
-rw-r--r--test/CodeGen/PowerPC/structsinregs.ll2
-rw-r--r--test/CodeGen/PowerPC/sub-bv-types.ll17
-rw-r--r--test/CodeGen/PowerPC/subsumes-pred-regs.ll65
-rw-r--r--test/CodeGen/PowerPC/svr4-redzone.ll12
-rw-r--r--test/CodeGen/PowerPC/tls-2.ll2
-rw-r--r--test/CodeGen/PowerPC/tls-gd-obj.ll31
-rw-r--r--test/CodeGen/PowerPC/tls-ie-obj.ll29
-rw-r--r--test/CodeGen/PowerPC/tls-ld-obj.ll34
-rw-r--r--test/CodeGen/PowerPC/tls.ll4
-rw-r--r--test/CodeGen/PowerPC/unal-altivec.ll52
-rw-r--r--test/CodeGen/PowerPC/unal-altivec2.ll166
-rw-r--r--test/CodeGen/PowerPC/unal4-std.ll2
-rw-r--r--test/CodeGen/PowerPC/unwind-dw2-g.ll35
-rw-r--r--test/CodeGen/PowerPC/unwind-dw2.ll15
-rw-r--r--test/CodeGen/PowerPC/vaddsplat.ll24
-rw-r--r--test/CodeGen/PowerPC/varargs.ll4
-rw-r--r--test/CodeGen/PowerPC/vec-abi-align.ll60
-rw-r--r--test/CodeGen/PowerPC/vec_cmp.ll100
-rw-r--r--test/CodeGen/PowerPC/vec_constants.ll16
-rw-r--r--test/CodeGen/PowerPC/vec_conv.ll8
-rw-r--r--test/CodeGen/PowerPC/vec_extload.ll14
-rw-r--r--test/CodeGen/PowerPC/vec_fmuladd.ll56
-rw-r--r--test/CodeGen/PowerPC/vec_mul.ll8
-rw-r--r--test/CodeGen/PowerPC/vec_rounding.ll32
-rw-r--r--test/CodeGen/PowerPC/vec_sqrt.ll10
-rw-r--r--test/CodeGen/PowerPC/vector.ll8
-rw-r--r--test/CodeGen/PowerPC/vrspill.ll7
-rw-r--r--test/CodeGen/PowerPC/zero-not-run.ll27
-rw-r--r--test/CodeGen/R600/128bit-kernel-args.ll22
-rw-r--r--test/CodeGen/R600/32-bit-local-address-space.ll88
-rw-r--r--test/CodeGen/R600/64bit-kernel-args.ll11
-rw-r--r--test/CodeGen/R600/add.ll57
-rw-r--r--test/CodeGen/R600/add_i64.ll59
-rw-r--r--test/CodeGen/R600/address-space.ll31
-rw-r--r--test/CodeGen/R600/alu-split.ll851
-rw-r--r--test/CodeGen/R600/and.ll37
-rw-r--r--test/CodeGen/R600/array-ptr-calc-i64.ll18
-rw-r--r--test/CodeGen/R600/atomic_load_add.ll23
-rw-r--r--test/CodeGen/R600/atomic_load_sub.ll23
-rw-r--r--test/CodeGen/R600/bfi_int.ll10
-rw-r--r--test/CodeGen/R600/big_alu.ll1174
-rw-r--r--test/CodeGen/R600/bitcast.ll21
-rw-r--r--test/CodeGen/R600/build_vector.ll34
-rw-r--r--test/CodeGen/R600/call_fs.ll2
-rw-r--r--test/CodeGen/R600/combine_vloads.ll42
-rw-r--r--test/CodeGen/R600/complex-folding.ll19
-rw-r--r--test/CodeGen/R600/dot4-folding.ll27
-rw-r--r--test/CodeGen/R600/elf.ll4
-rw-r--r--test/CodeGen/R600/extload.ll51
-rw-r--r--test/CodeGen/R600/fabs.ll56
-rw-r--r--test/CodeGen/R600/fadd.ll50
-rw-r--r--test/CodeGen/R600/fadd64.ll13
-rw-r--r--test/CodeGen/R600/fcmp-cnd.ll2
-rw-r--r--test/CodeGen/R600/fcmp.ll2
-rw-r--r--test/CodeGen/R600/fcmp64.ll79
-rw-r--r--test/CodeGen/R600/fconst64.ll12
-rw-r--r--test/CodeGen/R600/fdiv.ll51
-rw-r--r--test/CodeGen/R600/fdiv64.ll14
-rw-r--r--test/CodeGen/R600/fetch-limits.r600.ll48
-rw-r--r--test/CodeGen/R600/fetch-limits.r700+.ll81
-rw-r--r--test/CodeGen/R600/floor.ll14
-rw-r--r--test/CodeGen/R600/fma.ll31
-rw-r--r--test/CodeGen/R600/fmad.ll22
-rw-r--r--test/CodeGen/R600/fmax.ll15
-rw-r--r--test/CodeGen/R600/fmin.ll13
-rw-r--r--test/CodeGen/R600/fmul.ll48
-rw-r--r--test/CodeGen/R600/fmul.v4f32.ll15
-rw-r--r--test/CodeGen/R600/fmul64.ll13
-rw-r--r--test/CodeGen/R600/fmuladd.ll31
-rw-r--r--test/CodeGen/R600/fneg.ll61
-rw-r--r--test/CodeGen/R600/fp64_to_sint.ll9
-rw-r--r--test/CodeGen/R600/fp_to_sint.ll29
-rw-r--r--test/CodeGen/R600/fp_to_uint.ll31
-rw-r--r--test/CodeGen/R600/fpext.ll9
-rw-r--r--test/CodeGen/R600/fptrunc.ll9
-rw-r--r--test/CodeGen/R600/fsqrt.ll24
-rw-r--r--test/CodeGen/R600/fsub.ll48
-rw-r--r--test/CodeGen/R600/fsub64.ll13
-rw-r--r--test/CodeGen/R600/gep-address-space.ll40
-rw-r--r--test/CodeGen/R600/icmp-select-sete-reverse-args.ll2
-rw-r--r--test/CodeGen/R600/imm.ll14
-rw-r--r--test/CodeGen/R600/indirect-addressing-si.ll48
-rw-r--r--test/CodeGen/R600/insert_vector_elt.ll16
-rw-r--r--test/CodeGen/R600/kcache-fold.ll18
-rw-r--r--test/CodeGen/R600/kernel-args.ll455
-rw-r--r--test/CodeGen/R600/lds-output-queue.ll99
-rw-r--r--test/CodeGen/R600/lds-size.ll26
-rw-r--r--test/CodeGen/R600/lit.local.cfg12
-rw-r--r--test/CodeGen/R600/literals.ll191
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll24
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.cube.ll59
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imax.ll21
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.imin.ll21
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.mul.ll16
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.tex.ll32
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.trunc.ll22
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umax.ll21
-rw-r--r--test/CodeGen/R600/llvm.AMDGPU.umin.ll21
-rw-r--r--test/CodeGen/R600/llvm.SI.fs.interp.constant.ll2
-rw-r--r--test/CodeGen/R600/llvm.SI.imageload.ll131
-rw-r--r--test/CodeGen/R600/llvm.SI.resinfo.ll110
-rw-r--r--test/CodeGen/R600/llvm.SI.sample-masked.ll93
-rw-r--r--test/CodeGen/R600/llvm.SI.sample.ll85
-rw-r--r--test/CodeGen/R600/llvm.SI.sampled.ll140
-rw-r--r--test/CodeGen/R600/llvm.SI.tbuffer.store.ll44
-rw-r--r--test/CodeGen/R600/llvm.SI.tid.ll16
-rw-r--r--test/CodeGen/R600/llvm.cos.ll17
-rw-r--r--test/CodeGen/R600/llvm.floor.ll54
-rw-r--r--test/CodeGen/R600/llvm.pow.ll20
-rw-r--r--test/CodeGen/R600/llvm.rint.ll54
-rw-r--r--test/CodeGen/R600/llvm.round.ll41
-rw-r--r--test/CodeGen/R600/llvm.sin.ll17
-rw-r--r--test/CodeGen/R600/llvm.sqrt.ll54
-rw-r--r--test/CodeGen/R600/load-input-fold.ll118
-rw-r--r--test/CodeGen/R600/load.ll669
-rw-r--r--test/CodeGen/R600/load.vec.ll24
-rw-r--r--test/CodeGen/R600/load64.ll20
-rw-r--r--test/CodeGen/R600/local-memory-two-objects.ll58
-rw-r--r--test/CodeGen/R600/local-memory.ll50
-rw-r--r--test/CodeGen/R600/loop-address.ll10
-rw-r--r--test/CodeGen/R600/lshl.ll4
-rw-r--r--test/CodeGen/R600/lshr.ll4
-rw-r--r--test/CodeGen/R600/mad_int24.ll20
-rw-r--r--test/CodeGen/R600/mad_uint24.ll70
-rw-r--r--test/CodeGen/R600/max-literals.ll67
-rw-r--r--test/CodeGen/R600/mul.ll38
-rw-r--r--test/CodeGen/R600/mul_int24.ll19
-rw-r--r--test/CodeGen/R600/mul_uint24.ll65
-rw-r--r--test/CodeGen/R600/mulhu.ll8
-rw-r--r--test/CodeGen/R600/or.ll54
-rw-r--r--test/CodeGen/R600/packetizer.ll34
-rw-r--r--test/CodeGen/R600/parallelandifcollapse.ll54
-rw-r--r--test/CodeGen/R600/parallelorifcollapse.ll61
-rw-r--r--test/CodeGen/R600/predicate-dp4.ll27
-rw-r--r--test/CodeGen/R600/predicates.ll2
-rw-r--r--test/CodeGen/R600/private-memory.ll115
-rw-r--r--test/CodeGen/R600/pv-packing.ll45
-rw-r--r--test/CodeGen/R600/pv.ll63
-rw-r--r--test/CodeGen/R600/r600-encoding.ll19
-rw-r--r--test/CodeGen/R600/r600-export-fix.ll142
-rw-r--r--test/CodeGen/R600/r600cfg.ll120
-rw-r--r--test/CodeGen/R600/reciprocal.ll13
-rw-r--r--test/CodeGen/R600/rotr.ll37
-rw-r--r--test/CodeGen/R600/rv7x0_count3.ll41
-rw-r--r--test/CodeGen/R600/schedule-fs-loop-nested-if.ll15
-rw-r--r--test/CodeGen/R600/schedule-fs-loop-nested.ll2
-rw-r--r--test/CodeGen/R600/schedule-fs-loop.ll2
-rw-r--r--test/CodeGen/R600/schedule-if-2.ll2
-rw-r--r--test/CodeGen/R600/schedule-if.ll2
-rw-r--r--test/CodeGen/R600/schedule-vs-if-nested-loop.ll14
-rw-r--r--test/CodeGen/R600/select.ll46
-rw-r--r--test/CodeGen/R600/selectcc-cnd.ll4
-rw-r--r--test/CodeGen/R600/selectcc-cnde-int.ll2
-rw-r--r--test/CodeGen/R600/selectcc-opt.ll5
-rw-r--r--test/CodeGen/R600/set-dx10.ll96
-rw-r--r--test/CodeGen/R600/setcc.ll327
-rw-r--r--test/CodeGen/R600/setcc64.ll263
-rw-r--r--test/CodeGen/R600/seto.ll4
-rw-r--r--test/CodeGen/R600/setuo.ll4
-rw-r--r--test/CodeGen/R600/sgpr-copy-duplicate-operand.ll18
-rw-r--r--test/CodeGen/R600/sgpr-copy.ll327
-rw-r--r--test/CodeGen/R600/shared-op-cycle.ll32
-rw-r--r--test/CodeGen/R600/shl.ll44
-rw-r--r--test/CodeGen/R600/short-args.ll41
-rw-r--r--test/CodeGen/R600/si-annotate-cf-assertion.ll23
-rw-r--r--test/CodeGen/R600/si-lod-bias.ll51
-rw-r--r--test/CodeGen/R600/si-sgpr-spill.ll692
-rw-r--r--test/CodeGen/R600/si-vector-hang.ll107
-rw-r--r--test/CodeGen/R600/sign_extend.ll12
-rw-r--r--test/CodeGen/R600/sint_to_fp.ll29
-rw-r--r--test/CodeGen/R600/sint_to_fp64.ll9
-rw-r--r--test/CodeGen/R600/sra.ll55
-rw-r--r--test/CodeGen/R600/srl.ll42
-rw-r--r--test/CodeGen/R600/store-vector-ptrs.ll8
-rw-r--r--test/CodeGen/R600/store.ll277
-rw-r--r--test/CodeGen/R600/store.r600.ll4
-rw-r--r--test/CodeGen/R600/structurize.ll83
-rw-r--r--test/CodeGen/R600/structurize1.ll62
-rw-r--r--test/CodeGen/R600/sub.ll37
-rw-r--r--test/CodeGen/R600/swizzle-export.ll129
-rw-r--r--test/CodeGen/R600/tex-clause-antidep.ll25
-rw-r--r--test/CodeGen/R600/texture-input-merge.ll31
-rw-r--r--test/CodeGen/R600/trunc-vector-store-assertion-failure.ll20
-rw-r--r--test/CodeGen/R600/trunc.ll30
-rw-r--r--test/CodeGen/R600/udiv.ll38
-rw-r--r--test/CodeGen/R600/uint_to_fp.ll44
-rw-r--r--test/CodeGen/R600/unaligned-load-store.ll17
-rw-r--r--test/CodeGen/R600/unsupported-cc.ll68
-rw-r--r--test/CodeGen/R600/urecip.ll2
-rw-r--r--test/CodeGen/R600/urem.ll27
-rw-r--r--test/CodeGen/R600/vertex-fetch-encoding.ll25
-rw-r--r--test/CodeGen/R600/vselect.ll71
-rw-r--r--test/CodeGen/R600/vselect64.ll15
-rw-r--r--test/CodeGen/R600/vtx-schedule.ll18
-rw-r--r--test/CodeGen/R600/wait.ll37
-rw-r--r--test/CodeGen/R600/work-item-intrinsics.ll211
-rw-r--r--test/CodeGen/R600/wrong-transalu-pos-fix.ll86
-rw-r--r--test/CodeGen/R600/xor.ll57
-rw-r--r--test/CodeGen/R600/zero_extend.ll18
-rw-r--r--test/CodeGen/SI/sanity.ll37
-rwxr-xr-xtest/CodeGen/SPARC/2011-01-11-CC.ll110
-rw-r--r--test/CodeGen/SPARC/2011-01-11-Call.ll40
-rw-r--r--test/CodeGen/SPARC/2011-01-11-FrameAddr.ll43
-rw-r--r--test/CodeGen/SPARC/2011-01-19-DelaySlot.ll105
-rw-r--r--test/CodeGen/SPARC/2011-01-21-ByValArgs.ll2
-rw-r--r--test/CodeGen/SPARC/2011-01-22-SRet.ll10
-rw-r--r--test/CodeGen/SPARC/2013-05-17-CallFrame.ll26
-rw-r--r--test/CodeGen/SPARC/64abi.ll37
-rw-r--r--test/CodeGen/SPARC/64bit.ll135
-rw-r--r--test/CodeGen/SPARC/64cond.ll89
-rw-r--r--test/CodeGen/SPARC/basictest.ll24
-rw-r--r--test/CodeGen/SPARC/blockaddr.ll77
-rw-r--r--test/CodeGen/SPARC/constpool.ll15
-rw-r--r--test/CodeGen/SPARC/exception.ll112
-rw-r--r--test/CodeGen/SPARC/float.ll249
-rw-r--r--test/CodeGen/SPARC/fp128.ll234
-rw-r--r--test/CodeGen/SPARC/globals.ll19
-rw-r--r--test/CodeGen/SPARC/leafproc.ll80
-rw-r--r--test/CodeGen/SPARC/lit.local.cfg2
-rw-r--r--test/CodeGen/SPARC/rem.ll39
-rw-r--r--test/CodeGen/SPARC/setjmp.ll72
-rw-r--r--test/CodeGen/SPARC/tls.ll73
-rw-r--r--test/CodeGen/SPARC/varargs.ll1
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-01.py105
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-02.py82
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-03.py107
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-04.py111
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-05.py109
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-06.py109
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-07.py68
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-08.py69
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-09.py107
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-10.py111
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-11.py127
-rw-r--r--test/CodeGen/SystemZ/Large/branch-range-12.py127
-rw-r--r--test/CodeGen/SystemZ/Large/lit.local.cfg10
-rw-r--r--test/CodeGen/SystemZ/Large/spill-01.py40
-rw-r--r--test/CodeGen/SystemZ/Large/spill-02.py73
-rw-r--r--test/CodeGen/SystemZ/addr-01.ll16
-rw-r--r--test/CodeGen/SystemZ/addr-02.ll16
-rw-r--r--test/CodeGen/SystemZ/addr-03.ll10
-rw-r--r--test/CodeGen/SystemZ/alias-01.ll19
-rw-r--r--test/CodeGen/SystemZ/alloca-01.ll42
-rw-r--r--test/CodeGen/SystemZ/alloca-02.ll35
-rw-r--r--test/CodeGen/SystemZ/and-01.ll70
-rw-r--r--test/CodeGen/SystemZ/and-02.ll189
-rw-r--r--test/CodeGen/SystemZ/and-03.ll64
-rw-r--r--test/CodeGen/SystemZ/and-04.ll153
-rw-r--r--test/CodeGen/SystemZ/and-05.ll26
-rw-r--r--test/CodeGen/SystemZ/and-06.ll16
-rw-r--r--test/CodeGen/SystemZ/and-07.ll39
-rw-r--r--test/CodeGen/SystemZ/and-08.ll378
-rw-r--r--test/CodeGen/SystemZ/args-01.ll33
-rw-r--r--test/CodeGen/SystemZ/args-02.ll33
-rw-r--r--test/CodeGen/SystemZ/args-03.ll33
-rw-r--r--test/CodeGen/SystemZ/args-04.ll26
-rw-r--r--test/CodeGen/SystemZ/args-05.ll8
-rw-r--r--test/CodeGen/SystemZ/args-06.ll12
-rw-r--r--test/CodeGen/SystemZ/asm-01.ll10
-rw-r--r--test/CodeGen/SystemZ/asm-02.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-03.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-04.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-05.ll2
-rw-r--r--test/CodeGen/SystemZ/asm-06.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-07.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-08.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-09.ll16
-rw-r--r--test/CodeGen/SystemZ/asm-10.ll6
-rw-r--r--test/CodeGen/SystemZ/asm-11.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-12.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-13.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-14.ll8
-rw-r--r--test/CodeGen/SystemZ/asm-15.ll6
-rw-r--r--test/CodeGen/SystemZ/asm-16.ll6
-rw-r--r--test/CodeGen/SystemZ/asm-17.ll105
-rw-r--r--test/CodeGen/SystemZ/asm-18.ll745
-rw-r--r--test/CodeGen/SystemZ/atomic-load-01.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-load-02.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-load-03.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-load-04.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-store-01.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-store-02.ll2
-rw-r--r--test/CodeGen/SystemZ/atomic-store-03.ll4
-rw-r--r--test/CodeGen/SystemZ/atomic-store-04.ll4
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-03.ll24
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-add-04.ll28
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-03.ll22
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-and-04.ll136
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-minmax-01.ll84
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-minmax-02.ll84
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-minmax-03.ll60
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-minmax-04.ll54
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-nand-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-nand-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-nand-03.ll22
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-nand-04.ll160
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-03.ll22
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-or-04.ll38
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-03.ll24
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-sub-04.ll28
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xchg-01.ll10
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xchg-02.ll10
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xchg-03.ll24
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xchg-04.ll18
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-01.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-02.ll40
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-03.ll14
-rw-r--r--test/CodeGen/SystemZ/atomicrmw-xor-04.ll20
-rw-r--r--test/CodeGen/SystemZ/branch-01.ll4
-rw-r--r--test/CodeGen/SystemZ/branch-02.ll27
-rw-r--r--test/CodeGen/SystemZ/branch-03.ll16
-rw-r--r--test/CodeGen/SystemZ/branch-04.ll56
-rw-r--r--test/CodeGen/SystemZ/branch-05.ll5
-rw-r--r--test/CodeGen/SystemZ/branch-06.ll190
-rw-r--r--test/CodeGen/SystemZ/branch-07.ll157
-rw-r--r--test/CodeGen/SystemZ/branch-08.ll46
-rw-r--r--test/CodeGen/SystemZ/branch-09.ll62
-rw-r--r--test/CodeGen/SystemZ/branch-10.ll62
-rw-r--r--test/CodeGen/SystemZ/bswap-01.ll8
-rw-r--r--test/CodeGen/SystemZ/bswap-02.ll103
-rw-r--r--test/CodeGen/SystemZ/bswap-03.ll103
-rw-r--r--test/CodeGen/SystemZ/bswap-04.ll50
-rw-r--r--test/CodeGen/SystemZ/bswap-05.ll50
-rw-r--r--test/CodeGen/SystemZ/call-01.ll2
-rw-r--r--test/CodeGen/SystemZ/call-02.ll2
-rw-r--r--test/CodeGen/SystemZ/call-03.ll125
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-01.ll13
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-02.ll13
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-03.ll24
-rw-r--r--test/CodeGen/SystemZ/cmpxchg-04.ll18
-rw-r--r--test/CodeGen/SystemZ/cond-load-01.ll130
-rw-r--r--test/CodeGen/SystemZ/cond-load-02.ll130
-rw-r--r--test/CodeGen/SystemZ/cond-move-01.ll48
-rw-r--r--test/CodeGen/SystemZ/cond-store-01.ll398
-rw-r--r--test/CodeGen/SystemZ/cond-store-02.ll398
-rw-r--r--test/CodeGen/SystemZ/cond-store-03.ll322
-rw-r--r--test/CodeGen/SystemZ/cond-store-04.ll214
-rw-r--r--test/CodeGen/SystemZ/cond-store-05.ll213
-rw-r--r--test/CodeGen/SystemZ/cond-store-06.ll213
-rw-r--r--test/CodeGen/SystemZ/cond-store-07.ll186
-rw-r--r--test/CodeGen/SystemZ/cond-store-08.ll124
-rw-r--r--test/CodeGen/SystemZ/fp-abs-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-abs-02.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-add-01.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-add-02.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-add-03.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-01.ll258
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-02.ll114
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-03.ll24
-rw-r--r--test/CodeGen/SystemZ/fp-cmp-04.ll348
-rw-r--r--test/CodeGen/SystemZ/fp-const-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-const-02.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-const-03.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-04.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-05.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-06.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-07.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-08.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-const-09.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-conv-01.ll10
-rw-r--r--test/CodeGen/SystemZ/fp-conv-02.ll93
-rw-r--r--test/CodeGen/SystemZ/fp-conv-03.ll93
-rw-r--r--test/CodeGen/SystemZ/fp-conv-04.ll93
-rw-r--r--test/CodeGen/SystemZ/fp-conv-05.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-06.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-07.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-08.ll8
-rw-r--r--test/CodeGen/SystemZ/fp-conv-09.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-10.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-11.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-conv-12.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-copysign-01.ll18
-rw-r--r--test/CodeGen/SystemZ/fp-div-01.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-div-02.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-div-03.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-move-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-move-02.ll316
-rw-r--r--test/CodeGen/SystemZ/fp-move-03.ll20
-rw-r--r--test/CodeGen/SystemZ/fp-move-04.ll20
-rw-r--r--test/CodeGen/SystemZ/fp-move-05.ll22
-rw-r--r--test/CodeGen/SystemZ/fp-move-06.ll20
-rw-r--r--test/CodeGen/SystemZ/fp-move-07.ll20
-rw-r--r--test/CodeGen/SystemZ/fp-move-08.ll22
-rw-r--r--test/CodeGen/SystemZ/fp-move-09.ll62
-rw-r--r--test/CodeGen/SystemZ/fp-mul-01.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-mul-02.ll132
-rw-r--r--test/CodeGen/SystemZ/fp-mul-03.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-mul-04.ll142
-rw-r--r--test/CodeGen/SystemZ/fp-mul-05.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-mul-06.ll16
-rw-r--r--test/CodeGen/SystemZ/fp-mul-07.ll16
-rw-r--r--test/CodeGen/SystemZ/fp-mul-08.ll16
-rw-r--r--test/CodeGen/SystemZ/fp-mul-09.ll16
-rw-r--r--test/CodeGen/SystemZ/fp-neg-01.ll6
-rw-r--r--test/CodeGen/SystemZ/fp-round-01.ll132
-rw-r--r--test/CodeGen/SystemZ/fp-round-02.ll195
-rw-r--r--test/CodeGen/SystemZ/fp-sqrt-01.ll110
-rw-r--r--test/CodeGen/SystemZ/fp-sqrt-02.ll108
-rw-r--r--test/CodeGen/SystemZ/fp-sqrt-03.ll2
-rw-r--r--test/CodeGen/SystemZ/fp-sub-01.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-sub-02.ll60
-rw-r--r--test/CodeGen/SystemZ/fp-sub-03.ll2
-rw-r--r--test/CodeGen/SystemZ/frame-01.ll70
-rw-r--r--test/CodeGen/SystemZ/frame-02.ll8
-rw-r--r--test/CodeGen/SystemZ/frame-03.ll8
-rw-r--r--test/CodeGen/SystemZ/frame-04.ll8
-rw-r--r--test/CodeGen/SystemZ/frame-05.ll8
-rw-r--r--test/CodeGen/SystemZ/frame-06.ll8
-rw-r--r--test/CodeGen/SystemZ/frame-07.ll22
-rw-r--r--test/CodeGen/SystemZ/frame-08.ll44
-rw-r--r--test/CodeGen/SystemZ/frame-09.ll28
-rw-r--r--test/CodeGen/SystemZ/frame-10.ll2
-rw-r--r--test/CodeGen/SystemZ/frame-11.ll2
-rw-r--r--test/CodeGen/SystemZ/frame-13.ll182
-rw-r--r--test/CodeGen/SystemZ/frame-14.ll172
-rw-r--r--test/CodeGen/SystemZ/frame-15.ll203
-rw-r--r--test/CodeGen/SystemZ/frame-16.ll182
-rw-r--r--test/CodeGen/SystemZ/frame-17.ll6
-rw-r--r--test/CodeGen/SystemZ/frame-18.ll9
-rw-r--r--test/CodeGen/SystemZ/insert-01.ll34
-rw-r--r--test/CodeGen/SystemZ/insert-02.ll34
-rw-r--r--test/CodeGen/SystemZ/insert-03.ll12
-rw-r--r--test/CodeGen/SystemZ/insert-04.ll24
-rw-r--r--test/CodeGen/SystemZ/insert-05.ll38
-rw-r--r--test/CodeGen/SystemZ/insert-06.ll39
-rw-r--r--test/CodeGen/SystemZ/int-abs-01.ll83
-rw-r--r--test/CodeGen/SystemZ/int-add-01.ll20
-rw-r--r--test/CodeGen/SystemZ/int-add-02.ll70
-rw-r--r--test/CodeGen/SystemZ/int-add-03.ll94
-rw-r--r--test/CodeGen/SystemZ/int-add-04.ll94
-rw-r--r--test/CodeGen/SystemZ/int-add-05.ll64
-rw-r--r--test/CodeGen/SystemZ/int-add-06.ll20
-rw-r--r--test/CodeGen/SystemZ/int-add-07.ll28
-rw-r--r--test/CodeGen/SystemZ/int-add-08.ll50
-rw-r--r--test/CodeGen/SystemZ/int-add-09.ll18
-rw-r--r--test/CodeGen/SystemZ/int-add-10.ll38
-rw-r--r--test/CodeGen/SystemZ/int-add-11.ll191
-rw-r--r--test/CodeGen/SystemZ/int-add-12.ll186
-rw-r--r--test/CodeGen/SystemZ/int-add-13.ll39
-rw-r--r--test/CodeGen/SystemZ/int-add-14.ll67
-rw-r--r--test/CodeGen/SystemZ/int-add-15.ll67
-rw-r--r--test/CodeGen/SystemZ/int-add-16.ll93
-rw-r--r--test/CodeGen/SystemZ/int-cmp-01.ll34
-rw-r--r--test/CodeGen/SystemZ/int-cmp-02.ll80
-rw-r--r--test/CodeGen/SystemZ/int-cmp-03.ll58
-rw-r--r--test/CodeGen/SystemZ/int-cmp-04.ll28
-rw-r--r--test/CodeGen/SystemZ/int-cmp-05.ll157
-rw-r--r--test/CodeGen/SystemZ/int-cmp-06.ll171
-rw-r--r--test/CodeGen/SystemZ/int-cmp-07.ll46
-rw-r--r--test/CodeGen/SystemZ/int-cmp-08.ll46
-rw-r--r--test/CodeGen/SystemZ/int-cmp-09.ll161
-rw-r--r--test/CodeGen/SystemZ/int-cmp-10.ll38
-rw-r--r--test/CodeGen/SystemZ/int-cmp-11.ll115
-rw-r--r--test/CodeGen/SystemZ/int-cmp-12.ll45
-rw-r--r--test/CodeGen/SystemZ/int-cmp-13.ll122
-rw-r--r--test/CodeGen/SystemZ/int-cmp-14.ll122
-rw-r--r--test/CodeGen/SystemZ/int-cmp-15.ll58
-rw-r--r--test/CodeGen/SystemZ/int-cmp-16.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-17.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-18.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-19.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-20.ll64
-rw-r--r--test/CodeGen/SystemZ/int-cmp-21.ll52
-rw-r--r--test/CodeGen/SystemZ/int-cmp-22.ll38
-rw-r--r--test/CodeGen/SystemZ/int-cmp-23.ll24
-rw-r--r--test/CodeGen/SystemZ/int-cmp-24.ll16
-rw-r--r--test/CodeGen/SystemZ/int-cmp-25.ll16
-rw-r--r--test/CodeGen/SystemZ/int-cmp-26.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-27.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-28.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-29.ll32
-rw-r--r--test/CodeGen/SystemZ/int-cmp-30.ll52
-rw-r--r--test/CodeGen/SystemZ/int-cmp-31.ll52
-rw-r--r--test/CodeGen/SystemZ/int-cmp-32.ll66
-rw-r--r--test/CodeGen/SystemZ/int-cmp-33.ll36
-rw-r--r--test/CodeGen/SystemZ/int-cmp-34.ll66
-rw-r--r--test/CodeGen/SystemZ/int-cmp-35.ll36
-rw-r--r--test/CodeGen/SystemZ/int-cmp-36.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-37.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-38.ll73
-rw-r--r--test/CodeGen/SystemZ/int-cmp-39.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-40.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-41.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-42.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-43.ll54
-rw-r--r--test/CodeGen/SystemZ/int-cmp-44.ll799
-rw-r--r--test/CodeGen/SystemZ/int-cmp-45.ll115
-rw-r--r--test/CodeGen/SystemZ/int-cmp-46.ll491
-rw-r--r--test/CodeGen/SystemZ/int-cmp-47.ll234
-rw-r--r--test/CodeGen/SystemZ/int-cmp-48.ll245
-rw-r--r--test/CodeGen/SystemZ/int-cmp-49.ll49
-rw-r--r--test/CodeGen/SystemZ/int-const-01.ll44
-rw-r--r--test/CodeGen/SystemZ/int-const-02.ll94
-rw-r--r--test/CodeGen/SystemZ/int-const-03.ll44
-rw-r--r--test/CodeGen/SystemZ/int-const-04.ll42
-rw-r--r--test/CodeGen/SystemZ/int-const-05.ll39
-rw-r--r--test/CodeGen/SystemZ/int-const-06.ll40
-rw-r--r--test/CodeGen/SystemZ/int-conv-01.ll116
-rw-r--r--test/CodeGen/SystemZ/int-conv-02.ll125
-rw-r--r--test/CodeGen/SystemZ/int-conv-03.ll116
-rw-r--r--test/CodeGen/SystemZ/int-conv-04.ll120
-rw-r--r--test/CodeGen/SystemZ/int-conv-05.ll122
-rw-r--r--test/CodeGen/SystemZ/int-conv-06.ll125
-rw-r--r--test/CodeGen/SystemZ/int-conv-07.ll116
-rw-r--r--test/CodeGen/SystemZ/int-conv-08.ll120
-rw-r--r--test/CodeGen/SystemZ/int-conv-09.ll22
-rw-r--r--test/CodeGen/SystemZ/int-conv-10.ll26
-rw-r--r--test/CodeGen/SystemZ/int-conv-11.ll350
-rw-r--r--test/CodeGen/SystemZ/int-div-01.ll89
-rw-r--r--test/CodeGen/SystemZ/int-div-02.ll69
-rw-r--r--test/CodeGen/SystemZ/int-div-03.ll47
-rw-r--r--test/CodeGen/SystemZ/int-div-04.ll72
-rw-r--r--test/CodeGen/SystemZ/int-div-05.ll72
-rw-r--r--test/CodeGen/SystemZ/int-div-06.ll56
-rw-r--r--test/CodeGen/SystemZ/int-move-01.ll8
-rw-r--r--test/CodeGen/SystemZ/int-move-02.ll20
-rw-r--r--test/CodeGen/SystemZ/int-move-03.ll14
-rw-r--r--test/CodeGen/SystemZ/int-move-04.ll24
-rw-r--r--test/CodeGen/SystemZ/int-move-05.ll24
-rw-r--r--test/CodeGen/SystemZ/int-move-06.ll20
-rw-r--r--test/CodeGen/SystemZ/int-move-07.ll14
-rw-r--r--test/CodeGen/SystemZ/int-move-08.ll93
-rw-r--r--test/CodeGen/SystemZ/int-move-09.ll99
-rw-r--r--test/CodeGen/SystemZ/int-mul-01.ll20
-rw-r--r--test/CodeGen/SystemZ/int-mul-02.ll67
-rw-r--r--test/CodeGen/SystemZ/int-mul-03.ll94
-rw-r--r--test/CodeGen/SystemZ/int-mul-04.ll61
-rw-r--r--test/CodeGen/SystemZ/int-mul-05.ll34
-rw-r--r--test/CodeGen/SystemZ/int-mul-06.ll34
-rw-r--r--test/CodeGen/SystemZ/int-mul-07.ll8
-rw-r--r--test/CodeGen/SystemZ/int-mul-08.ll110
-rw-r--r--test/CodeGen/SystemZ/int-neg-01.ll8
-rw-r--r--test/CodeGen/SystemZ/int-neg-02.ll91
-rw-r--r--test/CodeGen/SystemZ/int-sub-01.ll70
-rw-r--r--test/CodeGen/SystemZ/int-sub-02.ll94
-rw-r--r--test/CodeGen/SystemZ/int-sub-03.ll94
-rw-r--r--test/CodeGen/SystemZ/int-sub-04.ll64
-rw-r--r--test/CodeGen/SystemZ/int-sub-05.ll53
-rw-r--r--test/CodeGen/SystemZ/int-sub-06.ll20
-rw-r--r--test/CodeGen/SystemZ/int-sub-07.ll131
-rw-r--r--test/CodeGen/SystemZ/int-sub-08.ll39
-rw-r--r--test/CodeGen/SystemZ/int-sub-09.ll22
-rw-r--r--test/CodeGen/SystemZ/la-01.ll31
-rw-r--r--test/CodeGen/SystemZ/la-02.ll16
-rw-r--r--test/CodeGen/SystemZ/la-03.ll16
-rw-r--r--test/CodeGen/SystemZ/la-04.ll2
-rw-r--r--test/CodeGen/SystemZ/lit.local.cfg2
-rw-r--r--test/CodeGen/SystemZ/loop-01.ll124
-rw-r--r--test/CodeGen/SystemZ/memchr-01.ll21
-rw-r--r--test/CodeGen/SystemZ/memchr-02.ll57
-rw-r--r--test/CodeGen/SystemZ/memcmp-01.ll221
-rw-r--r--test/CodeGen/SystemZ/memcmp-02.ll139
-rw-r--r--test/CodeGen/SystemZ/memcpy-01.ll235
-rw-r--r--test/CodeGen/SystemZ/memcpy-02.ll392
-rw-r--r--test/CodeGen/SystemZ/memset-01.ll160
-rw-r--r--test/CodeGen/SystemZ/memset-02.ll162
-rw-r--r--test/CodeGen/SystemZ/memset-03.ll382
-rw-r--r--test/CodeGen/SystemZ/memset-04.ll398
-rw-r--r--test/CodeGen/SystemZ/or-01.ll70
-rw-r--r--test/CodeGen/SystemZ/or-02.ll14
-rw-r--r--test/CodeGen/SystemZ/or-03.ll64
-rw-r--r--test/CodeGen/SystemZ/or-04.ll38
-rw-r--r--test/CodeGen/SystemZ/or-05.ll26
-rw-r--r--test/CodeGen/SystemZ/or-06.ll16
-rw-r--r--test/CodeGen/SystemZ/or-07.ll39
-rw-r--r--test/CodeGen/SystemZ/or-08.ll57
-rw-r--r--test/CodeGen/SystemZ/prefetch-01.ll87
-rw-r--r--test/CodeGen/SystemZ/risbg-01.ll472
-rw-r--r--test/CodeGen/SystemZ/risbg-02.ll93
-rw-r--r--test/CodeGen/SystemZ/rnsbg-01.ll257
-rw-r--r--test/CodeGen/SystemZ/rosbg-01.ll110
-rw-r--r--test/CodeGen/SystemZ/rxsbg-01.ll112
-rw-r--r--test/CodeGen/SystemZ/setcc-01.ll74
-rw-r--r--test/CodeGen/SystemZ/setcc-02.ll174
-rw-r--r--test/CodeGen/SystemZ/shift-01.ll22
-rw-r--r--test/CodeGen/SystemZ/shift-02.ll22
-rw-r--r--test/CodeGen/SystemZ/shift-03.ll22
-rw-r--r--test/CodeGen/SystemZ/shift-04.ll28
-rw-r--r--test/CodeGen/SystemZ/shift-05.ll28
-rw-r--r--test/CodeGen/SystemZ/shift-06.ll28
-rw-r--r--test/CodeGen/SystemZ/shift-07.ll28
-rw-r--r--test/CodeGen/SystemZ/shift-08.ll28
-rw-r--r--test/CodeGen/SystemZ/shift-09.ll63
-rw-r--r--test/CodeGen/SystemZ/shift-10.ll78
-rw-r--r--test/CodeGen/SystemZ/spill-01.ll548
-rw-r--r--test/CodeGen/SystemZ/strcmp-01.ll70
-rw-r--r--test/CodeGen/SystemZ/strcmp-02.ll72
-rw-r--r--test/CodeGen/SystemZ/strcpy-01.ll50
-rw-r--r--test/CodeGen/SystemZ/strlen-01.ll39
-rw-r--r--test/CodeGen/SystemZ/strlen-02.ll39
-rw-r--r--test/CodeGen/SystemZ/tls-01.ll2
-rw-r--r--test/CodeGen/SystemZ/unaligned-01.ll62
-rw-r--r--test/CodeGen/SystemZ/xor-01.ll70
-rw-r--r--test/CodeGen/SystemZ/xor-02.ll8
-rw-r--r--test/CodeGen/SystemZ/xor-03.ll64
-rw-r--r--test/CodeGen/SystemZ/xor-04.ll14
-rw-r--r--test/CodeGen/SystemZ/xor-05.ll26
-rw-r--r--test/CodeGen/SystemZ/xor-06.ll16
-rw-r--r--test/CodeGen/SystemZ/xor-07.ll39
-rw-r--r--test/CodeGen/SystemZ/xor-08.ll57
-rw-r--r--test/CodeGen/Thumb/2009-08-20-ISelBug.ll2
-rw-r--r--test/CodeGen/Thumb/2010-07-15-debugOrdering.ll150
-rw-r--r--test/CodeGen/Thumb/2012-04-26-M0ISelBug.ll2
-rw-r--r--test/CodeGen/Thumb/PR17309.ll57
-rw-r--r--test/CodeGen/Thumb/barrier.ll6
-rw-r--r--test/CodeGen/Thumb/dyn-stackalloc.ll4
-rw-r--r--test/CodeGen/Thumb/ispositive.ll2
-rw-r--r--test/CodeGen/Thumb/large-stack.ll12
-rw-r--r--test/CodeGen/Thumb/ldr_frame.ll8
-rw-r--r--test/CodeGen/Thumb/lit.local.cfg2
-rw-r--r--test/CodeGen/Thumb/pop.ll2
-rw-r--r--test/CodeGen/Thumb/push.ll2
-rw-r--r--test/CodeGen/Thumb/rev.ll4
-rw-r--r--test/CodeGen/Thumb/select.ll28
-rw-r--r--test/CodeGen/Thumb/trap.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-07-21-ISelBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll4
-rw-r--r--test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll6
-rw-r--r--test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll4
-rw-r--r--test/CodeGen/Thumb2/2010-11-22-EpilogueBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2011-04-21-FILoweringBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll2
-rw-r--r--test/CodeGen/Thumb2/2013-03-06-vector-sext-operand-scalarize.ll4
-rw-r--r--test/CodeGen/Thumb2/buildvector-crash.ll2
-rw-r--r--test/CodeGen/Thumb2/carry.ll6
-rw-r--r--test/CodeGen/Thumb2/cross-rc-coalescing-2.ll2
-rw-r--r--test/CodeGen/Thumb2/div.ll20
-rw-r--r--test/CodeGen/Thumb2/large-call.ll2
-rw-r--r--test/CodeGen/Thumb2/large-stack.ll12
-rw-r--r--test/CodeGen/Thumb2/lit.local.cfg2
-rw-r--r--test/CodeGen/Thumb2/longMACt.ll8
-rw-r--r--test/CodeGen/Thumb2/lsr-deficiency.ll2
-rw-r--r--test/CodeGen/Thumb2/machine-licm.ll6
-rw-r--r--test/CodeGen/Thumb2/mul_const.ll4
-rw-r--r--test/CodeGen/Thumb2/pic-load.ll2
-rw-r--r--test/CodeGen/Thumb2/tail-call-r9.ll14
-rw-r--r--test/CodeGen/Thumb2/thumb2-adc.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-add.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-add2.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-add3.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-add4.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-add5.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-add6.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-and.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-and2.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-asr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-asr2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-bcc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-bfc.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-bic.ll24
-rw-r--r--test/CodeGen/Thumb2/thumb2-branch.ll30
-rw-r--r--test/CodeGen/Thumb2/thumb2-call-tc.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-call.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-clz.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmn.ll18
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmn2.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmp.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-cmp2.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-eor.ll14
-rw-r--r--test/CodeGen/Thumb2/thumb2-eor2.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt1.ll11
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt2.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt3.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldm.ll6
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldr.ll14
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldrb.ll14
-rw-r--r--test/CodeGen/Thumb2/thumb2-ldrh.ll14
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsl.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsl2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsr.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-lsr2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-mla.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-mls.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-mov.ll60
-rw-r--r--test/CodeGen/Thumb2/thumb2-mul.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-mvn.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-mvn2.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-neg.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-orn.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-orn2.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-orr.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-orr2.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-pack.ll25
-rw-r--r--test/CodeGen/Thumb2/thumb2-rev.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-ror.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-rsb.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-rsb2.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-sbc.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-select.ll41
-rw-r--r--test/CodeGen/Thumb2/thumb2-spill-q.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-str.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-str_post.ll4
-rw-r--r--test/CodeGen/Thumb2/thumb2-strb.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-strh.ll16
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub.ll12
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub2.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub4.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-sub5.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-sxt-uxt.ll8
-rw-r--r--test/CodeGen/Thumb2/thumb2-tbb.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-tbh.ll2
-rw-r--r--test/CodeGen/Thumb2/thumb2-teq.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-tst.ll10
-rw-r--r--test/CodeGen/Thumb2/thumb2-tst2.ll12
-rw-r--r--test/CodeGen/Thumb2/tls2.ll8
-rw-r--r--test/CodeGen/Thumb2/v8_IT_1.ll17
-rw-r--r--test/CodeGen/Thumb2/v8_IT_2.ll38
-rw-r--r--test/CodeGen/Thumb2/v8_IT_3.ll77
-rw-r--r--test/CodeGen/Thumb2/v8_IT_4.ll45
-rw-r--r--test/CodeGen/Thumb2/v8_IT_5.ll63
-rw-r--r--test/CodeGen/X86/2006-05-02-InstrSched1.ll6
-rw-r--r--test/CodeGen/X86/2006-05-11-InstrSched.ll2
-rw-r--r--test/CodeGen/X86/2006-11-12-CSRetCC.ll2
-rw-r--r--test/CodeGen/X86/2007-01-08-InstrSched.ll4
-rw-r--r--test/CodeGen/X86/2007-02-04-OrAddrMode.ll4
-rw-r--r--test/CodeGen/X86/2007-02-23-DAGCombine-Miscompile.ll2
-rw-r--r--test/CodeGen/X86/2007-03-24-InlineAsmXConstraint.ll2
-rw-r--r--test/CodeGen/X86/2007-05-07-InvokeSRet.ll19
-rw-r--r--test/CodeGen/X86/2007-09-06-ExtWeakAliasee.ll13
-rw-r--r--test/CodeGen/X86/2007-09-27-LDIntrinsics.ll4
-rw-r--r--test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll6
-rw-r--r--test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll2
-rw-r--r--test/CodeGen/X86/2008-01-08-SchedulerCrash.ll2
-rw-r--r--test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll99
-rw-r--r--test/CodeGen/X86/2008-03-14-SpillerCrash.ll4
-rw-r--r--test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll2
-rw-r--r--test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll2
-rw-r--r--test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll2
-rw-r--r--test/CodeGen/X86/2008-07-19-movups-spills.ll673
-rw-r--r--test/CodeGen/X86/2008-08-19-SubAndFetch.ll2
-rw-r--r--test/CodeGen/X86/2008-08-31-EH_RETURN32.ll2
-rw-r--r--test/CodeGen/X86/2008-09-11-CoalescerBug2.ll12
-rw-r--r--test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll18
-rw-r--r--test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll2
-rw-r--r--test/CodeGen/X86/2009-02-26-MachineLICMBug.ll10
-rw-r--r--test/CodeGen/X86/2009-03-23-MultiUseSched.ll2
-rw-r--r--test/CodeGen/X86/2009-04-21-NoReloadImpDef.ll2
-rw-r--r--test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll2
-rw-r--r--test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll2
-rw-r--r--test/CodeGen/X86/2009-10-16-Scope.ll12
-rw-r--r--test/CodeGen/X86/2009-11-16-MachineLICM.ll2
-rw-r--r--test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll2
-rw-r--r--test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll4
-rw-r--r--test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll2
-rw-r--r--test/CodeGen/X86/2010-01-08-Atomic64Bug.ll2
-rw-r--r--test/CodeGen/X86/2010-01-18-DbgValue.ll15
-rw-r--r--test/CodeGen/X86/2010-02-01-DbgValueCrash.ll24
-rw-r--r--test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll6
-rw-r--r--test/CodeGen/X86/2010-02-23-DAGCombineBug.ll2
-rw-r--r--test/CodeGen/X86/2010-04-08-CoalescerBug.ll2
-rw-r--r--test/CodeGen/X86/2010-05-25-DotDebugLoc.ll11
-rw-r--r--test/CodeGen/X86/2010-05-26-DotDebugLoc.ll37
-rw-r--r--test/CodeGen/X86/2010-05-28-Crash.ll23
-rw-r--r--test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll43
-rw-r--r--test/CodeGen/X86/2010-07-06-DbgCrash.ll22
-rw-r--r--test/CodeGen/X86/2010-07-29-SetccSimplify.ll2
-rw-r--r--test/CodeGen/X86/2010-08-04-StackVariable.ll56
-rw-r--r--test/CodeGen/X86/2010-09-16-EmptyFilename.ll19
-rw-r--r--test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll8
-rw-r--r--test/CodeGen/X86/2010-11-02-DbgParameter.ll21
-rw-r--r--test/CodeGen/X86/2010-12-02-MC-Set.ll14
-rw-r--r--test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll19
-rw-r--r--test/CodeGen/X86/2011-04-19-sclr-bb.ll2
-rw-r--r--test/CodeGen/X86/2011-05-09-loaduse.ll2
-rw-r--r--test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll15
-rw-r--r--test/CodeGen/X86/2011-06-03-x87chain.ll18
-rw-r--r--test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll2
-rw-r--r--test/CodeGen/X86/2011-09-14-valcoalesce.ll37
-rw-r--r--test/CodeGen/X86/2011-09-18-sse2cmp.ll2
-rw-r--r--test/CodeGen/X86/2011-09-21-setcc-bug.ll2
-rw-r--r--test/CodeGen/X86/2011-10-11-srl.ll2
-rw-r--r--test/CodeGen/X86/2011-10-12-MachineCSE.ll29
-rw-r--r--test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll2
-rw-r--r--test/CodeGen/X86/2011-10-19-LegelizeLoad.ll3
-rw-r--r--test/CodeGen/X86/2011-10-27-tstore.ll2
-rw-r--r--test/CodeGen/X86/2011-10-30-padd.ll4
-rw-r--r--test/CodeGen/X86/2011-12-06-AVXVectorExtractCombine.ll2
-rw-r--r--test/CodeGen/X86/2011-12-15-vec_shift.ll4
-rw-r--r--test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll4
-rw-r--r--test/CodeGen/X86/2011-20-21-zext-ui2fp.ll2
-rw-r--r--test/CodeGen/X86/2012-01-11-split-cv.ll2
-rw-r--r--test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll2
-rw-r--r--test/CodeGen/X86/2012-01-18-vbitcast.ll2
-rw-r--r--test/CodeGen/X86/2012-02-20-MachineCPBug.ll78
-rw-r--r--test/CodeGen/X86/2012-04-26-sdglue.ll6
-rw-r--r--test/CodeGen/X86/2012-05-17-TwoAddressBug.ll2
-rw-r--r--test/CodeGen/X86/2012-07-10-extload64.ll4
-rw-r--r--test/CodeGen/X86/2012-07-15-broadcastfold.ll2
-rw-r--r--test/CodeGen/X86/2012-08-07-CmpISelBug.ll2
-rw-r--r--test/CodeGen/X86/2012-08-16-setcc.ll8
-rw-r--r--test/CodeGen/X86/2012-08-17-legalizer-crash.ll3
-rw-r--r--test/CodeGen/X86/2012-1-10-buildvector.ll4
-rw-r--r--test/CodeGen/X86/2012-11-30-handlemove-dbg.ll8
-rw-r--r--test/CodeGen/X86/2012-11-30-misched-dbg.ll8
-rw-r--r--test/CodeGen/X86/2012-11-30-regpres-dbg.ll10
-rw-r--r--test/CodeGen/X86/2013-03-13-VEX-DestReg.ll2
-rw-r--r--test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll132
-rw-r--r--test/CodeGen/X86/3addr-16bit.ll23
-rw-r--r--test/CodeGen/X86/3addr-or.ll10
-rw-r--r--test/CodeGen/X86/GC/lit.local.cfg2
-rw-r--r--test/CodeGen/X86/GC/ocaml-gc-assert.ll21
-rw-r--r--test/CodeGen/X86/GC/ocaml-gc.ll20
-rw-r--r--test/CodeGen/X86/MachineSink-CritEdge.ll4
-rw-r--r--test/CodeGen/X86/MachineSink-DbgValue.ll15
-rw-r--r--test/CodeGen/X86/MergeConsecutiveStores.ll10
-rw-r--r--test/CodeGen/X86/StackColoring-dbg.ll9
-rw-r--r--test/CodeGen/X86/StackColoring.ll43
-rw-r--r--test/CodeGen/X86/WidenArith.ll2
-rw-r--r--test/CodeGen/X86/abi-isel.ll1224
-rw-r--r--test/CodeGen/X86/add-of-carry.ll6
-rw-r--r--test/CodeGen/X86/add.ll34
-rw-r--r--test/CodeGen/X86/aes_intrinsics.ll48
-rw-r--r--test/CodeGen/X86/alias-error.ll5
-rw-r--r--test/CodeGen/X86/aliases.ll26
-rw-r--r--test/CodeGen/X86/alloca-align-rounding-32.ll2
-rw-r--r--test/CodeGen/X86/alloca-align-rounding.ll2
-rw-r--r--test/CodeGen/X86/and-su.ll4
-rw-r--r--test/CodeGen/X86/anyregcc-crash.ll17
-rw-r--r--test/CodeGen/X86/anyregcc.ll348
-rw-r--r--test/CodeGen/X86/apm.ll8
-rw-r--r--test/CodeGen/X86/asm-global-imm.ll2
-rw-r--r--test/CodeGen/X86/asm-modifier-P.ll16
-rw-r--r--test/CodeGen/X86/asm-modifier.ll8
-rw-r--r--test/CodeGen/X86/atom-bypass-slow-division-64.ll21
-rw-r--r--test/CodeGen/X86/atom-bypass-slow-division.ll20
-rw-r--r--test/CodeGen/X86/atom-call-reg-indirect.ll14
-rw-r--r--test/CodeGen/X86/atom-lea-addw-bug.ll19
-rw-r--r--test/CodeGen/X86/atom-lea-sp.ll12
-rw-r--r--test/CodeGen/X86/atom-sched.ll4
-rw-r--r--test/CodeGen/X86/atomic-dagsched.ll8
-rw-r--r--test/CodeGen/X86/atomic-minmax-i6432.ll2
-rw-r--r--test/CodeGen/X86/atomic-or.ll4
-rw-r--r--test/CodeGen/X86/atomic_add.ll48
-rw-r--r--test/CodeGen/X86/avx-arith.ll5
-rw-r--r--test/CodeGen/X86/avx-basic.ll4
-rw-r--r--test/CodeGen/X86/avx-bitcast.ll2
-rw-r--r--test/CodeGen/X86/avx-blend.ll26
-rw-r--r--test/CodeGen/X86/avx-brcond.ll12
-rwxr-xr-xtest/CodeGen/X86/avx-fp2int.ll4
-rw-r--r--test/CodeGen/X86/avx-intel-ocl.ll13
-rw-r--r--test/CodeGen/X86/avx-minmax.ll16
-rwxr-xr-xtest/CodeGen/X86/avx-sext.ll11
-rw-r--r--test/CodeGen/X86/avx-shift.ll3
-rwxr-xr-xtest/CodeGen/X86/avx-shuffle-x86_32.ll2
-rw-r--r--test/CodeGen/X86/avx-shuffle.ll16
-rw-r--r--test/CodeGen/X86/avx-splat.ll2
-rwxr-xr-xtest/CodeGen/X86/avx-trunc.ll7
-rw-r--r--test/CodeGen/X86/avx-varargs-x86_64.ll2
-rw-r--r--test/CodeGen/X86/avx-vextractf128.ll2
-rw-r--r--test/CodeGen/X86/avx-vpermil.ll2
-rwxr-xr-xtest/CodeGen/X86/avx-zext.ll18
-rw-r--r--test/CodeGen/X86/avx2-arith.ll112
-rwxr-xr-xtest/CodeGen/X86/avx2-conversions.ll28
-rw-r--r--test/CodeGen/X86/avx2-gather.ll18
-rw-r--r--test/CodeGen/X86/avx2-logic.ll4
-rw-r--r--test/CodeGen/X86/avx2-palignr.ll18
-rw-r--r--test/CodeGen/X86/avx2-phaddsub.ll16
-rw-r--r--test/CodeGen/X86/avx2-shift.ll8
-rw-r--r--test/CodeGen/X86/avx2-shuffle.ll8
-rw-r--r--test/CodeGen/X86/avx2-vbroadcast.ll24
-rw-r--r--test/CodeGen/X86/avx2-vector-shifts.ll247
-rw-r--r--test/CodeGen/X86/avx512-arith.ll271
-rw-r--r--test/CodeGen/X86/avx512-build-vector.ll18
-rw-r--r--test/CodeGen/X86/avx512-cmp.ll27
-rw-r--r--test/CodeGen/X86/avx512-cvt.ll217
-rw-r--r--test/CodeGen/X86/avx512-fma-intrinsics.ll97
-rw-r--r--test/CodeGen/X86/avx512-fma.ll83
-rw-r--r--test/CodeGen/X86/avx512-gather-scatter-intrin.ll225
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll125
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll374
-rw-r--r--test/CodeGen/X86/avx512-mask-op.ll57
-rw-r--r--test/CodeGen/X86/avx512-mov.ll155
-rw-r--r--test/CodeGen/X86/avx512-select.ll22
-rw-r--r--test/CodeGen/X86/avx512-shift.ll108
-rw-r--r--test/CodeGen/X86/avx512-shuffle.ll226
-rw-r--r--test/CodeGen/X86/avx512-trunc-ext.ll127
-rw-r--r--test/CodeGen/X86/avx512-vbroadcast.ll53
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll113
-rw-r--r--test/CodeGen/X86/bc-extract.ll2
-rw-r--r--test/CodeGen/X86/bigstructret2.ll10
-rw-r--r--test/CodeGen/X86/bitcast2.ll4
-rw-r--r--test/CodeGen/X86/blend-msb.ll8
-rw-r--r--test/CodeGen/X86/block-placement.ll50
-rw-r--r--test/CodeGen/X86/bmi.ll136
-rw-r--r--test/CodeGen/X86/bool-simplify.ll2
-rw-r--r--test/CodeGen/X86/brcond.ll18
-rw-r--r--test/CodeGen/X86/break-anti-dependencies.ll2
-rw-r--r--test/CodeGen/X86/break-avx-dep.ll29
-rw-r--r--test/CodeGen/X86/break-sse-dep.ll12
-rw-r--r--test/CodeGen/X86/bswap-inline-asm.ll24
-rw-r--r--test/CodeGen/X86/bswap.ll113
-rw-r--r--test/CodeGen/X86/bt.ll54
-rw-r--r--test/CodeGen/X86/btq.ll4
-rw-r--r--test/CodeGen/X86/byval7.ll6
-rw-r--r--test/CodeGen/X86/call-push.ll2
-rw-r--r--test/CodeGen/X86/chain_order.ll37
-rw-r--r--test/CodeGen/X86/change-compare-stride-1.ll2
-rw-r--r--test/CodeGen/X86/change-compare-stride-trickiness-0.ll2
-rw-r--r--test/CodeGen/X86/change-compare-stride-trickiness-1.ll2
-rw-r--r--test/CodeGen/X86/clz.ll24
-rw-r--r--test/CodeGen/X86/cmov-fp.ll192
-rw-r--r--test/CodeGen/X86/cmov-into-branch.ll10
-rw-r--r--test/CodeGen/X86/cmov.ll24
-rw-r--r--test/CodeGen/X86/cmp.ll27
-rw-r--r--test/CodeGen/X86/coalesce-implicitdef.ll35
-rw-r--r--test/CodeGen/X86/coalescer-commute1.ll2
-rw-r--r--test/CodeGen/X86/code_placement_align_all.ll2
-rw-r--r--test/CodeGen/X86/codegen-prepare.ll2
-rw-r--r--test/CodeGen/X86/codemodel.ll24
-rw-r--r--test/CodeGen/X86/coff-feat00.ll7
-rw-r--r--test/CodeGen/X86/commute-two-addr.ll14
-rw-r--r--test/CodeGen/X86/compact-unwind.ll23
-rw-r--r--test/CodeGen/X86/compare-inf.ll114
-rw-r--r--test/CodeGen/X86/compiler_used.ll2
-rw-r--r--test/CodeGen/X86/conditional-indecrement.ll16
-rw-r--r--test/CodeGen/X86/crash-nosse.ll2
-rw-r--r--test/CodeGen/X86/crash.ll13
-rw-r--r--test/CodeGen/X86/critical-edge-split-2.ll2
-rw-r--r--test/CodeGen/X86/ctpop-combine.ll6
-rw-r--r--test/CodeGen/X86/dag-rauw-cse.ll2
-rw-r--r--test/CodeGen/X86/dagcombine-buildvector.ll4
-rw-r--r--test/CodeGen/X86/dagcombine-shifts.ll209
-rw-r--r--test/CodeGen/X86/dagcombine-unsafe-math.ll (renamed from test/CodeGen/X86/dagcombine_unsafe_math.ll)2
-rw-r--r--test/CodeGen/X86/dbg-at-specficiation.ll20
-rw-r--r--test/CodeGen/X86/dbg-byval-parameter.ll49
-rw-r--r--test/CodeGen/X86/dbg-const-int.ll30
-rw-r--r--test/CodeGen/X86/dbg-const.ll36
-rw-r--r--test/CodeGen/X86/dbg-declare-arg.ll125
-rw-r--r--test/CodeGen/X86/dbg-declare.ll55
-rw-r--r--test/CodeGen/X86/dbg-file-name.ll21
-rw-r--r--test/CodeGen/X86/dbg-i128-const.ll31
-rw-r--r--test/CodeGen/X86/dbg-large-unsigned-const.ll58
-rw-r--r--test/CodeGen/X86/dbg-merge-loc-entry.ll78
-rw-r--r--test/CodeGen/X86/dbg-prolog-end.ll55
-rw-r--r--test/CodeGen/X86/dbg-subrange.ll34
-rw-r--r--test/CodeGen/X86/dbg-value-dag-combine.ll47
-rw-r--r--test/CodeGen/X86/dbg-value-isel.ll103
-rw-r--r--test/CodeGen/X86/dbg-value-location.ll74
-rw-r--r--test/CodeGen/X86/dbg-value-range.ll60
-rw-r--r--test/CodeGen/X86/divide-by-constant.ll18
-rw-r--r--test/CodeGen/X86/dwarf-comp-dir.ll5
-rw-r--r--test/CodeGen/X86/dyn_alloca_aligned.ll9
-rw-r--r--test/CodeGen/X86/emit-big-cst.ll17
-rw-r--r--test/CodeGen/X86/extended-fma-contraction.ll22
-rw-r--r--test/CodeGen/X86/extractelement-load.ll4
-rw-r--r--test/CodeGen/X86/fabs.ll18
-rw-r--r--test/CodeGen/X86/fast-cc-merge-stack-adj.ll2
-rw-r--r--test/CodeGen/X86/fast-cc-pass-in-regs.ll8
-rw-r--r--test/CodeGen/X86/fast-isel-call.ll8
-rw-r--r--test/CodeGen/X86/fast-isel-divrem-x86-64.ll10
-rw-r--r--test/CodeGen/X86/fast-isel-divrem.ll28
-rw-r--r--test/CodeGen/X86/fast-isel-extract.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-fneg.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-gep.ll20
-rw-r--r--test/CodeGen/X86/fast-isel-i1.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-mem.ll6
-rw-r--r--test/CodeGen/X86/fast-isel-ret-ext.ll12
-rw-r--r--test/CodeGen/X86/fast-isel-store.ll64
-rw-r--r--test/CodeGen/X86/fast-isel-tls.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-unaligned-store.ll18
-rw-r--r--test/CodeGen/X86/fast-isel-x86-64.ll46
-rw-r--r--test/CodeGen/X86/fast-isel-x86.ll10
-rw-r--r--test/CodeGen/X86/fastcc.ll4
-rw-r--r--test/CodeGen/X86/fastisel-gep-promote-before-add.ll37
-rw-r--r--test/CodeGen/X86/floor-soft-float.ll13
-rw-r--r--test/CodeGen/X86/fma.ll8
-rw-r--r--test/CodeGen/X86/fma_patterns.ll8
-rw-r--r--test/CodeGen/X86/fma_patterns_wide.ll84
-rw-r--r--test/CodeGen/X86/fold-add.ll2
-rw-r--r--test/CodeGen/X86/fold-and-shift.ll8
-rw-r--r--test/CodeGen/X86/fold-load-vec.ll6
-rw-r--r--test/CodeGen/X86/fold-load.ll14
-rw-r--r--test/CodeGen/X86/fold-pcmpeqd-1.ll4
-rw-r--r--test/CodeGen/X86/fold-pcmpeqd-2.ll17
-rw-r--r--test/CodeGen/X86/force-align-stack-alloca.ll2
-rw-r--r--test/CodeGen/X86/fp-elim-and-no-fp-elim.ll4
-rw-r--r--test/CodeGen/X86/fp-elim.ll70
-rw-r--r--test/CodeGen/X86/fp-fast.ll83
-rw-r--r--test/CodeGen/X86/fp-select-cmp-and.ll185
-rw-r--r--test/CodeGen/X86/fp-une-cmp.ll43
-rw-r--r--test/CodeGen/X86/fp_constant_op.ll24
-rw-r--r--test/CodeGen/X86/frame-base.ll22
-rw-r--r--test/CodeGen/X86/full-lsr.ll2
-rw-r--r--test/CodeGen/X86/gather-addresses.ll38
-rw-r--r--test/CodeGen/X86/gcc_except_table.ll6
-rw-r--r--test/CodeGen/X86/ghc-cc.ll7
-rw-r--r--test/CodeGen/X86/ghc-cc64.ll31
-rw-r--r--test/CodeGen/X86/global-sections.ll8
-rw-r--r--test/CodeGen/X86/h-register-addressing-32.ll23
-rw-r--r--test/CodeGen/X86/h-register-addressing-64.ll23
-rw-r--r--test/CodeGen/X86/h-registers-0.ll42
-rw-r--r--test/CodeGen/X86/h-registers-1.ll15
-rw-r--r--test/CodeGen/X86/h-registers-2.ll2
-rw-r--r--test/CodeGen/X86/haddsub.ll92
-rw-r--r--test/CodeGen/X86/hidden-vis-4.ll2
-rw-r--r--test/CodeGen/X86/hidden-vis.ll6
-rw-r--r--test/CodeGen/X86/hipe-cc.ll6
-rw-r--r--test/CodeGen/X86/hipe-cc64.ll12
-rw-r--r--test/CodeGen/X86/hipe-prologue.ll12
-rw-r--r--test/CodeGen/X86/hoist-common.ll14
-rw-r--r--test/CodeGen/X86/i128-mul.ll36
-rw-r--r--test/CodeGen/X86/i128-sdiv.ll6
-rw-r--r--test/CodeGen/X86/i486-fence-loop.ll27
-rw-r--r--test/CodeGen/X86/iabs.ll2
-rw-r--r--test/CodeGen/X86/ident-metadata.ll9
-rw-r--r--test/CodeGen/X86/inline-asm-R-constraint.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-error.ll2
-rw-r--r--test/CodeGen/X86/inline-asm-flag-clobber.ll31
-rw-r--r--test/CodeGen/X86/inline-asm-fpstack.ll2
-rw-r--r--test/CodeGen/X86/inreg.ll4
-rw-r--r--test/CodeGen/X86/ins_subreg_coalesce-1.ll4
-rw-r--r--test/CodeGen/X86/isel-optnone.ll42
-rw-r--r--test/CodeGen/X86/isel-sink.ll2
-rw-r--r--test/CodeGen/X86/jump_sign.ll76
-rw-r--r--test/CodeGen/X86/large-gep-chain.ll25607
-rw-r--r--test/CodeGen/X86/lea-2.ll2
-rw-r--r--test/CodeGen/X86/lea-recursion.ll3
-rw-r--r--test/CodeGen/X86/lea.ll10
-rw-r--r--test/CodeGen/X86/leaf-fp-elim.ll4
-rw-r--r--test/CodeGen/X86/legalize-shift-64.ll39
-rw-r--r--test/CodeGen/X86/licm-dominance.ll2
-rw-r--r--test/CodeGen/X86/licm-nested.ll2
-rw-r--r--test/CodeGen/X86/lit.local.cfg8
-rw-r--r--test/CodeGen/X86/load-slice.ll139
-rw-r--r--test/CodeGen/X86/lock-inst-encoding.ll10
-rw-r--r--test/CodeGen/X86/long-extend.ll18
-rw-r--r--test/CodeGen/X86/longlong-deadload.ll2
-rw-r--r--test/CodeGen/X86/loop-blocks.ll8
-rw-r--r--test/CodeGen/X86/lsr-interesting-step.ll2
-rw-r--r--test/CodeGen/X86/lsr-loop-exit-cond.ll15
-rw-r--r--test/CodeGen/X86/lsr-reuse.ll22
-rw-r--r--test/CodeGen/X86/lsr-static-addr.ll3
-rw-r--r--test/CodeGen/X86/lzcnt.ll16
-rw-r--r--test/CodeGen/X86/machine-cp.ll4
-rw-r--r--test/CodeGen/X86/machine-cse.ll11
-rw-r--r--test/CodeGen/X86/masked-iv-safe.ll50
-rw-r--r--test/CodeGen/X86/maskmovdqu.ll6
-rw-r--r--test/CodeGen/X86/mcinst-avx-lowering.ll4
-rw-r--r--test/CodeGen/X86/mcinst-lowering.ll18
-rw-r--r--test/CodeGen/X86/memcmp.ll14
-rw-r--r--test/CodeGen/X86/memcpy-2.ll54
-rw-r--r--test/CodeGen/X86/memcpy.ll12
-rw-r--r--test/CodeGen/X86/memset-2.ll8
-rw-r--r--test/CodeGen/X86/memset-sse-stack-realignment.ll20
-rw-r--r--test/CodeGen/X86/merge_store.ll30
-rw-r--r--test/CodeGen/X86/mingw-alloca.ll25
-rw-r--r--test/CodeGen/X86/misched-balance.ll59
-rw-r--r--test/CodeGen/X86/misched-copy.ll12
-rw-r--r--test/CodeGen/X86/misched-fusion.ll108
-rw-r--r--test/CodeGen/X86/misched-matmul.ll11
-rw-r--r--test/CodeGen/X86/misched-matrix.ll12
-rw-r--r--test/CodeGen/X86/mmx-arg-passing.ll21
-rw-r--r--test/CodeGen/X86/mmx-builtins.ll12
-rw-r--r--test/CodeGen/X86/mmx-punpckhdq.ll2
-rw-r--r--test/CodeGen/X86/mmx-shift.ll15
-rw-r--r--test/CodeGen/X86/movbe.ll17
-rw-r--r--test/CodeGen/X86/movgs.ll20
-rw-r--r--test/CodeGen/X86/movmsk.ll6
-rw-r--r--test/CodeGen/X86/ms-inline-asm.ll4
-rw-r--r--test/CodeGen/X86/narrow-shl-cst.ll22
-rw-r--r--test/CodeGen/X86/narrow-shl-load.ll2
-rw-r--r--test/CodeGen/X86/narrow_op-1.ll13
-rw-r--r--test/CodeGen/X86/neg_cmp.ll2
-rw-r--r--test/CodeGen/X86/neg_fp.ll2
-rw-r--r--test/CodeGen/X86/newline-and-quote.ll6
-rw-r--r--test/CodeGen/X86/no-cmov.ll2
-rw-r--r--test/CodeGen/X86/no-compact-unwind.ll24
-rw-r--r--test/CodeGen/X86/no-elf-compact-unwind.ll48
-rw-r--r--test/CodeGen/X86/nocx16.ll21
-rw-r--r--test/CodeGen/X86/non-lazy-bind.ll6
-rw-r--r--test/CodeGen/X86/nonconst-static-ev.ll9
-rw-r--r--test/CodeGen/X86/nonconst-static-iv.ll9
-rw-r--r--test/CodeGen/X86/nosse-error1.ll2
-rw-r--r--test/CodeGen/X86/nosse-error2.ll2
-rw-r--r--test/CodeGen/X86/object-size.ll8
-rw-r--r--test/CodeGen/X86/opt-shuff-tstore.ll2
-rw-r--r--test/CodeGen/X86/optimize-max-3.ll4
-rw-r--r--test/CodeGen/X86/or-address.ll2
-rw-r--r--test/CodeGen/X86/palignr-2.ll4
-rw-r--r--test/CodeGen/X86/palignr.ll20
-rw-r--r--test/CodeGen/X86/pass-three.ll2
-rw-r--r--test/CodeGen/X86/patchpoint.ll100
-rw-r--r--test/CodeGen/X86/peep-setb.ll18
-rw-r--r--test/CodeGen/X86/peep-test-3.ll8
-rw-r--r--test/CodeGen/X86/peep-test-4.ll191
-rw-r--r--test/CodeGen/X86/peep-vector-extract-concat.ll4
-rw-r--r--test/CodeGen/X86/phaddsub.ll56
-rw-r--r--test/CodeGen/X86/phys_subreg_coalesce-3.ll2
-rw-r--r--test/CodeGen/X86/pic.ll16
-rw-r--r--test/CodeGen/X86/pmovext.ll27
-rw-r--r--test/CodeGen/X86/pmovsx-inreg.ll51
-rw-r--r--test/CodeGen/X86/pmul.ll4
-rw-r--r--test/CodeGen/X86/pmulld.ll12
-rw-r--r--test/CodeGen/X86/popcnt.ll8
-rw-r--r--test/CodeGen/X86/postra-licm.ll4
-rw-r--r--test/CodeGen/X86/pr10523.ll2
-rw-r--r--test/CodeGen/X86/pr10524.ll2
-rw-r--r--test/CodeGen/X86/pr10525.ll2
-rw-r--r--test/CodeGen/X86/pr10526.ll2
-rw-r--r--test/CodeGen/X86/pr12312.ll2
-rw-r--r--test/CodeGen/X86/pr12360.ll8
-rw-r--r--test/CodeGen/X86/pr13209.ll2
-rw-r--r--test/CodeGen/X86/pr14088.ll15
-rw-r--r--test/CodeGen/X86/pr14090.ll10
-rw-r--r--test/CodeGen/X86/pr1505b.ll3
-rw-r--r--test/CodeGen/X86/pr16031.ll27
-rw-r--r--test/CodeGen/X86/pr16360.ll16
-rw-r--r--test/CodeGen/X86/pr16807.ll18
-rw-r--r--test/CodeGen/X86/pr17546.ll10
-rw-r--r--test/CodeGen/X86/pr17631.ll34
-rw-r--r--test/CodeGen/X86/pr17764.ll10
-rw-r--r--test/CodeGen/X86/pr18014.ll16
-rw-r--r--test/CodeGen/X86/pr18023.ll31
-rw-r--r--test/CodeGen/X86/pr18054.ll10
-rw-r--r--test/CodeGen/X86/pr18162.ll27
-rw-r--r--test/CodeGen/X86/pr2182.ll2
-rw-r--r--test/CodeGen/X86/pr3216.ll2
-rw-r--r--test/CodeGen/X86/pr3457.ll2
-rw-r--r--test/CodeGen/X86/pre-ra-sched.ll5
-rw-r--r--test/CodeGen/X86/prefetch.ll5
-rw-r--r--test/CodeGen/X86/prefixdata.ll17
-rw-r--r--test/CodeGen/X86/private.ll16
-rw-r--r--test/CodeGen/X86/promote-i16.ll4
-rw-r--r--test/CodeGen/X86/rd-mod-wr-eflags.ll2
-rw-r--r--test/CodeGen/X86/rdrand.ll18
-rw-r--r--test/CodeGen/X86/rdseed.ll14
-rw-r--r--test/CodeGen/X86/red-zone.ll4
-rw-r--r--test/CodeGen/X86/red-zone2.ll2
-rw-r--r--test/CodeGen/X86/rem-2.ll7
-rw-r--r--test/CodeGen/X86/rem.ll17
-rw-r--r--test/CodeGen/X86/remat-mov-0.ll6
-rw-r--r--test/CodeGen/X86/remat-phys-dead.ll23
-rw-r--r--test/CodeGen/X86/ret-mmx.ll8
-rw-r--r--test/CodeGen/X86/returned-trunc-tail-calls.ll97
-rw-r--r--test/CodeGen/X86/reverse_branches.ll2
-rw-r--r--test/CodeGen/X86/rodata-relocs.ll37
-rw-r--r--test/CodeGen/X86/rot16.ll16
-rw-r--r--test/CodeGen/X86/rot32.ll24
-rw-r--r--test/CodeGen/X86/rot64.ll8
-rw-r--r--test/CodeGen/X86/rounding-ops.ll42
-rw-r--r--test/CodeGen/X86/sandybridge-loads.ll2
-rw-r--r--test/CodeGen/X86/scalar_widen_div.ll2
-rw-r--r--test/CodeGen/X86/sdiv-exact.ll4
-rw-r--r--test/CodeGen/X86/segmented-stacks-dynamic.ll8
-rw-r--r--test/CodeGen/X86/segmented-stacks.ll38
-rw-r--r--test/CodeGen/X86/select.ll96
-rw-r--r--test/CodeGen/X86/select_const.ll2
-rw-r--r--test/CodeGen/X86/setcc-narrowing.ll18
-rw-r--r--test/CodeGen/X86/setcc-sentinals.ll13
-rw-r--r--test/CodeGen/X86/setcc.ll6
-rw-r--r--test/CodeGen/X86/sext-i1.ll12
-rw-r--r--test/CodeGen/X86/sext-load.ll4
-rw-r--r--test/CodeGen/X86/sext-subreg.ll2
-rw-r--r--test/CodeGen/X86/sha.ll139
-rw-r--r--test/CodeGen/X86/shift-and.ll18
-rw-r--r--test/CodeGen/X86/shift-bmi2.ll27
-rw-r--r--test/CodeGen/X86/shift-coalesce.ll4
-rw-r--r--test/CodeGen/X86/shift-codegen.ll4
-rw-r--r--test/CodeGen/X86/shift-combine.ll2
-rw-r--r--test/CodeGen/X86/shift-folding.ll10
-rw-r--r--test/CodeGen/X86/shl-anyext.ll2
-rw-r--r--test/CodeGen/X86/shl_elim.ll9
-rw-r--r--test/CodeGen/X86/shrink-compare.ll59
-rw-r--r--test/CodeGen/X86/sibcall-2.ll16
-rw-r--r--test/CodeGen/X86/sibcall-3.ll4
-rw-r--r--test/CodeGen/X86/sibcall-4.ll2
-rw-r--r--test/CodeGen/X86/sibcall-5.ll8
-rw-r--r--test/CodeGen/X86/sibcall-6.ll13
-rw-r--r--test/CodeGen/X86/sibcall.ll86
-rw-r--r--test/CodeGen/X86/simple-zext.ll16
-rw-r--r--test/CodeGen/X86/sincos-opt.ll14
-rw-r--r--test/CodeGen/X86/sincos.ll10
-rw-r--r--test/CodeGen/X86/sink-hoist.ll19
-rw-r--r--test/CodeGen/X86/smul-with-overflow.ll10
-rw-r--r--test/CodeGen/X86/splat-scalar-load.ll2
-rw-r--r--test/CodeGen/X86/sqrt-fastmath.ll60
-rw-r--r--test/CodeGen/X86/sse-align-12.ll8
-rw-r--r--test/CodeGen/X86/sse-align-2.ll4
-rw-r--r--test/CodeGen/X86/sse-commute.ll2
-rw-r--r--test/CodeGen/X86/sse-intrinsics-x86.ll308
-rw-r--r--test/CodeGen/X86/sse-minmax.ll407
-rw-r--r--test/CodeGen/X86/sse1.ll2
-rw-r--r--test/CodeGen/X86/sse2-blend.ll2
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-x86.ll712
-rw-r--r--test/CodeGen/X86/sse2-mul.ll2
-rw-r--r--test/CodeGen/X86/sse2-vector-shifts.ll247
-rw-r--r--test/CodeGen/X86/sse2.ll51
-rw-r--r--test/CodeGen/X86/sse3-intrinsics-x86.ll57
-rw-r--r--test/CodeGen/X86/sse3.ll56
-rw-r--r--test/CodeGen/X86/sse41-blend.ll16
-rw-r--r--test/CodeGen/X86/sse41-intrinsics-x86.ll326
-rw-r--r--test/CodeGen/X86/sse41.ll16
-rw-r--r--test/CodeGen/X86/sse42-intrinsics-x86.ll182
-rw-r--r--test/CodeGen/X86/sse42.ll4
-rw-r--r--test/CodeGen/X86/sse42_64.ll2
-rw-r--r--test/CodeGen/X86/sse4a.ll12
-rw-r--r--test/CodeGen/X86/sse_partial_update.ll4
-rw-r--r--test/CodeGen/X86/ssse3-intrinsics-x86.ll120
-rw-r--r--test/CodeGen/X86/stack-align-memcpy.ll2
-rw-r--r--test/CodeGen/X86/stack-align.ll4
-rw-r--r--test/CodeGen/X86/stack-protector-dbginfo.ll97
-rw-r--r--test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll61
-rw-r--r--test/CodeGen/X86/stack-protector.ll825
-rw-r--r--test/CodeGen/X86/stackmap.ll292
-rw-r--r--test/CodeGen/X86/stdcall-notailcall.ll2
-rw-r--r--test/CodeGen/X86/store-narrow.ll56
-rw-r--r--test/CodeGen/X86/store_op_load_fold.ll4
-rw-r--r--test/CodeGen/X86/store_op_load_fold2.ll8
-rw-r--r--test/CodeGen/X86/sub-with-overflow.ll6
-rw-r--r--test/CodeGen/X86/sub.ll2
-rw-r--r--test/CodeGen/X86/subtarget-feature-change.ll62
-rw-r--r--test/CodeGen/X86/switch-bt.ll4
-rw-r--r--test/CodeGen/X86/switch-order-weight.ll2
-rw-r--r--test/CodeGen/X86/tail-call-attrs.ll56
-rw-r--r--test/CodeGen/X86/tail-call-got.ll4
-rw-r--r--test/CodeGen/X86/tail-call-legality.ll32
-rw-r--r--test/CodeGen/X86/tail-opts.ll22
-rw-r--r--test/CodeGen/X86/tailcall-64.ll32
-rw-r--r--test/CodeGen/X86/tailcall-calleesave.ll19
-rw-r--r--test/CodeGen/X86/tailcall-cgp-dup.ll4
-rw-r--r--test/CodeGen/X86/tailcall-disable.ll8
-rw-r--r--test/CodeGen/X86/tailcall-largecode.ll2
-rw-r--r--test/CodeGen/X86/tailcallbyval64.ll2
-rw-r--r--test/CodeGen/X86/tailcallfp2.ll2
-rw-r--r--test/CodeGen/X86/tbm-intrinsics-x86_64.ll43
-rw-r--r--test/CodeGen/X86/tbm_patterns.ll253
-rw-r--r--test/CodeGen/X86/test-nofold.ll9
-rw-r--r--test/CodeGen/X86/test-shrink.ll36
-rw-r--r--test/CodeGen/X86/testl-commute.ll6
-rw-r--r--test/CodeGen/X86/this-return-64.ll12
-rw-r--r--test/CodeGen/X86/tls-local-dynamic.ll4
-rw-r--r--test/CodeGen/X86/tls-models.ll80
-rw-r--r--test/CodeGen/X86/tls-pic.ll20
-rw-r--r--test/CodeGen/X86/tls-pie.ll16
-rw-r--r--test/CodeGen/X86/tls.ll109
-rw-r--r--test/CodeGen/X86/tlv-1.ll4
-rw-r--r--test/CodeGen/X86/tlv-3.ll10
-rw-r--r--test/CodeGen/X86/trap.ll4
-rw-r--r--test/CodeGen/X86/trunc-ext-ld-st.ll14
-rw-r--r--test/CodeGen/X86/trunc-to-bool.ll12
-rw-r--r--test/CodeGen/X86/twoaddr-lea.ll6
-rw-r--r--test/CodeGen/X86/uint_to_fp-2.ll2
-rw-r--r--test/CodeGen/X86/umul-with-overflow.ll6
-rw-r--r--test/CodeGen/X86/unaligned-spill-folding.ll49
-rw-r--r--test/CodeGen/X86/unknown-location.ll13
-rw-r--r--test/CodeGen/X86/unwind-init.ll36
-rw-r--r--test/CodeGen/X86/use-add-flags.ll6
-rw-r--r--test/CodeGen/X86/v-binop-widen.ll3
-rw-r--r--test/CodeGen/X86/v-binop-widen2.ll2
-rw-r--r--test/CodeGen/X86/v2f32.ll30
-rw-r--r--test/CodeGen/X86/v4i32load-crash.ll27
-rw-r--r--test/CodeGen/X86/v8i1-masks.ll2
-rw-r--r--test/CodeGen/X86/vec-sign.ll4
-rw-r--r--test/CodeGen/X86/vec_cast2.ll12
-rw-r--r--test/CodeGen/X86/vec_compare-sse4.ll16
-rw-r--r--test/CodeGen/X86/vec_compare.ll28
-rw-r--r--test/CodeGen/X86/vec_extract-sse4.ll2
-rw-r--r--test/CodeGen/X86/vec_extract.ll2
-rw-r--r--test/CodeGen/X86/vec_fpext.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-2.ll14
-rw-r--r--test/CodeGen/X86/vec_insert-3.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-5.ll44
-rw-r--r--test/CodeGen/X86/vec_insert-7.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-8.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-9.ll2
-rw-r--r--test/CodeGen/X86/vec_insert.ll4
-rw-r--r--test/CodeGen/X86/vec_round.ll22
-rw-r--r--test/CodeGen/X86/vec_sdiv_to_shift.ll8
-rw-r--r--test/CodeGen/X86/vec_set-8.ll4
-rw-r--r--test/CodeGen/X86/vec_set-9.ll2
-rw-r--r--test/CodeGen/X86/vec_set-C.ll6
-rw-r--r--test/CodeGen/X86/vec_set.ll2
-rw-r--r--test/CodeGen/X86/vec_setcc.ll187
-rw-r--r--test/CodeGen/X86/vec_shift4.ll2
-rw-r--r--test/CodeGen/X86/vec_shuffle-14.ll38
-rw-r--r--test/CodeGen/X86/vec_shuffle-16.ll16
-rw-r--r--test/CodeGen/X86/vec_shuffle-17.ll4
-rw-r--r--test/CodeGen/X86/vec_shuffle-25.ll2
-rw-r--r--test/CodeGen/X86/vec_shuffle-26.ll2
-rw-r--r--test/CodeGen/X86/vec_shuffle-27.ll6
-rw-r--r--test/CodeGen/X86/vec_shuffle-36.ll2
-rw-r--r--test/CodeGen/X86/vec_shuffle-39.ll18
-rw-r--r--test/CodeGen/X86/vec_splat-2.ll2
-rw-r--r--test/CodeGen/X86/vec_splat-3.ll50
-rw-r--r--test/CodeGen/X86/vec_splat.ll8
-rw-r--r--test/CodeGen/X86/vec_split.ll42
-rw-r--r--test/CodeGen/X86/vec_ss_load_fold.ll12
-rw-r--r--test/CodeGen/X86/vec_uint_to_fp.ll2
-rw-r--r--test/CodeGen/X86/vector-gep.ll14
-rw-r--r--test/CodeGen/X86/vector-variable-idx2.ll2
-rw-r--r--test/CodeGen/X86/viabs.ll50
-rw-r--r--test/CodeGen/X86/vselect-minmax.ll384
-rw-r--r--test/CodeGen/X86/vshift-1.ll24
-rw-r--r--test/CodeGen/X86/vshift-2.ll24
-rw-r--r--test/CodeGen/X86/vshift-3.ll22
-rw-r--r--test/CodeGen/X86/vshift-4.ll26
-rw-r--r--test/CodeGen/X86/vshift-5.ll8
-rw-r--r--test/CodeGen/X86/vsplit-and.ll2
-rw-r--r--test/CodeGen/X86/warn-stack.ll24
-rw-r--r--test/CodeGen/X86/weak_def_can_be_hidden.ll26
-rw-r--r--test/CodeGen/X86/wide-fma-contraction.ll19
-rw-r--r--test/CodeGen/X86/widen_arith-1.ll2
-rw-r--r--test/CodeGen/X86/widen_arith-2.ll2
-rw-r--r--test/CodeGen/X86/widen_arith-3.ll6
-rw-r--r--test/CodeGen/X86/widen_arith-4.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-5.ll4
-rw-r--r--test/CodeGen/X86/widen_arith-6.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-1.ll4
-rw-r--r--test/CodeGen/X86/widen_cast-2.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-3.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-4.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-5.ll2
-rw-r--r--test/CodeGen/X86/widen_cast-6.ll2
-rw-r--r--test/CodeGen/X86/widen_conv-1.ll2
-rw-r--r--test/CodeGen/X86/widen_conv-2.ll6
-rw-r--r--test/CodeGen/X86/widen_conv-3.ll2
-rw-r--r--test/CodeGen/X86/widen_conv-4.ll2
-rw-r--r--test/CodeGen/X86/widen_extract-1.ll4
-rw-r--r--test/CodeGen/X86/widen_load-1.ll4
-rw-r--r--test/CodeGen/X86/widen_load-2.ll14
-rw-r--r--test/CodeGen/X86/widen_shuffle-1.ll12
-rw-r--r--test/CodeGen/X86/win32_sret.ll4
-rw-r--r--test/CodeGen/X86/win64_alloca_dynalloca.ll66
-rw-r--r--test/CodeGen/X86/win64_params.ll25
-rw-r--r--test/CodeGen/X86/win64_vararg.ll66
-rw-r--r--test/CodeGen/X86/win_chkstk.ll13
-rw-r--r--test/CodeGen/X86/x86-64-and-mask.ll12
-rw-r--r--test/CodeGen/X86/x86-64-pic-10.ll4
-rw-r--r--test/CodeGen/X86/x86-64-psub.ll220
-rw-r--r--test/CodeGen/X86/x86-64-sret-return.ll8
-rw-r--r--test/CodeGen/X86/x86-64-tls-1.ll6
-rw-r--r--test/CodeGen/X86/x86-shifts.ll12
-rw-r--r--test/CodeGen/X86/xmulo.ll6
-rw-r--r--test/CodeGen/X86/xor-icmp.ll8
-rw-r--r--test/CodeGen/X86/xor.ll67
-rw-r--r--test/CodeGen/X86/zero-remat.ll8
-rw-r--r--test/CodeGen/X86/zext-extract_subreg.ll4
-rw-r--r--test/CodeGen/X86/zext-fold.ll6
-rw-r--r--test/CodeGen/X86/zext-sext.ll17
-rw-r--r--test/CodeGen/X86/zext-shl.ll4
-rw-r--r--test/CodeGen/X86/zext-trunc.ll2
-rw-r--r--test/CodeGen/XCore/2011-08-01-DynamicAllocBug.ll2
-rw-r--r--test/CodeGen/XCore/2011-08-01-VarargsBug.ll17
-rw-r--r--test/CodeGen/XCore/addsub64.ll6
-rw-r--r--test/CodeGen/XCore/aliases.ll14
-rw-r--r--test/CodeGen/XCore/alignment.ll9
-rw-r--r--test/CodeGen/XCore/ashr.ll32
-rw-r--r--test/CodeGen/XCore/atomic.ll16
-rw-r--r--test/CodeGen/XCore/bigstructret.ll4
-rw-r--r--test/CodeGen/XCore/byVal.ll73
-rw-r--r--test/CodeGen/XCore/constants.ll10
-rw-r--r--test/CodeGen/XCore/epilogue_prologue.ll26
-rw-r--r--test/CodeGen/XCore/events.ll4
-rw-r--r--test/CodeGen/XCore/exception.ll129
-rw-r--r--test/CodeGen/XCore/float-intrinsics.ll40
-rw-r--r--test/CodeGen/XCore/fneg.ll2
-rw-r--r--test/CodeGen/XCore/getid.ll2
-rw-r--r--test/CodeGen/XCore/globals.ll20
-rw-r--r--test/CodeGen/XCore/indirectbr.ll2
-rw-r--r--test/CodeGen/XCore/inline-asm.ll32
-rw-r--r--test/CodeGen/XCore/ladd_lsub_combine.ll10
-rw-r--r--test/CodeGen/XCore/licm-ldwcp.ll2
-rw-r--r--test/CodeGen/XCore/linkage.ll38
-rw-r--r--test/CodeGen/XCore/lit.local.cfg2
-rw-r--r--test/CodeGen/XCore/load.ll17
-rw-r--r--test/CodeGen/XCore/misc-intrinsics.ll18
-rw-r--r--test/CodeGen/XCore/mkmsk.ll2
-rw-r--r--test/CodeGen/XCore/mul64.ll8
-rw-r--r--test/CodeGen/XCore/offset_folding.ll8
-rw-r--r--test/CodeGen/XCore/private.ll2
-rw-r--r--test/CodeGen/XCore/ps-intrinsics.ll4
-rw-r--r--test/CodeGen/XCore/resources.ll54
-rw-r--r--test/CodeGen/XCore/sext.ll8
-rw-r--r--test/CodeGen/XCore/shedulingPreference.ll25
-rw-r--r--test/CodeGen/XCore/sr-intrinsics.ll4
-rw-r--r--test/CodeGen/XCore/store.ll8
-rw-r--r--test/CodeGen/XCore/threads.ll142
-rw-r--r--test/CodeGen/XCore/tls.ll2
-rw-r--r--test/CodeGen/XCore/trampoline.ll2
-rw-r--r--test/CodeGen/XCore/trap.ll2
-rw-r--r--test/CodeGen/XCore/unaligned_load.ll4
-rw-r--r--test/CodeGen/XCore/unaligned_store.ll2
-rw-r--r--test/CodeGen/XCore/unaligned_store_combine.ll2
-rw-r--r--test/CodeGen/XCore/varargs.ll55
-rw-r--r--test/CodeGen/XCore/zext.ll10
-rw-r--r--test/CodeGen/XCore/zextfree.ll15
2293 files changed, 151567 insertions, 19022 deletions
diff --git a/test/CodeGen/AArch64/adc.ll b/test/CodeGen/AArch64/adc.ll
index 7cb373232a2c..26fd3e66b798 100644
--- a/test/CodeGen/AArch64/adc.ll
+++ b/test/CodeGen/AArch64/adc.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
-; CHECK: test_simple:
+; CHECK-LABEL: test_simple:
%valadd = add i128 %a, %b
; CHECK: adds [[ADDLO:x[0-9]+]], x0, x2
@@ -16,7 +16,7 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
}
define i128 @test_imm(i128 %a) {
-; CHECK: test_imm:
+; CHECK-LABEL: test_imm:
%val = add i128 %a, 12
; CHECK: adds x0, x0, #12
@@ -27,7 +27,7 @@ define i128 @test_imm(i128 %a) {
}
define i128 @test_shifted(i128 %a, i128 %b) {
-; CHECK: test_shifted:
+; CHECK-LABEL: test_shifted:
%rhs = shl i128 %b, 45
@@ -40,7 +40,7 @@ define i128 @test_shifted(i128 %a, i128 %b) {
}
define i128 @test_extended(i128 %a, i16 %b) {
-; CHECK: test_extended:
+; CHECK-LABEL: test_extended:
%ext = sext i16 %b to i128
%rhs = shl i128 %ext, 3
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
index f2c74f6952b0..269c1e8143b2 100644
--- a/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK: test_lsl_arith:
+; CHECK-LABEL: test_lsl_arith:
%rhs1 = load volatile i32* @var32
%shift1 = shl i32 %rhs1, 18
@@ -73,7 +73,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
}
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK: test_lsr_arith:
+; CHECK-LABEL: test_lsr_arith:
%shift1 = lshr i32 %rhs32, 18
%val1 = add i32 %lhs32, %shift1
@@ -132,7 +132,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
}
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK: test_asr_arith:
+; CHECK-LABEL: test_asr_arith:
%shift1 = ashr i32 %rhs32, 18
%val1 = add i32 %lhs32, %shift1
@@ -191,7 +191,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
}
define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK: test_cmp:
+; CHECK-LABEL: test_cmp:
%shift1 = shl i32 %rhs32, 13
%tst1 = icmp uge i32 %lhs32, %shift1
@@ -237,7 +237,7 @@ end:
}
define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
-; CHECK: test_cmn:
+; CHECK-LABEL: test_cmn:
%shift1 = shl i32 %rhs32, 13
%val1 = sub i32 0, %shift1
diff --git a/test/CodeGen/AArch64/addsub.ll b/test/CodeGen/AArch64/addsub.ll
index 5148807163c9..4d46d04b80f1 100644
--- a/test/CodeGen/AArch64/addsub.ll
+++ b/test/CodeGen/AArch64/addsub.ll
@@ -9,7 +9,7 @@
; Add pure 12-bit immediates:
define void @add_small() {
-; CHECK: add_small:
+; CHECK-LABEL: add_small:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095
%val32 = load i32* @var_i32
@@ -26,7 +26,7 @@ define void @add_small() {
; Add 12-bit immediates, shifted left by 12 bits
define void @add_med() {
-; CHECK: add_med:
+; CHECK-LABEL: add_med:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
%val32 = load i32* @var_i32
@@ -43,7 +43,7 @@ define void @add_med() {
; Subtract 12-bit immediates
define void @sub_small() {
-; CHECK: sub_small:
+; CHECK-LABEL: sub_small:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095
%val32 = load i32* @var_i32
@@ -60,7 +60,7 @@ define void @sub_small() {
; Subtract 12-bit immediates, shifted left by 12 bits
define void @sub_med() {
-; CHECK: sub_med:
+; CHECK-LABEL: sub_med:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #3567, lsl #12
%val32 = load i32* @var_i32
@@ -76,7 +76,7 @@ define void @sub_med() {
}
define void @testing() {
-; CHECK: testing:
+; CHECK-LABEL: testing:
%val = load i32* @var_i32
; CHECK: cmp {{w[0-9]+}}, #4095
diff --git a/test/CodeGen/AArch64/addsub_ext.ll b/test/CodeGen/AArch64/addsub_ext.ll
index 2dd16626ea9f..f0e11c652240 100644
--- a/test/CodeGen/AArch64/addsub_ext.ll
+++ b/test/CodeGen/AArch64/addsub_ext.ll
@@ -6,7 +6,7 @@
@var64 = global i64 0
define void @addsub_i8rhs() {
-; CHECK: addsub_i8rhs:
+; CHECK-LABEL: addsub_i8rhs:
%val8_tmp = load i8* @var8
%lhs32 = load i32* @var32
%lhs64 = load i64* @var64
@@ -81,7 +81,7 @@ end:
}
define void @addsub_i16rhs() {
-; CHECK: addsub_i16rhs:
+; CHECK-LABEL: addsub_i16rhs:
%val16_tmp = load i16* @var16
%lhs32 = load i32* @var32
%lhs64 = load i64* @var64
@@ -159,7 +159,7 @@ end:
; example), but the remaining instructions are probably not idiomatic
; in the face of "add/sub (shifted register)" so I don't intend to.
define void @addsub_i32rhs() {
-; CHECK: addsub_i32rhs:
+; CHECK-LABEL: addsub_i32rhs:
%val32_tmp = load i32* @var32
%lhs64 = load i64* @var64
@@ -186,4 +186,4 @@ define void @addsub_i32rhs() {
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw #2
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/adrp-relocation.ll b/test/CodeGen/AArch64/adrp-relocation.ll
deleted file mode 100644
index cf411166a3a0..000000000000
--- a/test/CodeGen/AArch64/adrp-relocation.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -filetype=obj < %s | llvm-readobj -s -r | FileCheck %s
-
-define i64 @testfn() nounwind {
-entry:
- ret i64 0
-}
-
-define i64 @foo() nounwind {
-entry:
- %bar = alloca i64 ()*, align 8
- store i64 ()* @testfn, i64 ()** %bar, align 8
- %call = call i64 @testfn()
- ret i64 %call
-}
-
-; The above should produce an ADRP/ADD pair to calculate the address of
-; testfn. The important point is that LLVM shouldn't think it can deal with the
-; relocation on the ADRP itself (even though it knows everything about the
-; relative offsets of testfn and foo) because its value depends on where this
-; object file's .text section gets relocated in memory.
-
-; CHECK: Relocations [
-; CHECK-NEXT: Section (1) .text {
-; CHECK-NEXT: 0x10 R_AARCH64_ADR_PREL_PG_HI21 testfn 0x0
-; CHECK-NEXT: 0x14 R_AARCH64_ADD_ABS_LO12_NC testfn 0x0
-; CHECK-NEXT: }
-; CHECK-NEXT: ]
diff --git a/test/CodeGen/AArch64/alloca.ll b/test/CodeGen/AArch64/alloca.ll
index c62edf6503c6..1d3c0a02ac87 100644
--- a/test/CodeGen/AArch64/alloca.ll
+++ b/test/CodeGen/AArch64/alloca.ll
@@ -1,19 +1,20 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-NOFP %s
declare void @use_addr(i8*)
define void @test_simple_alloca(i64 %n) {
-; CHECK: test_simple_alloca:
+; CHECK-LABEL: test_simple_alloca:
%buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
-; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
-; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0
+; CHECK-DAG: add [[SPDELTA:x[0-9]+]], x0, #15
+; CHECK-DAG: and x0, [[SPDELTA]], #0xfffffffffffffff0
; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
-; CHECK: mov [[TMP:x[0-9]+]], sp
-; CHECK: sub x0, [[TMP]], [[SPDELTA]]
+; CHECK-DAG: mov [[TMP:x[0-9]+]], sp
+; CHECK-DAG: sub x0, [[TMP]], [[SPDELTA]]
; CHECK: mov sp, x0
call void @use_addr(i8* %buf)
@@ -30,20 +31,20 @@ define void @test_simple_alloca(i64 %n) {
declare void @use_addr_loc(i8*, i64*)
define i64 @test_alloca_with_local(i64 %n) {
-; CHECK: test_alloca_with_local:
+; CHECK-LABEL: test_alloca_with_local:
; CHECK: sub sp, sp, #32
; CHECK: stp x29, x30, [sp, #16]
%loc = alloca i64
%buf = alloca i8, i64 %n
; Make sure we align the stack change to 16 bytes:
-; CHECK: add [[SPDELTA:x[0-9]+]], x0, #15
-; CHECK: and x0, [[SPDELTA]], #0xfffffffffffffff0
+; CHECK-DAG: add [[SPDELTA:x[0-9]+]], x0, #15
+; CHECK-DAG: and x0, [[SPDELTA]], #0xfffffffffffffff0
; Make sure we change SP. It would be surprising if anything but x0 were used
; for the final sp, but it could be if it was then moved into x0.
-; CHECK: mov [[TMP:x[0-9]+]], sp
-; CHECK: sub x0, [[TMP]], [[SPDELTA]]
+; CHECK-DAG: mov [[TMP:x[0-9]+]], sp
+; CHECK-DAG: sub x0, [[TMP]], [[SPDELTA]]
; CHECK: mov sp, x0
; Obviously suboptimal code here, but it needs to get &local into x1
@@ -73,9 +74,15 @@ define void @test_variadic_alloca(i64 %n, ...) {
; CHECK: add x29, sp, #192
; CHECK: sub [[TMP:x[0-9]+]], x29, #192
; CHECK: add x8, [[TMP]], #0
-; CHECK: str q7, [x8, #112]
+; CHECK-FP: str q7, [x8, #112]
; [...]
-; CHECK: str q1, [x8, #16]
+; CHECK-FP: str q1, [x8, #16]
+
+; CHECK-NOFP: sub sp, sp, #80
+; CHECK-NOFP: stp x29, x30, [sp, #64]
+; CHECK-NOFP: add x29, sp, #64
+; CHECK-NOFP: sub [[TMP:x[0-9]+]], x29, #64
+; CHECK-NOFP: add x8, [[TMP]], #0
%addr = alloca i8, i64 %n
@@ -86,10 +93,14 @@ define void @test_variadic_alloca(i64 %n, ...) {
; CHECK: sub sp, x29, #192
; CHECK: ldp x29, x30, [sp, #192]
; CHECK: add sp, sp, #208
+
+; CHECK-NOFP: sub sp, x29, #64
+; CHECK-NOFP: ldp x29, x30, [sp, #64]
+; CHECK-NOFP: add sp, sp, #80
}
define void @test_alloca_large_frame(i64 %n) {
-; CHECK: test_alloca_large_frame:
+; CHECK-LABEL: test_alloca_large_frame:
; CHECK: sub sp, sp, #496
; CHECK: stp x29, x30, [sp, #480]
@@ -112,16 +123,16 @@ declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)
define void @test_scoped_alloca(i64 %n) {
-; CHECK: test_scoped_alloca
+; CHECK-LABEL: test_scoped_alloca:
; CHECK: sub sp, sp, #32
%sp = call i8* @llvm.stacksave()
; CHECK: mov [[SAVED_SP:x[0-9]+]], sp
+; CHECK: mov [[OLDSP:x[0-9]+]], sp
%addr = alloca i8, i64 %n
; CHECK: and [[SPDELTA:x[0-9]+]], {{x[0-9]+}}, #0xfffffffffffffff0
-; CHECK: mov [[OLDSP:x[0-9]+]], sp
-; CHECK: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]]
+; CHECK-DAG: sub [[NEWSP:x[0-9]+]], [[OLDSP]], [[SPDELTA]]
; CHECK: mov sp, [[NEWSP]]
call void @use_addr(i8* %addr)
diff --git a/test/CodeGen/AArch64/analyze-branch.ll b/test/CodeGen/AArch64/analyze-branch.ll
index e10bbb0f8691..36bc2e00d238 100644
--- a/test/CodeGen/AArch64/analyze-branch.ll
+++ b/test/CodeGen/AArch64/analyze-branch.ll
@@ -11,7 +11,7 @@ declare void @test_false()
!1 = metadata !{metadata !"branch_weights", i32 4, i32 64}
define void @test_Bcc_fallthrough_taken(i32 %in) nounwind {
-; CHECK: test_Bcc_fallthrough_taken:
+; CHECK-LABEL: test_Bcc_fallthrough_taken:
%tst = icmp eq i32 %in, 42
br i1 %tst, label %true, label %false, !prof !0
@@ -34,7 +34,7 @@ false:
}
define void @test_Bcc_fallthrough_nottaken(i32 %in) nounwind {
-; CHECK: test_Bcc_fallthrough_nottaken:
+; CHECK-LABEL: test_Bcc_fallthrough_nottaken:
%tst = icmp eq i32 %in, 42
br i1 %tst, label %true, label %false, !prof !1
@@ -57,7 +57,7 @@ false:
}
define void @test_CBZ_fallthrough_taken(i32 %in) nounwind {
-; CHECK: test_CBZ_fallthrough_taken:
+; CHECK-LABEL: test_CBZ_fallthrough_taken:
%tst = icmp eq i32 %in, 0
br i1 %tst, label %true, label %false, !prof !0
@@ -78,7 +78,7 @@ false:
}
define void @test_CBZ_fallthrough_nottaken(i64 %in) nounwind {
-; CHECK: test_CBZ_fallthrough_nottaken:
+; CHECK-LABEL: test_CBZ_fallthrough_nottaken:
%tst = icmp eq i64 %in, 0
br i1 %tst, label %true, label %false, !prof !1
@@ -99,7 +99,7 @@ false:
}
define void @test_CBNZ_fallthrough_taken(i32 %in) nounwind {
-; CHECK: test_CBNZ_fallthrough_taken:
+; CHECK-LABEL: test_CBNZ_fallthrough_taken:
%tst = icmp ne i32 %in, 0
br i1 %tst, label %true, label %false, !prof !0
@@ -120,7 +120,7 @@ false:
}
define void @test_CBNZ_fallthrough_nottaken(i64 %in) nounwind {
-; CHECK: test_CBNZ_fallthrough_nottaken:
+; CHECK-LABEL: test_CBNZ_fallthrough_nottaken:
%tst = icmp ne i64 %in, 0
br i1 %tst, label %true, label %false, !prof !1
@@ -141,7 +141,7 @@ false:
}
define void @test_TBZ_fallthrough_taken(i32 %in) nounwind {
-; CHECK: test_TBZ_fallthrough_taken:
+; CHECK-LABEL: test_TBZ_fallthrough_taken:
%bit = and i32 %in, 32768
%tst = icmp eq i32 %bit, 0
br i1 %tst, label %true, label %false, !prof !0
@@ -163,7 +163,7 @@ false:
}
define void @test_TBZ_fallthrough_nottaken(i64 %in) nounwind {
-; CHECK: test_TBZ_fallthrough_nottaken:
+; CHECK-LABEL: test_TBZ_fallthrough_nottaken:
%bit = and i64 %in, 32768
%tst = icmp eq i64 %bit, 0
br i1 %tst, label %true, label %false, !prof !1
@@ -186,7 +186,7 @@ false:
define void @test_TBNZ_fallthrough_taken(i32 %in) nounwind {
-; CHECK: test_TBNZ_fallthrough_taken:
+; CHECK-LABEL: test_TBNZ_fallthrough_taken:
%bit = and i32 %in, 32768
%tst = icmp ne i32 %bit, 0
br i1 %tst, label %true, label %false, !prof !0
@@ -208,7 +208,7 @@ false:
}
define void @test_TBNZ_fallthrough_nottaken(i64 %in) nounwind {
-; CHECK: test_TBNZ_fallthrough_nottaken:
+; CHECK-LABEL: test_TBNZ_fallthrough_nottaken:
%bit = and i64 %in, 32768
%tst = icmp ne i64 %bit, 0
br i1 %tst, label %true, label %false, !prof !1
diff --git a/test/CodeGen/AArch64/atomic-ops-not-barriers.ll b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
index 9888a742e32b..da095a0a42c5 100644
--- a/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
+++ b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
define i32 @foo(i32* %var, i1 %cond) {
-; CHECK: foo:
+; CHECK-LABEL: foo:
br i1 %cond, label %atomic_ver, label %simple_ver
simple_ver:
%oldval = load i32* %var
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
index 5e87f21a217d..de84ff46ec3b 100644
--- a/test/CodeGen/AArch64/atomic-ops.ll
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -6,7 +6,7 @@
@var64 = global i64 0
define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_add_i8:
+; CHECK-LABEL: test_atomic_load_add_i8:
%old = atomicrmw add i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -26,7 +26,7 @@ define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_add_i16:
+; CHECK-LABEL: test_atomic_load_add_i16:
%old = atomicrmw add i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -46,7 +46,7 @@ define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_add_i32:
+; CHECK-LABEL: test_atomic_load_add_i32:
%old = atomicrmw add i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -66,7 +66,7 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_add_i64:
+; CHECK-LABEL: test_atomic_load_add_i64:
%old = atomicrmw add i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -86,7 +86,7 @@ define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_sub_i8:
+; CHECK-LABEL: test_atomic_load_sub_i8:
%old = atomicrmw sub i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -106,7 +106,7 @@ define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_sub_i16:
+; CHECK-LABEL: test_atomic_load_sub_i16:
%old = atomicrmw sub i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -126,7 +126,7 @@ define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_sub_i32:
+; CHECK-LABEL: test_atomic_load_sub_i32:
%old = atomicrmw sub i32* @var32, i32 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -146,7 +146,7 @@ define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_sub_i64:
+; CHECK-LABEL: test_atomic_load_sub_i64:
%old = atomicrmw sub i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -166,7 +166,7 @@ define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_and_i8:
+; CHECK-LABEL: test_atomic_load_and_i8:
%old = atomicrmw and i8* @var8, i8 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -186,7 +186,7 @@ define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_and_i16:
+; CHECK-LABEL: test_atomic_load_and_i16:
%old = atomicrmw and i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -206,7 +206,7 @@ define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_and_i32:
+; CHECK-LABEL: test_atomic_load_and_i32:
%old = atomicrmw and i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -226,7 +226,7 @@ define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_and_i64:
+; CHECK-LABEL: test_atomic_load_and_i64:
%old = atomicrmw and i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -246,7 +246,7 @@ define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_or_i8:
+; CHECK-LABEL: test_atomic_load_or_i8:
%old = atomicrmw or i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -266,7 +266,7 @@ define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_or_i16:
+; CHECK-LABEL: test_atomic_load_or_i16:
%old = atomicrmw or i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -286,7 +286,7 @@ define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_or_i32:
+; CHECK-LABEL: test_atomic_load_or_i32:
%old = atomicrmw or i32* @var32, i32 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -306,7 +306,7 @@ define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_or_i64:
+; CHECK-LABEL: test_atomic_load_or_i64:
%old = atomicrmw or i64* @var64, i64 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -326,7 +326,7 @@ define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_xor_i8:
+; CHECK-LABEL: test_atomic_load_xor_i8:
%old = atomicrmw xor i8* @var8, i8 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -346,7 +346,7 @@ define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_xor_i16:
+; CHECK-LABEL: test_atomic_load_xor_i16:
%old = atomicrmw xor i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -366,7 +366,7 @@ define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_xor_i32:
+; CHECK-LABEL: test_atomic_load_xor_i32:
%old = atomicrmw xor i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -386,7 +386,7 @@ define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_xor_i64:
+; CHECK-LABEL: test_atomic_load_xor_i64:
%old = atomicrmw xor i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -406,7 +406,7 @@ define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_xchg_i8:
+; CHECK-LABEL: test_atomic_load_xchg_i8:
%old = atomicrmw xchg i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -425,7 +425,7 @@ define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_xchg_i16:
+; CHECK-LABEL: test_atomic_load_xchg_i16:
%old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -444,7 +444,7 @@ define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_xchg_i32:
+; CHECK-LABEL: test_atomic_load_xchg_i32:
%old = atomicrmw xchg i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -463,7 +463,7 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_xchg_i64:
+; CHECK-LABEL: test_atomic_load_xchg_i64:
%old = atomicrmw xchg i64* @var64, i64 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -483,7 +483,7 @@ define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_min_i8:
+; CHECK-LABEL: test_atomic_load_min_i8:
%old = atomicrmw min i8* @var8, i8 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -504,7 +504,7 @@ define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_min_i16:
+; CHECK-LABEL: test_atomic_load_min_i16:
%old = atomicrmw min i16* @var16, i16 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -525,7 +525,7 @@ define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_min_i32:
+; CHECK-LABEL: test_atomic_load_min_i32:
%old = atomicrmw min i32* @var32, i32 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -546,7 +546,7 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_min_i64:
+; CHECK-LABEL: test_atomic_load_min_i64:
%old = atomicrmw min i64* @var64, i64 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -567,7 +567,7 @@ define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_max_i8:
+; CHECK-LABEL: test_atomic_load_max_i8:
%old = atomicrmw max i8* @var8, i8 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -588,7 +588,7 @@ define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_max_i16:
+; CHECK-LABEL: test_atomic_load_max_i16:
%old = atomicrmw max i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -609,7 +609,7 @@ define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_max_i32:
+; CHECK-LABEL: test_atomic_load_max_i32:
%old = atomicrmw max i32* @var32, i32 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -630,7 +630,7 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_max_i64:
+; CHECK-LABEL: test_atomic_load_max_i64:
%old = atomicrmw max i64* @var64, i64 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -651,7 +651,7 @@ define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_umin_i8:
+; CHECK-LABEL: test_atomic_load_umin_i8:
%old = atomicrmw umin i8* @var8, i8 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -672,7 +672,7 @@ define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_umin_i16:
+; CHECK-LABEL: test_atomic_load_umin_i16:
%old = atomicrmw umin i16* @var16, i16 %offset acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -693,7 +693,7 @@ define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_umin_i32:
+; CHECK-LABEL: test_atomic_load_umin_i32:
%old = atomicrmw umin i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -714,7 +714,7 @@ define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_umin_i64:
+; CHECK-LABEL: test_atomic_load_umin_i64:
%old = atomicrmw umin i64* @var64, i64 %offset acq_rel
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -735,7 +735,7 @@ define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
-; CHECK: test_atomic_load_umax_i8:
+; CHECK-LABEL: test_atomic_load_umax_i8:
%old = atomicrmw umax i8* @var8, i8 %offset acq_rel
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -756,7 +756,7 @@ define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
}
define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
-; CHECK: test_atomic_load_umax_i16:
+; CHECK-LABEL: test_atomic_load_umax_i16:
%old = atomicrmw umax i16* @var16, i16 %offset monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -777,7 +777,7 @@ define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
}
define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
-; CHECK: test_atomic_load_umax_i32:
+; CHECK-LABEL: test_atomic_load_umax_i32:
%old = atomicrmw umax i32* @var32, i32 %offset seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -798,7 +798,7 @@ define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
}
define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
-; CHECK: test_atomic_load_umax_i64:
+; CHECK-LABEL: test_atomic_load_umax_i64:
%old = atomicrmw umax i64* @var64, i64 %offset release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -819,7 +819,7 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
}
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
-; CHECK: test_atomic_cmpxchg_i8:
+; CHECK-LABEL: test_atomic_cmpxchg_i8:
%old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -841,7 +841,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
}
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
-; CHECK: test_atomic_cmpxchg_i16:
+; CHECK-LABEL: test_atomic_cmpxchg_i16:
%old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
@@ -863,7 +863,7 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
}
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
-; CHECK: test_atomic_cmpxchg_i32:
+; CHECK-LABEL: test_atomic_cmpxchg_i32:
%old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
@@ -885,7 +885,7 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
}
define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
-; CHECK: test_atomic_cmpxchg_i64:
+; CHECK-LABEL: test_atomic_cmpxchg_i64:
%old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
@@ -907,7 +907,7 @@ define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
}
define i8 @test_atomic_load_monotonic_i8() nounwind {
-; CHECK: test_atomic_load_monotonic_i8:
+; CHECK-LABEL: test_atomic_load_monotonic_i8:
%val = load atomic i8* @var8 monotonic, align 1
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var8
@@ -918,7 +918,7 @@ define i8 @test_atomic_load_monotonic_i8() nounwind {
}
define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
-; CHECK: test_atomic_load_monotonic_regoff_i8:
+; CHECK-LABEL: test_atomic_load_monotonic_regoff_i8:
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i8*
@@ -931,7 +931,7 @@ define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
}
define i8 @test_atomic_load_acquire_i8() nounwind {
-; CHECK: test_atomic_load_acquire_i8:
+; CHECK-LABEL: test_atomic_load_acquire_i8:
%val = load atomic i8* @var8 acquire, align 1
; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
@@ -944,7 +944,7 @@ define i8 @test_atomic_load_acquire_i8() nounwind {
}
define i8 @test_atomic_load_seq_cst_i8() nounwind {
-; CHECK: test_atomic_load_seq_cst_i8:
+; CHECK-LABEL: test_atomic_load_seq_cst_i8:
%val = load atomic i8* @var8 seq_cst, align 1
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
@@ -957,7 +957,7 @@ define i8 @test_atomic_load_seq_cst_i8() nounwind {
}
define i16 @test_atomic_load_monotonic_i16() nounwind {
-; CHECK: test_atomic_load_monotonic_i16:
+; CHECK-LABEL: test_atomic_load_monotonic_i16:
%val = load atomic i16* @var16 monotonic, align 2
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
@@ -969,7 +969,7 @@ define i16 @test_atomic_load_monotonic_i16() nounwind {
}
define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
-; CHECK: test_atomic_load_monotonic_regoff_i32:
+; CHECK-LABEL: test_atomic_load_monotonic_regoff_i32:
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i32*
@@ -982,7 +982,7 @@ define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind
}
define i64 @test_atomic_load_seq_cst_i64() nounwind {
-; CHECK: test_atomic_load_seq_cst_i64:
+; CHECK-LABEL: test_atomic_load_seq_cst_i64:
%val = load atomic i64* @var64 seq_cst, align 8
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var64
@@ -995,7 +995,7 @@ define i64 @test_atomic_load_seq_cst_i64() nounwind {
}
define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
-; CHECK: test_atomic_store_monotonic_i8:
+; CHECK-LABEL: test_atomic_store_monotonic_i8:
store atomic i8 %val, i8* @var8 monotonic, align 1
; CHECK: adrp x[[HIADDR:[0-9]+]], var8
; CHECK: strb w0, [x[[HIADDR]], #:lo12:var8]
@@ -1004,7 +1004,7 @@ define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
}
define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
-; CHECK: test_atomic_store_monotonic_regoff_i8:
+; CHECK-LABEL: test_atomic_store_monotonic_regoff_i8:
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i8*
@@ -1015,7 +1015,7 @@ define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val)
ret void
}
define void @test_atomic_store_release_i8(i8 %val) nounwind {
-; CHECK: test_atomic_store_release_i8:
+; CHECK-LABEL: test_atomic_store_release_i8:
store atomic i8 %val, i8* @var8 release, align 1
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
@@ -1028,7 +1028,7 @@ define void @test_atomic_store_release_i8(i8 %val) nounwind {
}
define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
-; CHECK: test_atomic_store_seq_cst_i8:
+; CHECK-LABEL: test_atomic_store_seq_cst_i8:
store atomic i8 %val, i8* @var8 seq_cst, align 1
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
@@ -1042,7 +1042,7 @@ define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
}
define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
-; CHECK: test_atomic_store_monotonic_i16:
+; CHECK-LABEL: test_atomic_store_monotonic_i16:
store atomic i16 %val, i16* @var16 monotonic, align 2
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
@@ -1053,7 +1053,7 @@ define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
}
define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
-; CHECK: test_atomic_store_monotonic_regoff_i32:
+; CHECK-LABEL: test_atomic_store_monotonic_regoff_i32:
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i32*
@@ -1067,7 +1067,7 @@ define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %va
}
define void @test_atomic_store_release_i64(i64 %val) nounwind {
-; CHECK: test_atomic_store_release_i64:
+; CHECK-LABEL: test_atomic_store_release_i64:
store atomic i64 %val, i64* @var64 release, align 8
; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var64
diff --git a/test/CodeGen/AArch64/basic-pic.ll b/test/CodeGen/AArch64/basic-pic.ll
index da94041c95ff..682b7ba69d95 100644
--- a/test/CodeGen/AArch64/basic-pic.ll
+++ b/test/CodeGen/AArch64/basic-pic.ll
@@ -1,70 +1,54 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic %s -o - | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -relocation-model=pic -filetype=obj %s -o -| llvm-objdump -r - | FileCheck --check-prefix=CHECK-ELF %s
@var = global i32 0
-; CHECK-ELF: RELOCATION RECORDS FOR [.text]
-
define i32 @get_globalvar() {
-; CHECK: get_globalvar:
+; CHECK-LABEL: get_globalvar:
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], #:got_lo12:var]
; CHECK: ldr w0, [x[[GOTLOC]]]
-; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var
-; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var
ret i32 %val
}
define i32* @get_globalvaraddr() {
-; CHECK: get_globalvaraddr:
+; CHECK-LABEL: get_globalvaraddr:
%val = load i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:var]
-; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var
-; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var
ret i32* @var
}
@hiddenvar = hidden global i32 0
define i32 @get_hiddenvar() {
-; CHECK: get_hiddenvar:
+; CHECK-LABEL: get_hiddenvar:
%val = load i32* @hiddenvar
; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
; CHECK: ldr w0, [x[[HI]], #:lo12:hiddenvar]
-; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 hiddenvar
-; CHECK-ELF: R_AARCH64_LDST32_ABS_LO12_NC hiddenvar
ret i32 %val
}
define i32* @get_hiddenvaraddr() {
-; CHECK: get_hiddenvaraddr:
+; CHECK-LABEL: get_hiddenvaraddr:
%val = load i32* @hiddenvar
; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
; CHECK: add x0, [[HI]], #:lo12:hiddenvar
-; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 hiddenvar
-; CHECK-ELF: R_AARCH64_ADD_ABS_LO12_NC hiddenvar
ret i32* @hiddenvar
}
define void()* @get_func() {
-; CHECK: get_func:
+; CHECK-LABEL: get_func:
ret void()* bitcast(void()*()* @get_func to void()*)
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:get_func
; CHECK: ldr x0, [x[[GOTHI]], #:got_lo12:get_func]
-
- ; Particularly important that the ADRP gets a relocation, LLVM tends to think
- ; it can relax it because it knows where get_func is. It can't!
-; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE get_func
-; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC get_func
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/bitfield-insert-0.ll b/test/CodeGen/AArch64/bitfield-insert-0.ll
index d1191f6aaa8a..37a18b7fb613 100644
--- a/test/CodeGen/AArch64/bitfield-insert-0.ll
+++ b/test/CodeGen/AArch64/bitfield-insert-0.ll
@@ -16,4 +16,4 @@ define void @test_bfi0(i32* %existing, i32* %new) {
store volatile i32 %combined, i32* %existing
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/bitfield-insert.ll b/test/CodeGen/AArch64/bitfield-insert.ll
index 3e871b9a6d27..1f046087abc0 100644
--- a/test/CodeGen/AArch64/bitfield-insert.ll
+++ b/test/CodeGen/AArch64/bitfield-insert.ll
@@ -6,7 +6,7 @@
%struct.foo = type { i8, [2 x i8], i8 }
define [1 x i64] @from_clang([1 x i64] %f.coerce, i32 %n) nounwind readnone {
-; CHECK: from_clang:
+; CHECK-LABEL: from_clang:
; CHECK: bfi w0, w1, #3, #4
; CHECK-NEXT: ret
@@ -25,7 +25,7 @@ entry:
}
define void @test_whole32(i32* %existing, i32* %new) {
-; CHECK: test_whole32:
+; CHECK-LABEL: test_whole32:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #26, #5
%oldval = load volatile i32* %existing
@@ -42,7 +42,7 @@ define void @test_whole32(i32* %existing, i32* %new) {
}
define void @test_whole64(i64* %existing, i64* %new) {
-; CHECK: test_whole64:
+; CHECK-LABEL: test_whole64:
; CHECK: bfi {{x[0-9]+}}, {{x[0-9]+}}, #26, #14
; CHECK-NOT: and
; CHECK: ret
@@ -61,7 +61,7 @@ define void @test_whole64(i64* %existing, i64* %new) {
}
define void @test_whole32_from64(i64* %existing, i64* %new) {
-; CHECK: test_whole32_from64:
+; CHECK-LABEL: test_whole32_from64:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #{{0|16}}, #16
; CHECK-NOT: and
; CHECK: ret
@@ -79,7 +79,7 @@ define void @test_whole32_from64(i64* %existing, i64* %new) {
}
define void @test_32bit_masked(i32 *%existing, i32 *%new) {
-; CHECK: test_32bit_masked:
+; CHECK-LABEL: test_32bit_masked:
; CHECK: bfi [[INSERT:w[0-9]+]], {{w[0-9]+}}, #3, #4
; CHECK: and {{w[0-9]+}}, [[INSERT]], #0xff
@@ -97,7 +97,7 @@ define void @test_32bit_masked(i32 *%existing, i32 *%new) {
}
define void @test_64bit_masked(i64 *%existing, i64 *%new) {
-; CHECK: test_64bit_masked:
+; CHECK-LABEL: test_64bit_masked:
; CHECK: bfi [[INSERT:x[0-9]+]], {{x[0-9]+}}, #40, #8
; CHECK: and {{x[0-9]+}}, [[INSERT]], #0xffff00000000
@@ -116,7 +116,7 @@ define void @test_64bit_masked(i64 *%existing, i64 *%new) {
; Mask is too complicated for literal ANDwwi, make sure other avenues are tried.
define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
-; CHECK: test_32bit_complexmask:
+; CHECK-LABEL: test_32bit_complexmask:
; CHECK: bfi {{w[0-9]+}}, {{w[0-9]+}}, #3, #4
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -135,7 +135,7 @@ define void @test_32bit_complexmask(i32 *%existing, i32 *%new) {
; Neither mask is a contiguous set of 1s. BFI can't be used
define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
-; CHECK: test_32bit_badmask:
+; CHECK-LABEL: test_32bit_badmask:
; CHECK-NOT: bfi
; CHECK: ret
@@ -154,7 +154,7 @@ define void @test_32bit_badmask(i32 *%existing, i32 *%new) {
; Ditto
define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
-; CHECK: test_64bit_badmask:
+; CHECK-LABEL: test_64bit_badmask:
; CHECK-NOT: bfi
; CHECK: ret
@@ -174,7 +174,7 @@ define void @test_64bit_badmask(i64 *%existing, i64 *%new) {
; Bitfield insert where there's a left-over shr needed at the beginning
; (e.g. result of str.bf1 = str.bf2)
define void @test_32bit_with_shr(i32* %existing, i32* %new) {
-; CHECK: test_32bit_with_shr:
+; CHECK-LABEL: test_32bit_with_shr:
%oldval = load volatile i32* %existing
%oldval_keep = and i32 %oldval, 2214592511 ; =0x83ffffff
diff --git a/test/CodeGen/AArch64/bitfield.ll b/test/CodeGen/AArch64/bitfield.ll
index 36d337ef05ef..1c84f5d57854 100644
--- a/test/CodeGen/AArch64/bitfield.ll
+++ b/test/CodeGen/AArch64/bitfield.ll
@@ -5,7 +5,7 @@
@var64 = global i64 0
define void @test_extendb(i8 %var) {
-; CHECK: test_extendb:
+; CHECK-LABEL: test_extendb:
%sxt32 = sext i8 %var to i32
store volatile i32 %sxt32, i32* @var32
@@ -29,7 +29,7 @@ define void @test_extendb(i8 %var) {
}
define void @test_extendh(i16 %var) {
-; CHECK: test_extendh:
+; CHECK-LABEL: test_extendh:
%sxt32 = sext i16 %var to i32
store volatile i32 %sxt32, i32* @var32
@@ -53,7 +53,7 @@ define void @test_extendh(i16 %var) {
}
define void @test_extendw(i32 %var) {
-; CHECK: test_extendw:
+; CHECK-LABEL: test_extendw:
%sxt64 = sext i32 %var to i64
store volatile i64 %sxt64, i64* @var64
@@ -66,7 +66,7 @@ define void @test_extendw(i32 %var) {
}
define void @test_shifts(i32 %val32, i64 %val64) {
-; CHECK: test_shifts:
+; CHECK-LABEL: test_shifts:
%shift1 = ashr i32 %val32, 31
store volatile i32 %shift1, i32* @var32
@@ -114,7 +114,7 @@ define void @test_shifts(i32 %val32, i64 %val64) {
; LLVM can produce in-register extensions taking place entirely with
; 64-bit registers too.
define void @test_sext_inreg_64(i64 %in) {
-; CHECK: test_sext_inreg_64:
+; CHECK-LABEL: test_sext_inreg_64:
; i1 doesn't have an official alias, but crops up and is handled by
; the bitfield ops.
@@ -143,7 +143,7 @@ define void @test_sext_inreg_64(i64 %in) {
; These instructions don't actually select to official bitfield
; operations, but it's important that we select them somehow:
define void @test_zext_inreg_64(i64 %in) {
-; CHECK: test_zext_inreg_64:
+; CHECK-LABEL: test_zext_inreg_64:
%trunc_i8 = trunc i64 %in to i8
%zext_i8 = zext i8 %trunc_i8 to i64
@@ -164,7 +164,7 @@ define void @test_zext_inreg_64(i64 %in) {
}
define i64 @test_sext_inreg_from_32(i32 %in) {
-; CHECK: test_sext_inreg_from_32:
+; CHECK-LABEL: test_sext_inreg_from_32:
%small = trunc i32 %in to i1
%ext = sext i1 %small to i64
@@ -178,7 +178,7 @@ define i64 @test_sext_inreg_from_32(i32 %in) {
define i32 @test_ubfx32(i32* %addr) {
-; CHECK: test_ubfx32:
+; CHECK-LABEL: test_ubfx32:
; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
%fields = load i32* %addr
@@ -188,7 +188,7 @@ define i32 @test_ubfx32(i32* %addr) {
}
define i64 @test_ubfx64(i64* %addr) {
-; CHECK: test_ubfx64:
+; CHECK-LABEL: test_ubfx64:
; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
%fields = load i64* %addr
@@ -198,7 +198,7 @@ define i64 @test_ubfx64(i64* %addr) {
}
define i32 @test_sbfx32(i32* %addr) {
-; CHECK: test_sbfx32:
+; CHECK-LABEL: test_sbfx32:
; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
%fields = load i32* %addr
@@ -208,7 +208,7 @@ define i32 @test_sbfx32(i32* %addr) {
}
define i64 @test_sbfx64(i64* %addr) {
-; CHECK: test_sbfx64:
+; CHECK-LABEL: test_sbfx64:
; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
%fields = load i64* %addr
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
index 5e85057a3c3b..8cda431b8e92 100644
--- a/test/CodeGen/AArch64/blockaddress.ll
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -4,7 +4,7 @@
@addr = global i8* null
define void @test_blockaddress() {
-; CHECK: test_blockaddress:
+; CHECK-LABEL: test_blockaddress:
store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr
%val = load volatile i8** @addr
indirectbr i8* %val, [label %block]
diff --git a/test/CodeGen/AArch64/breg.ll b/test/CodeGen/AArch64/breg.ll
index 38ed4734e1b4..1ed5b9b755dd 100644
--- a/test/CodeGen/AArch64/breg.ll
+++ b/test/CodeGen/AArch64/breg.ll
@@ -3,7 +3,7 @@
@stored_label = global i8* null
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%lab = load i8** @stored_label
indirectbr i8* %lab, [label %otherlab, label %retlab]
; CHECK: adrp {{x[0-9]+}}, stored_label
diff --git a/test/CodeGen/AArch64/callee-save.ll b/test/CodeGen/AArch64/callee-save.ll
index c66aa5bfc510..52243b05b4b9 100644
--- a/test/CodeGen/AArch64/callee-save.ll
+++ b/test/CodeGen/AArch64/callee-save.ll
@@ -3,7 +3,7 @@
@var = global float 0.0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: stp d14, d15, [sp
; CHECK: stp d12, d13, [sp
diff --git a/test/CodeGen/AArch64/code-model-large-abs.ll b/test/CodeGen/AArch64/code-model-large-abs.ll
index a365568e11ee..b387f285d1d4 100644
--- a/test/CodeGen/AArch64/code-model-large-abs.ll
+++ b/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -6,7 +6,7 @@
@var64 = global i64 0
define i8* @global_addr() {
-; CHECK: global_addr:
+; CHECK-LABEL: global_addr:
ret i8* @var8
; The movz/movk calculation should end up returned directly in x0.
; CHECK: movz x0, #:abs_g3:var8
@@ -17,7 +17,7 @@ define i8* @global_addr() {
}
define i8 @global_i8() {
-; CHECK: global_i8:
+; CHECK-LABEL: global_i8:
%val = load i8* @var8
ret i8 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
@@ -28,7 +28,7 @@ define i8 @global_i8() {
}
define i16 @global_i16() {
-; CHECK: global_i16:
+; CHECK-LABEL: global_i16:
%val = load i16* @var16
ret i16 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
@@ -39,7 +39,7 @@ define i16 @global_i16() {
}
define i32 @global_i32() {
-; CHECK: global_i32:
+; CHECK-LABEL: global_i32:
%val = load i32* @var32
ret i32 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
@@ -50,7 +50,7 @@ define i32 @global_i32() {
}
define i64 @global_i64() {
-; CHECK: global_i64:
+; CHECK-LABEL: global_i64:
%val = load i64* @var64
ret i64 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
diff --git a/test/CodeGen/AArch64/compare-branch.ll b/test/CodeGen/AArch64/compare-branch.ll
index 4213110497d3..75efd9d4a0d6 100644
--- a/test/CodeGen/AArch64/compare-branch.ll
+++ b/test/CodeGen/AArch64/compare-branch.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%val1 = load volatile i32* @var32
%tst1 = icmp eq i32 %val1, 0
@@ -35,4 +35,4 @@ test5:
end:
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/complex-copy-noneon.ll b/test/CodeGen/AArch64/complex-copy-noneon.ll
new file mode 100644
index 000000000000..4ae547856ecd
--- /dev/null
+++ b/test/CodeGen/AArch64/complex-copy-noneon.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=-neon < %s
+
+; The DAG combiner decided to use a vector load/store for this struct copy
+; previously. This probably shouldn't happen without NEON, but the most
+; important thing is that it compiles.
+
+define void @store_combine() nounwind {
+ %src = alloca { double, double }, align 8
+ %dst = alloca { double, double }, align 8
+
+ %src.realp = getelementptr inbounds { double, double }* %src, i32 0, i32 0
+ %src.real = load double* %src.realp
+ %src.imagp = getelementptr inbounds { double, double }* %src, i32 0, i32 1
+ %src.imag = load double* %src.imagp
+
+ %dst.realp = getelementptr inbounds { double, double }* %dst, i32 0, i32 0
+ %dst.imagp = getelementptr inbounds { double, double }* %dst, i32 0, i32 1
+ store double %src.real, double* %dst.realp
+ store double %src.imag, double* %dst.imagp
+ ret void
+}
diff --git a/test/CodeGen/AArch64/cond-sel.ll b/test/CodeGen/AArch64/cond-sel.ll
index 3051cf53fdf8..9c1dfeb3c8d3 100644
--- a/test/CodeGen/AArch64/cond-sel.ll
+++ b/test/CodeGen/AArch64/cond-sel.ll
@@ -1,24 +1,25 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var32 = global i32 0
@var64 = global i64 0
define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
-; CHECK: test_csel:
+; CHECK-LABEL: test_csel:
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
-; CHECK: movz [[W52:w[0-9]+]], #52
-; CHECK: movz [[W42:w[0-9]+]], #42
+; CHECK-DAG: movz [[W52:w[0-9]+]], #52
+; CHECK-DAG: movz [[W42:w[0-9]+]], #42
; CHECK: csel {{w[0-9]+}}, [[W42]], [[W52]], hi
%rhs64 = sext i32 %rhs32 to i64
%tst2 = icmp sle i64 %lhs64, %rhs64
%val2 = select i1 %tst2, i64 %lhs64, i64 %rhs64
store i64 %val2, i64* @var64
-; CHECK: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
-; CHECK: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
+; CHECK-DAG: cmp [[LHS:x[0-9]+]], [[RHS:w[0-9]+]], sxtw
+; CHECK-DAG: sxtw [[EXT_RHS:x[0-9]+]], [[RHS]]
; CHECK: csel {{x[0-9]+}}, [[LHS]], [[EXT_RHS]], le
ret void
@@ -26,10 +27,11 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
}
define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %rhs64) {
-; CHECK: test_floatcsel:
+; CHECK-LABEL: test_floatcsel:
%tst1 = fcmp one float %lhs32, %rhs32
; CHECK: fcmp {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFP-NOT: fcmp
%val1 = select i1 %tst1, i32 42, i32 52
store i32 %val1, i32* @var32
; CHECK: movz [[W52:w[0-9]+]], #52
@@ -40,6 +42,7 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
%tst2 = fcmp ueq double %lhs64, %rhs64
; CHECK: fcmp {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NOFP-NOT: fcmp
%val2 = select i1 %tst2, i64 9, i64 15
store i64 %val2, i64* @var64
; CHECK: movz [[CONST15:x[0-9]+]], #15
@@ -53,7 +56,7 @@ define void @test_floatcsel(float %lhs32, float %rhs32, double %lhs64, double %r
define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
-; CHECK: test_csinc:
+; CHECK-LABEL: test_csinc:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
@@ -93,7 +96,7 @@ define void @test_csinc(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
}
define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
-; CHECK: test_csinv:
+; CHECK-LABEL: test_csinv:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
@@ -133,7 +136,7 @@ define void @test_csinv(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
}
define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
-; CHECK: test_csneg:
+; CHECK-LABEL: test_csneg:
; Note that commuting rhs and lhs in the select changes ugt to ule (i.e. hi to ls).
%tst1 = icmp ugt i32 %lhs32, %rhs32
@@ -173,7 +176,7 @@ define void @test_csneg(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
}
define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
-; CHECK: test_cset:
+; CHECK-LABEL: test_cset:
; N.b. code is not optimal here (32-bit csinc would be better) but
; incoming DAG is too complex
@@ -194,7 +197,7 @@ define void @test_cset(i32 %lhs, i32 %rhs, i64 %lhs64) {
}
define void @test_csetm(i32 %lhs, i32 %rhs, i64 %lhs64) {
-; CHECK: test_csetm:
+; CHECK-LABEL: test_csetm:
%tst1 = icmp eq i32 %lhs, %rhs
%val1 = sext i1 %tst1 to i32
diff --git a/test/CodeGen/AArch64/directcond.ll b/test/CodeGen/AArch64/directcond.ll
index f5d57593bfad..12c7b6aed643 100644
--- a/test/CodeGen/AArch64/directcond.ll
+++ b/test/CodeGen/AArch64/directcond.ll
@@ -1,7 +1,8 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
-; CHECK: test_select_i32:
+; CHECK-LABEL: test_select_i32:
%val = select i1 %bit, i32 %a, i32 %b
; CHECK: movz [[ONE:w[0-9]+]], #1
; CHECK: tst w0, [[ONE]]
@@ -11,7 +12,7 @@ define i32 @test_select_i32(i1 %bit, i32 %a, i32 %b) {
}
define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
-; CHECK: test_select_i64:
+; CHECK-LABEL: test_select_i64:
%val = select i1 %bit, i64 %a, i64 %b
; CHECK: movz [[ONE:w[0-9]+]], #1
; CHECK: tst w0, [[ONE]]
@@ -21,27 +22,28 @@ define i64 @test_select_i64(i1 %bit, i64 %a, i64 %b) {
}
define float @test_select_float(i1 %bit, float %a, float %b) {
-; CHECK: test_select_float:
+; CHECK-LABEL: test_select_float:
%val = select i1 %bit, float %a, float %b
; CHECK: movz [[ONE:w[0-9]+]], #1
; CHECK: tst w0, [[ONE]]
; CHECK-NEXT: fcsel s0, s0, s1, ne
-
+; CHECK-NOFP-NOT: fcsel
ret float %val
}
define double @test_select_double(i1 %bit, double %a, double %b) {
-; CHECK: test_select_double:
+; CHECK-LABEL: test_select_double:
%val = select i1 %bit, double %a, double %b
; CHECK: movz [[ONE:w[0-9]+]], #1
; CHECK: tst w0, [[ONE]]
; CHECK-NEXT: fcsel d0, d0, d1, ne
+; CHECK-NOFP-NOT: fcsel
ret double %val
}
define i32 @test_brcond(i1 %bit) {
-; CHECK: test_brcond:
+; CHECK-LABEL: test_brcond:
br i1 %bit, label %true, label %false
; CHECK: tbz {{w[0-9]+}}, #0, .LBB
@@ -56,6 +58,7 @@ define i1 @test_setcc_float(float %lhs, float %rhs) {
%val = fcmp oeq float %lhs, %rhs
; CHECK: fcmp s0, s1
; CHECK: csinc w0, wzr, wzr, ne
+; CHECK-NOFP-NOT: fcmp
ret i1 %val
}
@@ -64,6 +67,7 @@ define i1 @test_setcc_double(double %lhs, double %rhs) {
%val = fcmp oeq double %lhs, %rhs
; CHECK: fcmp d0, d1
; CHECK: csinc w0, wzr, wzr, ne
+; CHECK-NOFP-NOT: fcmp
ret i1 %val
}
diff --git a/test/CodeGen/AArch64/dp-3source.ll b/test/CodeGen/AArch64/dp-3source.ll
index c40d3933b44b..81d9e15532fa 100644
--- a/test/CodeGen/AArch64/dp-3source.ll
+++ b/test/CodeGen/AArch64/dp-3source.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {
-; CHECK: test_madd32:
+; CHECK-LABEL: test_madd32:
%mid = mul i32 %val1, %val2
%res = add i32 %val0, %mid
; CHECK: madd {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -9,7 +9,7 @@ define i32 @test_madd32(i32 %val0, i32 %val1, i32 %val2) {
}
define i64 @test_madd64(i64 %val0, i64 %val1, i64 %val2) {
-; CHECK: test_madd64:
+; CHECK-LABEL: test_madd64:
%mid = mul i64 %val1, %val2
%res = add i64 %val0, %mid
; CHECK: madd {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
@@ -17,7 +17,7 @@ define i64 @test_madd64(i64 %val0, i64 %val1, i64 %val2) {
}
define i32 @test_msub32(i32 %val0, i32 %val1, i32 %val2) {
-; CHECK: test_msub32:
+; CHECK-LABEL: test_msub32:
%mid = mul i32 %val1, %val2
%res = sub i32 %val0, %mid
; CHECK: msub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -25,7 +25,7 @@ define i32 @test_msub32(i32 %val0, i32 %val1, i32 %val2) {
}
define i64 @test_msub64(i64 %val0, i64 %val1, i64 %val2) {
-; CHECK: test_msub64:
+; CHECK-LABEL: test_msub64:
%mid = mul i64 %val1, %val2
%res = sub i64 %val0, %mid
; CHECK: msub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
@@ -33,7 +33,7 @@ define i64 @test_msub64(i64 %val0, i64 %val1, i64 %val2) {
}
define i64 @test_smaddl(i64 %acc, i32 %val1, i32 %val2) {
-; CHECK: test_smaddl:
+; CHECK-LABEL: test_smaddl:
%ext1 = sext i32 %val1 to i64
%ext2 = sext i32 %val2 to i64
%prod = mul i64 %ext1, %ext2
@@ -43,7 +43,7 @@ define i64 @test_smaddl(i64 %acc, i32 %val1, i32 %val2) {
}
define i64 @test_smsubl(i64 %acc, i32 %val1, i32 %val2) {
-; CHECK: test_smsubl:
+; CHECK-LABEL: test_smsubl:
%ext1 = sext i32 %val1 to i64
%ext2 = sext i32 %val2 to i64
%prod = mul i64 %ext1, %ext2
@@ -53,7 +53,7 @@ define i64 @test_smsubl(i64 %acc, i32 %val1, i32 %val2) {
}
define i64 @test_umaddl(i64 %acc, i32 %val1, i32 %val2) {
-; CHECK: test_umaddl:
+; CHECK-LABEL: test_umaddl:
%ext1 = zext i32 %val1 to i64
%ext2 = zext i32 %val2 to i64
%prod = mul i64 %ext1, %ext2
@@ -63,7 +63,7 @@ define i64 @test_umaddl(i64 %acc, i32 %val1, i32 %val2) {
}
define i64 @test_umsubl(i64 %acc, i32 %val1, i32 %val2) {
-; CHECK: test_umsubl:
+; CHECK-LABEL: test_umsubl:
%ext1 = zext i32 %val1 to i64
%ext2 = zext i32 %val2 to i64
%prod = mul i64 %ext1, %ext2
@@ -73,7 +73,7 @@ define i64 @test_umsubl(i64 %acc, i32 %val1, i32 %val2) {
}
define i64 @test_smulh(i64 %lhs, i64 %rhs) {
-; CHECK: test_smulh:
+; CHECK-LABEL: test_smulh:
%ext1 = sext i64 %lhs to i128
%ext2 = sext i64 %rhs to i128
%res = mul i128 %ext1, %ext2
@@ -84,7 +84,7 @@ define i64 @test_smulh(i64 %lhs, i64 %rhs) {
}
define i64 @test_umulh(i64 %lhs, i64 %rhs) {
-; CHECK: test_umulh:
+; CHECK-LABEL: test_umulh:
%ext1 = zext i64 %lhs to i128
%ext2 = zext i64 %rhs to i128
%res = mul i128 %ext1, %ext2
@@ -95,21 +95,21 @@ define i64 @test_umulh(i64 %lhs, i64 %rhs) {
}
define i32 @test_mul32(i32 %lhs, i32 %rhs) {
-; CHECK: test_mul32:
+; CHECK-LABEL: test_mul32:
%res = mul i32 %lhs, %rhs
; CHECK: mul {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
ret i32 %res
}
define i64 @test_mul64(i64 %lhs, i64 %rhs) {
-; CHECK: test_mul64:
+; CHECK-LABEL: test_mul64:
%res = mul i64 %lhs, %rhs
; CHECK: mul {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
ret i64 %res
}
define i32 @test_mneg32(i32 %lhs, i32 %rhs) {
-; CHECK: test_mneg32:
+; CHECK-LABEL: test_mneg32:
%prod = mul i32 %lhs, %rhs
%res = sub i32 0, %prod
; CHECK: mneg {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
@@ -117,7 +117,7 @@ define i32 @test_mneg32(i32 %lhs, i32 %rhs) {
}
define i64 @test_mneg64(i64 %lhs, i64 %rhs) {
-; CHECK: test_mneg64:
+; CHECK-LABEL: test_mneg64:
%prod = mul i64 %lhs, %rhs
%res = sub i64 0, %prod
; CHECK: mneg {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
@@ -125,7 +125,7 @@ define i64 @test_mneg64(i64 %lhs, i64 %rhs) {
}
define i64 @test_smull(i32 %lhs, i32 %rhs) {
-; CHECK: test_smull:
+; CHECK-LABEL: test_smull:
%ext1 = sext i32 %lhs to i64
%ext2 = sext i32 %rhs to i64
%res = mul i64 %ext1, %ext2
@@ -134,7 +134,7 @@ define i64 @test_smull(i32 %lhs, i32 %rhs) {
}
define i64 @test_umull(i32 %lhs, i32 %rhs) {
-; CHECK: test_umull:
+; CHECK-LABEL: test_umull:
%ext1 = zext i32 %lhs to i64
%ext2 = zext i32 %rhs to i64
%res = mul i64 %ext1, %ext2
@@ -143,7 +143,7 @@ define i64 @test_umull(i32 %lhs, i32 %rhs) {
}
define i64 @test_smnegl(i32 %lhs, i32 %rhs) {
-; CHECK: test_smnegl:
+; CHECK-LABEL: test_smnegl:
%ext1 = sext i32 %lhs to i64
%ext2 = sext i32 %rhs to i64
%prod = mul i64 %ext1, %ext2
@@ -153,7 +153,7 @@ define i64 @test_smnegl(i32 %lhs, i32 %rhs) {
}
define i64 @test_umnegl(i32 %lhs, i32 %rhs) {
-; CHECK: test_umnegl:
+; CHECK-LABEL: test_umnegl:
%ext1 = zext i32 %lhs to i64
%ext2 = zext i32 %rhs to i64
%prod = mul i64 %ext1, %ext2
diff --git a/test/CodeGen/AArch64/dp1.ll b/test/CodeGen/AArch64/dp1.ll
index 83aa8b4f6631..6a8d55cdc7ea 100644
--- a/test/CodeGen/AArch64/dp1.ll
+++ b/test/CodeGen/AArch64/dp1.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @rev_i32() {
-; CHECK: rev_i32:
+; CHECK-LABEL: rev_i32:
%val0_tmp = load i32* @var32
%val1_tmp = call i32 @llvm.bswap.i32(i32 %val0_tmp)
; CHECK: rev {{w[0-9]+}}, {{w[0-9]+}}
@@ -13,7 +13,7 @@ define void @rev_i32() {
}
define void @rev_i64() {
-; CHECK: rev_i64:
+; CHECK-LABEL: rev_i64:
%val0_tmp = load i64* @var64
%val1_tmp = call i64 @llvm.bswap.i64(i64 %val0_tmp)
; CHECK: rev {{x[0-9]+}}, {{x[0-9]+}}
@@ -22,7 +22,7 @@ define void @rev_i64() {
}
define void @rev32_i64() {
-; CHECK: rev32_i64:
+; CHECK-LABEL: rev32_i64:
%val0_tmp = load i64* @var64
%val1_tmp = shl i64 %val0_tmp, 32
%val5_tmp = sub i64 64, 32
@@ -35,7 +35,7 @@ define void @rev32_i64() {
}
define void @rev16_i32() {
-; CHECK: rev16_i32:
+; CHECK-LABEL: rev16_i32:
%val0_tmp = load i32* @var32
%val1_tmp = shl i32 %val0_tmp, 16
%val2_tmp = lshr i32 %val0_tmp, 16
@@ -47,7 +47,7 @@ define void @rev16_i32() {
}
define void @clz_zerodef_i32() {
-; CHECK: clz_zerodef_i32:
+; CHECK-LABEL: clz_zerodef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 0)
; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
@@ -56,7 +56,7 @@ define void @clz_zerodef_i32() {
}
define void @clz_zerodef_i64() {
-; CHECK: clz_zerodef_i64:
+; CHECK-LABEL: clz_zerodef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 0)
; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
@@ -65,7 +65,7 @@ define void @clz_zerodef_i64() {
}
define void @clz_zeroundef_i32() {
-; CHECK: clz_zeroundef_i32:
+; CHECK-LABEL: clz_zeroundef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctlz.i32(i32 %val0_tmp, i1 1)
; CHECK: clz {{w[0-9]+}}, {{w[0-9]+}}
@@ -74,7 +74,7 @@ define void @clz_zeroundef_i32() {
}
define void @clz_zeroundef_i64() {
-; CHECK: clz_zeroundef_i64:
+; CHECK-LABEL: clz_zeroundef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctlz.i64(i64 %val0_tmp, i1 1)
; CHECK: clz {{x[0-9]+}}, {{x[0-9]+}}
@@ -83,7 +83,7 @@ define void @clz_zeroundef_i64() {
}
define void @cttz_zerodef_i32() {
-; CHECK: cttz_zerodef_i32:
+; CHECK-LABEL: cttz_zerodef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 0)
; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
@@ -93,7 +93,7 @@ define void @cttz_zerodef_i32() {
}
define void @cttz_zerodef_i64() {
-; CHECK: cttz_zerodef_i64:
+; CHECK-LABEL: cttz_zerodef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 0)
; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
@@ -103,7 +103,7 @@ define void @cttz_zerodef_i64() {
}
define void @cttz_zeroundef_i32() {
-; CHECK: cttz_zeroundef_i32:
+; CHECK-LABEL: cttz_zeroundef_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.cttz.i32(i32 %val0_tmp, i1 1)
; CHECK: rbit [[REVERSED:w[0-9]+]], {{w[0-9]+}}
@@ -113,7 +113,7 @@ define void @cttz_zeroundef_i32() {
}
define void @cttz_zeroundef_i64() {
-; CHECK: cttz_zeroundef_i64:
+; CHECK-LABEL: cttz_zeroundef_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.cttz.i64(i64 %val0_tmp, i1 1)
; CHECK: rbit [[REVERSED:x[0-9]+]], {{x[0-9]+}}
@@ -125,7 +125,7 @@ define void @cttz_zeroundef_i64() {
; These two are just compilation tests really: the operation's set to Expand in
; ISelLowering.
define void @ctpop_i32() {
-; CHECK: ctpop_i32:
+; CHECK-LABEL: ctpop_i32:
%val0_tmp = load i32* @var32
%val4_tmp = call i32 @llvm.ctpop.i32(i32 %val0_tmp)
store volatile i32 %val4_tmp, i32* @var32
@@ -133,7 +133,7 @@ define void @ctpop_i32() {
}
define void @ctpop_i64() {
-; CHECK: ctpop_i64:
+; CHECK-LABEL: ctpop_i64:
%val0_tmp = load i64* @var64
%val4_tmp = call i64 @llvm.ctpop.i64(i64 %val0_tmp)
store volatile i64 %val4_tmp, i64* @var64
diff --git a/test/CodeGen/AArch64/dp2.ll b/test/CodeGen/AArch64/dp2.ll
index 4c740f6b8623..48b0701ad1fa 100644
--- a/test/CodeGen/AArch64/dp2.ll
+++ b/test/CodeGen/AArch64/dp2.ll
@@ -6,7 +6,7 @@
@var64_1 = global i64 0
define void @rorv_i64() {
-; CHECK: rorv_i64:
+; CHECK-LABEL: rorv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val2_tmp = sub i64 64, %val1_tmp
@@ -19,7 +19,7 @@ define void @rorv_i64() {
}
define void @asrv_i64() {
-; CHECK: asrv_i64:
+; CHECK-LABEL: asrv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = ashr i64 %val0_tmp, %val1_tmp
@@ -29,7 +29,7 @@ define void @asrv_i64() {
}
define void @lsrv_i64() {
-; CHECK: lsrv_i64:
+; CHECK-LABEL: lsrv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = lshr i64 %val0_tmp, %val1_tmp
@@ -39,7 +39,7 @@ define void @lsrv_i64() {
}
define void @lslv_i64() {
-; CHECK: lslv_i64:
+; CHECK-LABEL: lslv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = shl i64 %val0_tmp, %val1_tmp
@@ -49,7 +49,7 @@ define void @lslv_i64() {
}
define void @udiv_i64() {
-; CHECK: udiv_i64:
+; CHECK-LABEL: udiv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = udiv i64 %val0_tmp, %val1_tmp
@@ -59,7 +59,7 @@ define void @udiv_i64() {
}
define void @sdiv_i64() {
-; CHECK: sdiv_i64:
+; CHECK-LABEL: sdiv_i64:
%val0_tmp = load i64* @var64_0
%val1_tmp = load i64* @var64_1
%val4_tmp = sdiv i64 %val0_tmp, %val1_tmp
@@ -70,7 +70,7 @@ define void @sdiv_i64() {
define void @lsrv_i32() {
-; CHECK: lsrv_i32:
+; CHECK-LABEL: lsrv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -81,7 +81,7 @@ define void @lsrv_i32() {
}
define void @lslv_i32() {
-; CHECK: lslv_i32:
+; CHECK-LABEL: lslv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -92,7 +92,7 @@ define void @lslv_i32() {
}
define void @rorv_i32() {
-; CHECK: rorv_i32:
+; CHECK-LABEL: rorv_i32:
%val0_tmp = load i32* @var32_0
%val6_tmp = load i32* @var32_1
%val1_tmp = add i32 1, %val6_tmp
@@ -106,7 +106,7 @@ define void @rorv_i32() {
}
define void @asrv_i32() {
-; CHECK: asrv_i32:
+; CHECK-LABEL: asrv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val2_tmp = add i32 1, %val1_tmp
@@ -117,7 +117,7 @@ define void @asrv_i32() {
}
define void @sdiv_i32() {
-; CHECK: sdiv_i32:
+; CHECK-LABEL: sdiv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val4_tmp = sdiv i32 %val0_tmp, %val1_tmp
@@ -127,7 +127,7 @@ define void @sdiv_i32() {
}
define void @udiv_i32() {
-; CHECK: udiv_i32:
+; CHECK-LABEL: udiv_i32:
%val0_tmp = load i32* @var32_0
%val1_tmp = load i32* @var32_1
%val4_tmp = udiv i32 %val0_tmp, %val1_tmp
@@ -139,7 +139,7 @@ define void @udiv_i32() {
; The point of this test is that we may not actually see (shl GPR32:$Val, (zext GPR32:$Val2))
; in the DAG (the RHS may be natively 64-bit), but we should still use the lsl instructions.
define i32 @test_lsl32() {
-; CHECK: test_lsl32:
+; CHECK-LABEL: test_lsl32:
%val = load i32* @var32_0
%ret = shl i32 1, %val
@@ -149,7 +149,7 @@ define i32 @test_lsl32() {
}
define i32 @test_lsr32() {
-; CHECK: test_lsr32:
+; CHECK-LABEL: test_lsr32:
%val = load i32* @var32_0
%ret = lshr i32 1, %val
@@ -159,7 +159,7 @@ define i32 @test_lsr32() {
}
define i32 @test_asr32(i32 %in) {
-; CHECK: test_asr32:
+; CHECK-LABEL: test_asr32:
%val = load i32* @var32_0
%ret = ashr i32 %in, %val
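(Illustrative sketch, not part of the patch: the motivation for switching these labels from CHECK to CHECK-LABEL is that FileCheck then treats each matched label as a hard block boundary, so a pattern intended for one function can no longer be satisfied by output belonging to a later function. A minimal made-up example using the same conventions as the tests above:)

define i32 @first_func() {
; CHECK-LABEL: first_func:
; CHECK: movz w0, #1
  ret i32 1
}

define i32 @second_func() {
; CHECK-LABEL: second_func:
; CHECK: movz w0, #2
  ret i32 2
}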
diff --git a/test/CodeGen/AArch64/elf-extern.ll b/test/CodeGen/AArch64/elf-extern.ll
deleted file mode 100644
index 8bf1b2ff4fa9..000000000000
--- a/test/CodeGen/AArch64/elf-extern.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | llvm-readobj -r | FileCheck %s
-
-; External symbols are a different concept to global variables but should still
-; get relocations and so on when used.
-
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
-
-define i32 @check_extern() {
- call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0)
- ret i32 0
-}
-
-; CHECK: Relocations [
-; CHECK: Section (1) .text {
-; CHECK: 0x{{[0-9,A-F]+}} R_AARCH64_CALL26 memcpy
-; CHECK: }
-; CHECK: ]
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index bc0acc253388..322b3f4522d6 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -51,4 +51,4 @@ define i32* @wibble() {
; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
; CHECK-LARGE: movk x0, #:abs_g0_nc:defined_weak_var
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/extract.ll b/test/CodeGen/AArch64/extract.ll
index 06267816a4e1..62d9ed2fc9d9 100644
--- a/test/CodeGen/AArch64/extract.ll
+++ b/test/CodeGen/AArch64/extract.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i64 @ror_i64(i64 %in) {
-; CHECK: ror_i64:
+; CHECK-LABEL: ror_i64:
%left = shl i64 %in, 19
%right = lshr i64 %in, 45
%val5 = or i64 %left, %right
@@ -10,7 +10,7 @@ define i64 @ror_i64(i64 %in) {
}
define i32 @ror_i32(i32 %in) {
-; CHECK: ror_i32:
+; CHECK-LABEL: ror_i32:
%left = shl i32 %in, 9
%right = lshr i32 %in, 23
%val5 = or i32 %left, %right
@@ -19,7 +19,7 @@ define i32 @ror_i32(i32 %in) {
}
define i32 @extr_i32(i32 %lhs, i32 %rhs) {
-; CHECK: extr_i32:
+; CHECK-LABEL: extr_i32:
%left = shl i32 %lhs, 6
%right = lshr i32 %rhs, 26
%val = or i32 %left, %right
@@ -31,7 +31,7 @@ define i32 @extr_i32(i32 %lhs, i32 %rhs) {
}
define i64 @extr_i64(i64 %lhs, i64 %rhs) {
-; CHECK: extr_i64:
+; CHECK-LABEL: extr_i64:
%right = lshr i64 %rhs, 40
%left = shl i64 %lhs, 24
%val = or i64 %right, %left
@@ -45,7 +45,7 @@ define i64 @extr_i64(i64 %lhs, i64 %rhs) {
; Regression test: a bad experimental pattern crept into git which optimised
; this pattern to a single EXTR.
define i32 @extr_regress(i32 %a, i32 %b) {
-; CHECK: extr_regress:
+; CHECK-LABEL: extr_regress:
%sh1 = shl i32 %a, 14
%sh2 = lshr i32 %b, 14
diff --git a/test/CodeGen/AArch64/fastcc-reserved.ll b/test/CodeGen/AArch64/fastcc-reserved.ll
index e40aa3033bde..c6c050570dd6 100644
--- a/test/CodeGen/AArch64/fastcc-reserved.ll
+++ b/test/CodeGen/AArch64/fastcc-reserved.ll
@@ -7,7 +7,7 @@
declare fastcc void @will_pop([8 x i32], i32 %val)
define fastcc void @foo(i32 %in) {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%addr = alloca i8, i32 %in
@@ -34,7 +34,7 @@ define fastcc void @foo(i32 %in) {
declare void @wont_pop([8 x i32], i32 %val)
define void @foo1(i32 %in) {
-; CHECK: foo1:
+; CHECK-LABEL: foo1:
%addr = alloca i8, i32 %in
; Normal frame setup again
diff --git a/test/CodeGen/AArch64/fastcc.ll b/test/CodeGen/AArch64/fastcc.ll
index 41cde94edc1c..a4cd37858ee4 100644
--- a/test/CodeGen/AArch64/fastcc.ll
+++ b/test/CodeGen/AArch64/fastcc.ll
@@ -5,10 +5,10 @@
; stack, so try to make sure this is respected.
define fastcc void @func_stack0() {
-; CHECK: func_stack0:
+; CHECK-LABEL: func_stack0:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack0:
+; CHECK-TAIL-LABEL: func_stack0:
; CHECK-TAIL: sub sp, sp, #48
@@ -45,10 +45,10 @@ define fastcc void @func_stack0() {
}
define fastcc void @func_stack8([8 x i32], i32 %stacked) {
-; CHECK: func_stack8:
+; CHECK-LABEL: func_stack8:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack8:
+; CHECK-TAIL-LABEL: func_stack8:
; CHECK-TAIL: sub sp, sp, #48
@@ -84,10 +84,10 @@ define fastcc void @func_stack8([8 x i32], i32 %stacked) {
}
define fastcc void @func_stack32([8 x i32], i128 %stacked0, i128 %stacked1) {
-; CHECK: func_stack32:
+; CHECK-LABEL: func_stack32:
; CHECK: sub sp, sp, #48
-; CHECK-TAIL: func_stack32:
+; CHECK-TAIL-LABEL: func_stack32:
; CHECK-TAIL: sub sp, sp, #48
diff --git a/test/CodeGen/AArch64/fcmp.ll b/test/CodeGen/AArch64/fcmp.ll
index ad4a903c9b25..a9518eabb754 100644
--- a/test/CodeGen/AArch64/fcmp.ll
+++ b/test/CodeGen/AArch64/fcmp.ll
@@ -3,7 +3,7 @@
declare void @bar(i32)
define void @test_float(float %a, float %b) {
-; CHECK: test_float:
+; CHECK-LABEL: test_float:
%tst1 = fcmp oeq float %a, %b
br i1 %tst1, label %end, label %t2
@@ -42,7 +42,7 @@ end:
}
define void @test_double(double %a, double %b) {
-; CHECK: test_double:
+; CHECK-LABEL: test_double:
%tst1 = fcmp oeq double %a, %b
br i1 %tst1, label %end, label %t2
diff --git a/test/CodeGen/AArch64/fcvt-fixed.ll b/test/CodeGen/AArch64/fcvt-fixed.ll
index 0f7b95b2a48f..9d66da49437b 100644
--- a/test/CodeGen/AArch64/fcvt-fixed.ll
+++ b/test/CodeGen/AArch64/fcvt-fixed.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @test_fcvtzs(float %flt, double %dbl) {
-; CHECK: test_fcvtzs:
+; CHECK-LABEL: test_fcvtzs:
%fix1 = fmul float %flt, 128.0
%cvt1 = fptosi float %fix1 to i32
@@ -50,7 +50,7 @@ define void @test_fcvtzs(float %flt, double %dbl) {
}
define void @test_fcvtzu(float %flt, double %dbl) {
-; CHECK: test_fcvtzu:
+; CHECK-LABEL: test_fcvtzu:
%fix1 = fmul float %flt, 128.0
%cvt1 = fptoui float %fix1 to i32
@@ -99,7 +99,7 @@ define void @test_fcvtzu(float %flt, double %dbl) {
@vardouble = global double 0.0
define void @test_scvtf(i32 %int, i64 %long) {
-; CHECK: test_scvtf:
+; CHECK-LABEL: test_scvtf:
%cvt1 = sitofp i32 %int to float
%fix1 = fdiv float %cvt1, 128.0
@@ -145,7 +145,7 @@ define void @test_scvtf(i32 %int, i64 %long) {
}
define void @test_ucvtf(i32 %int, i64 %long) {
-; CHECK: test_ucvtf:
+; CHECK-LABEL: test_ucvtf:
%cvt1 = uitofp i32 %int to float
%fix1 = fdiv float %cvt1, 128.0
diff --git a/test/CodeGen/AArch64/fcvt-int.ll b/test/CodeGen/AArch64/fcvt-int.ll
index c771d683a99c..b28eb3ea1bef 100644
--- a/test/CodeGen/AArch64/fcvt-int.ll
+++ b/test/CodeGen/AArch64/fcvt-int.ll
@@ -1,12 +1,12 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i32 @test_floattoi32(float %in) {
-; CHECK: test_floattoi32:
+; CHECK-LABEL: test_floattoi32:
%signed = fptosi float %in to i32
%unsigned = fptoui float %in to i32
-; CHECK: fcvtzu [[UNSIG:w[0-9]+]], {{s[0-9]+}}
-; CHECK: fcvtzs [[SIG:w[0-9]+]], {{s[0-9]+}}
+; CHECK-DAG: fcvtzu [[UNSIG:w[0-9]+]], {{s[0-9]+}}
+; CHECK-DAG: fcvtzs [[SIG:w[0-9]+]], {{s[0-9]+}}
%res = sub i32 %signed, %unsigned
; CHECK: sub {{w[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -16,12 +16,12 @@ define i32 @test_floattoi32(float %in) {
}
define i32 @test_doubletoi32(double %in) {
-; CHECK: test_doubletoi32:
+; CHECK-LABEL: test_doubletoi32:
%signed = fptosi double %in to i32
%unsigned = fptoui double %in to i32
-; CHECK: fcvtzu [[UNSIG:w[0-9]+]], {{d[0-9]+}}
-; CHECK: fcvtzs [[SIG:w[0-9]+]], {{d[0-9]+}}
+; CHECK-DAG: fcvtzu [[UNSIG:w[0-9]+]], {{d[0-9]+}}
+; CHECK-DAG: fcvtzs [[SIG:w[0-9]+]], {{d[0-9]+}}
%res = sub i32 %signed, %unsigned
; CHECK: sub {{w[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -31,12 +31,12 @@ define i32 @test_doubletoi32(double %in) {
}
define i64 @test_floattoi64(float %in) {
-; CHECK: test_floattoi64:
+; CHECK-LABEL: test_floattoi64:
%signed = fptosi float %in to i64
%unsigned = fptoui float %in to i64
-; CHECK: fcvtzu [[UNSIG:x[0-9]+]], {{s[0-9]+}}
-; CHECK: fcvtzs [[SIG:x[0-9]+]], {{s[0-9]+}}
+; CHECK-DAG: fcvtzu [[UNSIG:x[0-9]+]], {{s[0-9]+}}
+; CHECK-DAG: fcvtzs [[SIG:x[0-9]+]], {{s[0-9]+}}
%res = sub i64 %signed, %unsigned
; CHECK: sub {{x[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -46,12 +46,12 @@ define i64 @test_floattoi64(float %in) {
}
define i64 @test_doubletoi64(double %in) {
-; CHECK: test_doubletoi64:
+; CHECK-LABEL: test_doubletoi64:
%signed = fptosi double %in to i64
%unsigned = fptoui double %in to i64
-; CHECK: fcvtzu [[UNSIG:x[0-9]+]], {{d[0-9]+}}
-; CHECK: fcvtzs [[SIG:x[0-9]+]], {{d[0-9]+}}
+; CHECK-DAG: fcvtzu [[UNSIG:x[0-9]+]], {{d[0-9]+}}
+; CHECK-DAG: fcvtzs [[SIG:x[0-9]+]], {{d[0-9]+}}
%res = sub i64 %signed, %unsigned
; CHECK: sub {{x[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -61,12 +61,12 @@ define i64 @test_doubletoi64(double %in) {
}
define float @test_i32tofloat(i32 %in) {
-; CHECK: test_i32tofloat:
+; CHECK-LABEL: test_i32tofloat:
%signed = sitofp i32 %in to float
%unsigned = uitofp i32 %in to float
-; CHECK: ucvtf [[UNSIG:s[0-9]+]], {{w[0-9]+}}
-; CHECK: scvtf [[SIG:s[0-9]+]], {{w[0-9]+}}
+; CHECK-DAG: ucvtf [[UNSIG:s[0-9]+]], {{w[0-9]+}}
+; CHECK-DAG: scvtf [[SIG:s[0-9]+]], {{w[0-9]+}}
%res = fsub float %signed, %unsigned
; CHECK: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -75,12 +75,12 @@ define float @test_i32tofloat(i32 %in) {
}
define double @test_i32todouble(i32 %in) {
-; CHECK: test_i32todouble:
+; CHECK-LABEL: test_i32todouble:
%signed = sitofp i32 %in to double
%unsigned = uitofp i32 %in to double
-; CHECK: ucvtf [[UNSIG:d[0-9]+]], {{w[0-9]+}}
-; CHECK: scvtf [[SIG:d[0-9]+]], {{w[0-9]+}}
+; CHECK-DAG: ucvtf [[UNSIG:d[0-9]+]], {{w[0-9]+}}
+; CHECK-DAG: scvtf [[SIG:d[0-9]+]], {{w[0-9]+}}
%res = fsub double %signed, %unsigned
; CHECK: fsub {{d[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -89,12 +89,12 @@ define double @test_i32todouble(i32 %in) {
}
define float @test_i64tofloat(i64 %in) {
-; CHECK: test_i64tofloat:
+; CHECK-LABEL: test_i64tofloat:
%signed = sitofp i64 %in to float
%unsigned = uitofp i64 %in to float
-; CHECK: ucvtf [[UNSIG:s[0-9]+]], {{x[0-9]+}}
-; CHECK: scvtf [[SIG:s[0-9]+]], {{x[0-9]+}}
+; CHECK-DAG: ucvtf [[UNSIG:s[0-9]+]], {{x[0-9]+}}
+; CHECK-DAG: scvtf [[SIG:s[0-9]+]], {{x[0-9]+}}
%res = fsub float %signed, %unsigned
; CHECK: fsub {{s[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -103,12 +103,12 @@ define float @test_i64tofloat(i64 %in) {
}
define double @test_i64todouble(i64 %in) {
-; CHECK: test_i64todouble:
+; CHECK-LABEL: test_i64todouble:
%signed = sitofp i64 %in to double
%unsigned = uitofp i64 %in to double
-; CHECK: ucvtf [[UNSIG:d[0-9]+]], {{x[0-9]+}}
-; CHECK: scvtf [[SIG:d[0-9]+]], {{x[0-9]+}}
+; CHECK-DAG: ucvtf [[UNSIG:d[0-9]+]], {{x[0-9]+}}
+; CHECK-DAG: scvtf [[SIG:d[0-9]+]], {{x[0-9]+}}
%res = fsub double %signed, %unsigned
; CHECK: sub {{d[0-9]+}}, [[SIG]], [[UNSIG]]
@@ -117,7 +117,7 @@ define double @test_i64todouble(i64 %in) {
}
define i32 @test_bitcastfloattoi32(float %in) {
-; CHECK: test_bitcastfloattoi32:
+; CHECK-LABEL: test_bitcastfloattoi32:
%res = bitcast float %in to i32
; CHECK: fmov {{w[0-9]+}}, {{s[0-9]+}}
@@ -125,7 +125,7 @@ define i32 @test_bitcastfloattoi32(float %in) {
}
define i64 @test_bitcastdoubletoi64(double %in) {
-; CHECK: test_bitcastdoubletoi64:
+; CHECK-LABEL: test_bitcastdoubletoi64:
%res = bitcast double %in to i64
; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
@@ -133,7 +133,7 @@ define i64 @test_bitcastdoubletoi64(double %in) {
}
define float @test_bitcasti32tofloat(i32 %in) {
-; CHECK: test_bitcasti32tofloat:
+; CHECK-LABEL: test_bitcasti32tofloat:
%res = bitcast i32 %in to float
; CHECK: fmov {{s[0-9]+}}, {{w[0-9]+}}
@@ -142,7 +142,7 @@ define float @test_bitcasti32tofloat(i32 %in) {
}
define double @test_bitcasti64todouble(i64 %in) {
-; CHECK: test_bitcasti64todouble:
+; CHECK-LABEL: test_bitcasti64todouble:
%res = bitcast i64 %in to double
; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
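(Sketch only, mirroring the fcvt-int.ll changes above: CHECK-DAG lets the two conversion instructions match in whichever order the scheduler emits them, while the plain CHECK on the following sub still forces both matches to occur before it. The function name is made up for illustration:)

define i32 @sketch_either_order(float %in) {
; CHECK-LABEL: sketch_either_order:
  %signed = fptosi float %in to i32
  %unsigned = fptoui float %in to i32
; CHECK-DAG: fcvtzs [[SIG:w[0-9]+]], {{s[0-9]+}}
; CHECK-DAG: fcvtzu [[UNSIG:w[0-9]+]], {{s[0-9]+}}
  %res = sub i32 %signed, %unsigned
; CHECK: sub {{w[0-9]+}}, [[SIG]], [[UNSIG]]
  ret i32 %res
}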
diff --git a/test/CodeGen/AArch64/flags-multiuse.ll b/test/CodeGen/AArch64/flags-multiuse.ll
index 940c146f0a9f..e99c72833997 100644
--- a/test/CodeGen/AArch64/flags-multiuse.ll
+++ b/test/CodeGen/AArch64/flags-multiuse.ll
@@ -9,7 +9,7 @@ declare void @bar()
@var = global i32 0
define i32 @test_multiflag(i32 %n, i32 %m, i32 %o) {
-; CHECK: test_multiflag:
+; CHECK-LABEL: test_multiflag:
%test = icmp ne i32 %n, %m
; CHECK: cmp [[LHS:w[0-9]+]], [[RHS:w[0-9]+]]
diff --git a/test/CodeGen/AArch64/floatdp_1source.ll b/test/CodeGen/AArch64/floatdp_1source.ll
index c94ba9b57b5a..3d7f8f0369fc 100644
--- a/test/CodeGen/AArch64/floatdp_1source.ll
+++ b/test/CodeGen/AArch64/floatdp_1source.ll
@@ -26,7 +26,7 @@ declare float @nearbyintf(float) readonly
declare double @nearbyint(double) readonly
define void @simple_float() {
-; CHECK: simple_float:
+; CHECK-LABEL: simple_float:
%val1 = load volatile float* @varfloat
%valabs = call float @fabsf(float %val1)
@@ -65,7 +65,7 @@ define void @simple_float() {
}
define void @simple_double() {
-; CHECK: simple_double:
+; CHECK-LABEL: simple_double:
%val1 = load volatile double* @vardouble
%valabs = call double @fabs(double %val1)
@@ -104,7 +104,7 @@ define void @simple_double() {
}
define void @converts() {
-; CHECK: converts:
+; CHECK-LABEL: converts:
%val16 = load volatile half* @varhalf
%val32 = load volatile float* @varfloat
diff --git a/test/CodeGen/AArch64/floatdp_2source.ll b/test/CodeGen/AArch64/floatdp_2source.ll
index b2256b342acf..bb655285ac54 100644
--- a/test/CodeGen/AArch64/floatdp_2source.ll
+++ b/test/CodeGen/AArch64/floatdp_2source.ll
@@ -4,7 +4,7 @@
@vardouble = global double 0.0
define void @testfloat() {
-; CHECK: testfloat:
+; CHECK-LABEL: testfloat:
%val1 = load float* @varfloat
%val2 = fadd float %val1, %val1
@@ -32,7 +32,7 @@ define void @testfloat() {
}
define void @testdouble() {
-; CHECK: testdouble:
+; CHECK-LABEL: testdouble:
%val1 = load double* @vardouble
%val2 = fadd double %val1, %val1
diff --git a/test/CodeGen/AArch64/fp-cond-sel.ll b/test/CodeGen/AArch64/fp-cond-sel.ll
index 56e8f16f9b36..572f42e210b1 100644
--- a/test/CodeGen/AArch64/fp-cond-sel.ll
+++ b/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -4,7 +4,7 @@
@vardouble = global double 0.0
define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
-; CHECK: test_csel:
+; CHECK-LABEL: test_csel:
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, float 0.0, float 1.0
diff --git a/test/CodeGen/AArch64/fp-dp3.ll b/test/CodeGen/AArch64/fp-dp3.ll
index 39db9be15771..590557f1e8ed 100644
--- a/test/CodeGen/AArch64/fp-dp3.ll
+++ b/test/CodeGen/AArch64/fp-dp3.ll
@@ -1,102 +1,137 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -fp-contract=fast | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s -check-prefix=CHECK-NOFAST
declare float @llvm.fma.f32(float, float, float)
declare double @llvm.fma.f64(double, double, double)
define float @test_fmadd(float %a, float %b, float %c) {
-; CHECK: test_fmadd:
+; CHECK-LABEL: test_fmadd:
+; CHECK-NOFAST-LABEL: test_fmadd:
%val = call float @llvm.fma.f32(float %a, float %b, float %c)
; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
}
define float @test_fmsub(float %a, float %b, float %c) {
-; CHECK: test_fmsub:
+; CHECK-LABEL: test_fmsub:
+; CHECK-NOFAST-LABEL: test_fmsub:
%nega = fsub float -0.0, %a
%val = call float @llvm.fma.f32(float %nega, float %b, float %c)
; CHECK: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
}
define float @test_fnmadd(float %a, float %b, float %c) {
-; CHECK: test_fnmadd:
+; CHECK-LABEL: test_fnmadd:
+; CHECK-NOFAST-LABEL: test_fnmadd:
%negc = fsub float -0.0, %c
%val = call float @llvm.fma.f32(float %a, float %b, float %negc)
; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
}
define float @test_fnmsub(float %a, float %b, float %c) {
-; CHECK: test_fnmsub:
+; CHECK-LABEL: test_fnmsub:
+; CHECK-NOFAST-LABEL: test_fnmsub:
%nega = fsub float -0.0, %a
%negc = fsub float -0.0, %c
%val = call float @llvm.fma.f32(float %nega, float %b, float %negc)
; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %val
}
define double @testd_fmadd(double %a, double %b, double %c) {
-; CHECK: testd_fmadd:
+; CHECK-LABEL: testd_fmadd:
+; CHECK-NOFAST-LABEL: testd_fmadd:
%val = call double @llvm.fma.f64(double %a, double %b, double %c)
; CHECK: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NOFAST: fmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
}
define double @testd_fmsub(double %a, double %b, double %c) {
-; CHECK: testd_fmsub:
+; CHECK-LABEL: testd_fmsub:
+; CHECK-NOFAST-LABEL: testd_fmsub:
%nega = fsub double -0.0, %a
%val = call double @llvm.fma.f64(double %nega, double %b, double %c)
; CHECK: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NOFAST: fmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
}
define double @testd_fnmadd(double %a, double %b, double %c) {
-; CHECK: testd_fnmadd:
+; CHECK-LABEL: testd_fnmadd:
+; CHECK-NOFAST-LABEL: testd_fnmadd:
%negc = fsub double -0.0, %c
%val = call double @llvm.fma.f64(double %a, double %b, double %negc)
; CHECK: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NOFAST: fnmadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
}
define double @testd_fnmsub(double %a, double %b, double %c) {
-; CHECK: testd_fnmsub:
+; CHECK-LABEL: testd_fnmsub:
+; CHECK-NOFAST-LABEL: testd_fnmsub:
%nega = fsub double -0.0, %a
%negc = fsub double -0.0, %c
%val = call double @llvm.fma.f64(double %nega, double %b, double %negc)
; CHECK: fnmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-NOFAST: fnmsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
ret double %val
}
define float @test_fmadd_unfused(float %a, float %b, float %c) {
-; CHECK: test_fmadd_unfused:
+; CHECK-LABEL: test_fmadd_unfused:
+; CHECK-NOFAST-LABEL: test_fmadd_unfused:
%prod = fmul float %b, %c
%sum = fadd float %a, %prod
; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-NOT: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %sum
}
define float @test_fmsub_unfused(float %a, float %b, float %c) {
-; CHECK: test_fmsub_unfused:
+; CHECK-LABEL: test_fmsub_unfused:
+; CHECK-NOFAST-LABEL: test_fmsub_unfused:
%prod = fmul float %b, %c
%diff = fsub float %a, %prod
; CHECK: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-NOT: fmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %diff
}
define float @test_fnmadd_unfused(float %a, float %b, float %c) {
-; CHECK: test_fnmadd_unfused:
+; CHECK-LABEL: test_fnmadd_unfused:
+; CHECK-NOFAST-LABEL: test_fnmadd_unfused:
%nega = fsub float -0.0, %a
%prod = fmul float %b, %c
%sum = fadd float %nega, %prod
; CHECK: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-NOT: fnmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
ret float %sum
}
define float @test_fnmsub_unfused(float %a, float %b, float %c) {
-; CHECK: test_fnmsub_unfused:
+; CHECK-LABEL: test_fnmsub_unfused:
+; CHECK-NOFAST-LABEL: test_fnmsub_unfused:
%nega = fsub float -0.0, %a
%prod = fmul float %b, %c
%diff = fsub float %nega, %prod
; CHECK: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-NOT: fnmsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-DAG: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-DAG: fneg {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST-DAG: fsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+; CHECK-NOFAST: ret
ret float %diff
}
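(Sketch, assuming the two RUN lines added at the top of fp-dp3.ll above: the *_unfused cases rely on -fp-contract=fast being the only mode allowed to merge a separate fmul/fadd pair into fmadd, so the default run must keep them distinct. A condensed version of that check:)

define float @sketch_contract(float %a, float %b, float %c) {
; CHECK-LABEL: sketch_contract:
; CHECK-NOFAST-LABEL: sketch_contract:
  %prod = fmul float %b, %c
  %sum = fadd float %a, %prod
; CHECK: fmadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST-NOT: fmadd
; CHECK-NOFAST: fmul {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
; CHECK-NOFAST: fadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
  ret float %sum
}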
diff --git a/test/CodeGen/AArch64/fp128-folding.ll b/test/CodeGen/AArch64/fp128-folding.ll
index b5bdcf4f37b4..b1c560d2b648 100644
--- a/test/CodeGen/AArch64/fp128-folding.ll
+++ b/test/CodeGen/AArch64/fp128-folding.ll
@@ -5,7 +5,7 @@ declare void @bar(i8*, i8*, i32*)
; which is not supported.
define fp128 @test_folding() {
-; CHECK: test_folding:
+; CHECK-LABEL: test_folding:
%l = alloca i32
store i32 42, i32* %l
%val = load i32* %l
@@ -14,4 +14,4 @@ define fp128 @test_folding() {
; successfully.
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI
ret fp128 %fpval
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/fp128.ll b/test/CodeGen/AArch64/fp128.ll
index 258d34b8f81f..c312bb1917ab 100644
--- a/test/CodeGen/AArch64/fp128.ll
+++ b/test/CodeGen/AArch64/fp128.ll
@@ -4,7 +4,7 @@
@rhs = global fp128 zeroinitializer
define fp128 @test_add() {
-; CHECK: test_add:
+; CHECK-LABEL: test_add:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -17,7 +17,7 @@ define fp128 @test_add() {
}
define fp128 @test_sub() {
-; CHECK: test_sub:
+; CHECK-LABEL: test_sub:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -30,7 +30,7 @@ define fp128 @test_sub() {
}
define fp128 @test_mul() {
-; CHECK: test_mul:
+; CHECK-LABEL: test_mul:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -43,7 +43,7 @@ define fp128 @test_mul() {
}
define fp128 @test_div() {
-; CHECK: test_div:
+; CHECK-LABEL: test_div:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -59,7 +59,7 @@ define fp128 @test_div() {
@var64 = global i64 0
define void @test_fptosi() {
-; CHECK: test_fptosi:
+; CHECK-LABEL: test_fptosi:
%val = load fp128* @lhs
%val32 = fptosi fp128 %val to i32
@@ -74,7 +74,7 @@ define void @test_fptosi() {
}
define void @test_fptoui() {
-; CHECK: test_fptoui:
+; CHECK-LABEL: test_fptoui:
%val = load fp128* @lhs
%val32 = fptoui fp128 %val to i32
@@ -89,7 +89,7 @@ define void @test_fptoui() {
}
define void @test_sitofp() {
-; CHECK: test_sitofp:
+; CHECK-LABEL: test_sitofp:
%src32 = load i32* @var32
%val32 = sitofp i32 %src32 to fp128
@@ -105,7 +105,7 @@ define void @test_sitofp() {
}
define void @test_uitofp() {
-; CHECK: test_uitofp:
+; CHECK-LABEL: test_uitofp:
%src32 = load i32* @var32
%val32 = uitofp i32 %src32 to fp128
@@ -121,7 +121,7 @@ define void @test_uitofp() {
}
define i1 @test_setcc1() {
-; CHECK: test_setcc1:
+; CHECK-LABEL: test_setcc1:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -140,7 +140,7 @@ define i1 @test_setcc1() {
}
define i1 @test_setcc2() {
-; CHECK: test_setcc2:
+; CHECK-LABEL: test_setcc2:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -150,14 +150,14 @@ define i1 @test_setcc2() {
; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
%val = fcmp ugt fp128 %lhs, %rhs
-; CHECK: bl __unordtf2
-; CHECK: mov x[[UNORDERED:[0-9]+]], x0
-
; CHECK: bl __gttf2
; CHECK: cmp w0, #0
; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le
-; CHECK: cmp w[[UNORDERED]], #0
+
+; CHECK: bl __unordtf2
+; CHECK: cmp w0, #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
+
; CHECK: orr w0, [[UNORDERED]], [[GT]]
ret i1 %val
@@ -165,7 +165,7 @@ define i1 @test_setcc2() {
}
define i32 @test_br_cc() {
-; CHECK: test_br_cc:
+; CHECK-LABEL: test_br_cc:
%lhs = load fp128* @lhs
%rhs = load fp128* @rhs
@@ -174,15 +174,14 @@ define i32 @test_br_cc() {
; olt == !uge, which is what LLVM unfortunately "optimizes" this to.
%cond = fcmp olt fp128 %lhs, %rhs
-; CHECK: bl __unordtf2
-; CHECK: mov x[[UNORDERED:[0-9]+]], x0
-
; CHECK: bl __getf2
; CHECK: cmp w0, #0
-
; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt
-; CHECK: cmp w[[UNORDERED]], #0
+
+; CHECK: bl __unordtf2
+; CHECK: cmp w0, #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
+
; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
br i1 %cond, label %iftrue, label %iffalse
@@ -202,7 +201,7 @@ iffalse:
}
define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
-; CHECK: test_select:
+; CHECK-LABEL: test_select:
%val = select i1 %cond, fp128 %lhs, fp128 %rhs
store fp128 %val, fp128* @lhs
@@ -222,7 +221,7 @@ define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
@vardouble = global double 0.0
define void @test_round() {
-; CHECK: test_round:
+; CHECK-LABEL: test_round:
%val = load fp128* @lhs
@@ -240,7 +239,7 @@ define void @test_round() {
}
define void @test_extend() {
-; CHECK: test_extend:
+; CHECK-LABEL: test_extend:
%val = load fp128* @lhs
@@ -265,7 +264,7 @@ define fp128 @test_neg(fp128 %in) {
; Make sure the weird hex constant below *is* -0.0
; CHECK-NEXT: fp128 -0
-; CHECK: test_neg:
+; CHECK-LABEL: test_neg:
; Could in principle be optimized to fneg which we can't select, this makes
; sure that doesn't happen.
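(Side note on the reordered libcall sequences in test_setcc2 and test_br_cc above, spelled out for the ugt case in test_setcc2; the names follow the FileCheck captures in that test:)

;   [[GT]]        = (__gttf2(lhs, rhs)    >  0) ? 1 : 0   ; ordered greater-than
;   [[UNORDERED]] = (__unordtf2(lhs, rhs) != 0) ? 1 : 0   ; either operand is NaN
;   ugt(lhs, rhs) = [[UNORDERED]] | [[GT]]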
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
index fd28aeef9291..b8f716959449 100644
--- a/test/CodeGen/AArch64/fpimm.ll
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -4,31 +4,33 @@
@varf64 = global double 0.0
define void @check_float() {
-; CHECK: check_float:
+; CHECK-LABEL: check_float:
%val = load float* @varf32
%newval1 = fadd float %val, 8.5
store volatile float %newval1, float* @varf32
-; CHECK: fmov {{s[0-9]+}}, #8.5
+; CHECK-DAG: fmov [[EIGHT5:s[0-9]+]], #8.5
%newval2 = fadd float %val, 128.0
store volatile float %newval2, float* @varf32
-; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI0_0
+; CHECK-DAG: ldr [[HARD:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI0_0
+; CHECK: ret
ret void
}
define void @check_double() {
-; CHECK: check_double:
+; CHECK-LABEL: check_double:
%val = load double* @varf64
%newval1 = fadd double %val, 8.5
store volatile double %newval1, double* @varf64
-; CHECK: fmov {{d[0-9]+}}, #8.5
+; CHECK-DAG: fmov {{d[0-9]+}}, #8.5
%newval2 = fadd double %val, 128.0
store volatile double %newval2, double* @varf64
-; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
+; CHECK-DAG: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
+; CHECK: ret
ret void
}
diff --git a/test/CodeGen/AArch64/frameaddr.ll b/test/CodeGen/AArch64/frameaddr.ll
new file mode 100644
index 000000000000..182704bd6541
--- /dev/null
+++ b/test/CodeGen/AArch64/frameaddr.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+
+define i8* @t() nounwind {
+entry:
+; CHECK-LABEL: t:
+; CHECK: mov x0, x29
+ %0 = call i8* @llvm.frameaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @t2() nounwind {
+entry:
+; CHECK-LABEL: t2:
+; CHECK: ldr x[[reg:[0-9]+]], [x29]
+; CHECK: ldr x[[reg]], [x[[reg]]]
+ %0 = call i8* @llvm.frameaddress(i32 2)
+ ret i8* %0
+}
+
+declare i8* @llvm.frameaddress(i32) nounwind readnone
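(Sketch, assuming the same lowering as @t2 above: each extra level passed to llvm.frameaddress should add one load through the saved frame-pointer chain, so depth 1 would be a single load off x29. Hypothetical extra test:)

define i8* @sketch_depth1() nounwind {
entry:
; CHECK-LABEL: sketch_depth1:
; CHECK: ldr {{x[0-9]+}}, [x29]
  %fp = call i8* @llvm.frameaddress(i32 1)
  ret i8* %fp
}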
diff --git a/test/CodeGen/AArch64/func-argpassing.ll b/test/CodeGen/AArch64/func-argpassing.ll
index 78fde6a3c33a..430d77f9e932 100644
--- a/test/CodeGen/AArch64/func-argpassing.ll
+++ b/test/CodeGen/AArch64/func-argpassing.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
%myStruct = type { i64 , i8, i32 }
@@ -11,7 +12,7 @@
@varstruct = global %myStruct zeroinitializer
define void @take_i8s(i8 %val1, i8 %val2) {
-; CHECK: take_i8s:
+; CHECK-LABEL: take_i8s:
store i8 %val2, i8* @var8
; Not using w1 may be technically allowed, but it would indicate a
; problem in itself.
@@ -20,9 +21,10 @@ define void @take_i8s(i8 %val1, i8 %val2) {
}
define void @add_floats(float %val1, float %val2) {
-; CHECK: add_floats:
+; CHECK-LABEL: add_floats:
%newval = fadd float %val1, %val2
; CHECK: fadd [[ADDRES:s[0-9]+]], s0, s1
+; CHECK-NOFP-NOT: fadd
store float %newval, float* @varfloat
; CHECK: str [[ADDRES]], [{{x[0-9]+}}, #:lo12:varfloat]
ret void
@@ -31,19 +33,19 @@ define void @add_floats(float %val1, float %val2) {
; byval pointers should be allocated to the stack and copied as if
; with memcpy.
define void @take_struct(%myStruct* byval %structval) {
-; CHECK: take_struct:
+; CHECK-LABEL: take_struct:
%addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
%addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
- %val0 = load i32* %addr0
+ %val0 = load volatile i32* %addr0
; Some weird move means x0 is used for one access
; CHECK: ldr [[REG32:w[0-9]+]], [{{x[0-9]+|sp}}, #12]
- store i32 %val0, i32* @var32
+ store volatile i32 %val0, i32* @var32
; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
- %val1 = load i64* %addr1
+ %val1 = load volatile i64* %addr1
; CHECK: ldr [[REG64:x[0-9]+]], [{{x[0-9]+|sp}}]
- store i64 %val1, i64* @var64
+ store volatile i64 %val1, i64* @var64
; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
ret void
@@ -51,19 +53,19 @@ define void @take_struct(%myStruct* byval %structval) {
; %structval should be at sp + 16
define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) {
-; CHECK: check_byval_align:
+; CHECK-LABEL: check_byval_align:
%addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
%addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
- %val0 = load i32* %addr0
+ %val0 = load volatile i32* %addr0
; Some weird move means x0 is used for one access
; CHECK: add x[[STRUCTVAL_ADDR:[0-9]+]], sp, #16
; CHECK: ldr [[REG32:w[0-9]+]], [x[[STRUCTVAL_ADDR]], #12]
store i32 %val0, i32* @var32
; CHECK: str [[REG32]], [{{x[0-9]+}}, #:lo12:var32]
- %val1 = load i64* %addr1
+ %val1 = load volatile i64* %addr1
; CHECK: ldr [[REG64:x[0-9]+]], [sp, #16]
store i64 %val1, i64* @var64
; CHECK: str [[REG64]], [{{x[0-9]+}}, #:lo12:var64]
@@ -72,7 +74,7 @@ define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %st
}
define i32 @return_int() {
-; CHECK: return_int:
+; CHECK-LABEL: return_int:
%val = load i32* @var32
ret i32 %val
; CHECK: ldr w0, [{{x[0-9]+}}, #:lo12:var32]
@@ -81,16 +83,17 @@ define i32 @return_int() {
}
define double @return_double() {
-; CHECK: return_double:
+; CHECK-LABEL: return_double:
ret double 3.14
; CHECK: ldr d0, [{{x[0-9]+}}, #:lo12:.LCPI
+; CHECK-NOFP-NOT: ldr d0,
}
; This is the kind of IR clang will produce for returning a struct
; small enough to go into registers. Not all that pretty, but it
; works.
define [2 x i64] @return_struct() {
-; CHECK: return_struct:
+; CHECK-LABEL: return_struct:
%addr = bitcast %myStruct* @varstruct to [2 x i64]*
%val = load [2 x i64]* %addr
ret [2 x i64] %val
@@ -107,7 +110,7 @@ define [2 x i64] @return_struct() {
; structs larger than 16 bytes, but C semantics can still be provided
; if LLVM does it to %myStruct too. So this is the simplest check
define void @return_large_struct(%myStruct* sret %retval) {
-; CHECK: return_large_struct:
+; CHECK-LABEL: return_large_struct:
%addr0 = getelementptr %myStruct* %retval, i64 0, i32 0
%addr1 = getelementptr %myStruct* %retval, i64 0, i32 1
%addr2 = getelementptr %myStruct* %retval, i64 0, i32 2
@@ -128,19 +131,20 @@ define void @return_large_struct(%myStruct* sret %retval) {
define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45,
i32* %var6, %myStruct* byval %struct, i32* byval %stacked,
double %notstacked) {
-; CHECK: struct_on_stack:
+; CHECK-LABEL: struct_on_stack:
%addr = getelementptr %myStruct* %struct, i64 0, i32 0
- %val64 = load i64* %addr
- store i64 %val64, i64* @var64
+ %val64 = load volatile i64* %addr
+ store volatile i64 %val64, i64* @var64
; Currently nothing on local stack, so struct should be at sp
; CHECK: ldr [[VAL64:x[0-9]+]], [sp]
; CHECK: str [[VAL64]], [{{x[0-9]+}}, #:lo12:var64]
- store double %notstacked, double* @vardouble
+ store volatile double %notstacked, double* @vardouble
; CHECK-NOT: ldr d0
; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble
+; CHECK-NOFP-NOT: str d0,
- %retval = load i32* %stacked
+ %retval = load volatile i32* %stacked
ret i32 %retval
; CHECK: ldr w0, [sp, #16]
}
@@ -148,7 +152,7 @@ define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var
define void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
float %var4, float %var5, float %var6, float %var7,
float %var8) {
-; CHECK: stacked_fpu:
+; CHECK-LABEL: stacked_fpu:
store float %var8, float* @varfloat
; Beware as above: the offset would be different on big-endian
; machines if the first ldr were changed to use s-registers.
@@ -176,17 +180,17 @@ define void @check_i128_stackalign(i32 %val0, i32 %val1, i32 %val2, i32 %val3,
; CHECK: check_i128_stackalign
store i128 %stack2, i128* @var128
; Nothing local on stack in current codegen, so first stack is 16 away
-; CHECK: ldr {{x[0-9]+}}, [sp, #16]
+; CHECK: add x[[REG:[0-9]+]], sp, #16
+; CHECK: ldr {{x[0-9]+}}, [x[[REG]], #8]
; Important point is that we address sp+24 for second dword
-; CHECK: add [[REG:x[0-9]+]], sp, #16
-; CHECK: ldr {{x[0-9]+}}, {{\[}}[[REG]], #8]
+; CHECK: ldr {{x[0-9]+}}, [sp, #16]
ret void
}
declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
define i32 @test_extern() {
-; CHECK: test_extern:
+; CHECK-LABEL: test_extern:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* undef, i32 undef, i32 4, i1 0)
; CHECK: bl memcpy
ret i32 0
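(Sketch mirroring the add_floats change earlier in this file; the function name is illustrative and @varfloat is the global already used above. Under the new -mattr=-fp-armv8 run the only assertion is the negative one, i.e. that no s-register arithmetic is emitted when the FP unit is unavailable:)

define void @sketch_nofp_add(float %val1, float %val2) {
; CHECK-LABEL: sketch_nofp_add:
  %sum = fadd float %val1, %val2
; CHECK: fadd {{s[0-9]+}}, s0, s1
; CHECK-NOFP-NOT: fadd
  store float %sum, float* @varfloat
  ret void
}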
diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll
index 13b689c40886..ac188bb3bb57 100644
--- a/test/CodeGen/AArch64/func-calls.ll
+++ b/test/CodeGen/AArch64/func-calls.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
%myStruct = type { i64 , i8, i32 }
@@ -17,20 +18,22 @@ declare void @take_i8s(i8 %val1, i8 %val2)
declare void @take_floats(float %val1, float %val2)
define void @simple_args() {
-; CHECK: simple_args:
+; CHECK-LABEL: simple_args:
%char1 = load i8* @var8
%char2 = load i8* @var8_2
call void @take_i8s(i8 %char1, i8 %char2)
-; CHECK: ldrb w0, [{{x[0-9]+}}, #:lo12:var8]
-; CHECK: ldrb w1, [{{x[0-9]+}}, #:lo12:var8_2]
+; CHECK-DAG: ldrb w0, [{{x[0-9]+}}, #:lo12:var8]
+; CHECK-DAG: ldrb w1, [{{x[0-9]+}}, #:lo12:var8_2]
; CHECK: bl take_i8s
%float1 = load float* @varfloat
%float2 = load float* @varfloat_2
call void @take_floats(float %float1, float %float2)
-; CHECK: ldr s1, [{{x[0-9]+}}, #:lo12:varfloat_2]
-; CHECK: ldr s0, [{{x[0-9]+}}, #:lo12:varfloat]
+; CHECK-DAG: ldr s1, [{{x[0-9]+}}, #:lo12:varfloat_2]
+; CHECK-DAG: ldr s0, [{{x[0-9]+}}, #:lo12:varfloat]
; CHECK: bl take_floats
+; CHECK-NOFP-NOT: ldr s1,
+; CHECK-NOFP-NOT: ldr s0,
ret void
}
@@ -41,7 +44,7 @@ declare [2 x i64] @return_smallstruct()
declare void @return_large_struct(%myStruct* sret %retval)
define void @simple_rets() {
-; CHECK: simple_rets:
+; CHECK-LABEL: simple_rets:
%int = call i32 @return_int()
store i32 %int, i32* @var32
@@ -52,6 +55,7 @@ define void @simple_rets() {
store double %dbl, double* @vardouble
; CHECK: bl return_double
; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
+; CHECK-NOFP-NOT: str d0,
%arr = call [2 x i64] @return_smallstruct()
store [2 x i64] %arr, [2 x i64]* @varsmallstruct
@@ -75,17 +79,19 @@ declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3,
float %var8)
define void @check_stack_args() {
+; CHECK-LABEL: check_stack_args:
call i32 @struct_on_stack(i8 0, i16 12, i32 42, i64 99, i128 1,
i32* @var32, %myStruct* byval @varstruct,
i32 999, double 1.0)
; Want to check that the final double is passed in registers and
; that varstruct is passed on the stack. Rather dependent on how a
; memcpy gets created, but the following works for now.
-; CHECK: mov x0, sp
-; CHECK: str {{w[0-9]+}}, [x0]
-; CHECK: str {{w[0-9]+}}, [x0, #12]
-; CHECK: fmov d0,
+; CHECK: mov x[[SPREG:[0-9]+]], sp
+; CHECK-DAG: str {{w[0-9]+}}, [x[[SPREG]]]
+; CHECK-DAG: str {{w[0-9]+}}, [x[[SPREG]], #12]
+; CHECK-DAG: fmov d0,
; CHECK: bl struct_on_stack
+; CHECK-NOFP-NOT: fmov
call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
float -2.0, float -8.0, float 16.0, float 1.0,
@@ -106,7 +112,7 @@ declare void @check_i128_regalign(i32 %val0, i128 %val1)
define void @check_i128_align() {
-; CHECK: check_i128_align:
+; CHECK-LABEL: check_i128_align:
%val = load i128* @var128
call void @check_i128_stackalign(i32 0, i32 1, i32 2, i32 3,
i32 4, i32 5, i32 6, i32 7,
@@ -130,7 +136,7 @@ define void @check_i128_align() {
@fptr = global void()* null
define void @check_indirect_call() {
-; CHECK: check_indirect_call:
+; CHECK-LABEL: check_indirect_call:
%func = load void()** @fptr
call void %func()
; CHECK: ldr [[FPTR:x[0-9]+]], [{{x[0-9]+}}, #:lo12:fptr]
diff --git a/test/CodeGen/AArch64/global-alignment.ll b/test/CodeGen/AArch64/global-alignment.ll
index 8ed6e551cdeb..56e5cba519c1 100644
--- a/test/CodeGen/AArch64/global-alignment.ll
+++ b/test/CodeGen/AArch64/global-alignment.ll
@@ -5,7 +5,7 @@
@var32_align64 = global [3 x i32] zeroinitializer, align 8
define i64 @test_align32() {
-; CHECK: test_align32:
+; CHECK-LABEL: test_align32:
%addr = bitcast [3 x i32]* @var32 to i64*
; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
@@ -19,7 +19,7 @@ define i64 @test_align32() {
}
define i64 @test_align64() {
-; CHECK: test_align64:
+; CHECK-LABEL: test_align64:
%addr = bitcast [3 x i64]* @var64 to i64*
; However, var64 *is* properly aligned and emitting an adrp/add/ldr would be
@@ -33,7 +33,7 @@ define i64 @test_align64() {
}
define i64 @test_var32_align64() {
-; CHECK: test_var32_align64:
+; CHECK-LABEL: test_var32_align64:
%addr = bitcast [3 x i32]* @var32_align64 to i64*
; Since @var32 is only guaranteed to be aligned to 32-bits, it's invalid to
@@ -49,7 +49,7 @@ define i64 @test_var32_align64() {
@yet_another_var = external global {i32, i32}
define i64 @test_yet_another_var() {
-; CHECK: test_yet_another_var:
+; CHECK-LABEL: test_yet_another_var:
; @yet_another_var has a preferred alignment of 8, but that's not enough if
; we're going to be linking against other things. Its ABI alignment is only 4
@@ -62,7 +62,7 @@ define i64 @test_yet_another_var() {
}
define i64()* @test_functions() {
-; CHECK: test_functions:
+; CHECK-LABEL: test_functions:
ret i64()* @test_yet_another_var
; CHECK: adrp [[HIBITS:x[0-9]+]], test_yet_another_var
; CHECK: add x0, [[HIBITS]], #:lo12:test_yet_another_var
diff --git a/test/CodeGen/AArch64/got-abuse.ll b/test/CodeGen/AArch64/got-abuse.ll
index c474e5845a64..8b06031c88f7 100644
--- a/test/CodeGen/AArch64/got-abuse.ll
+++ b/test/CodeGen/AArch64/got-abuse.ll
@@ -13,7 +13,7 @@ declare void @consume(i32)
declare void @func()
define void @foo() nounwind {
-; CHECK: foo:
+; CHECK-LABEL: foo:
entry:
call void @consume(i32 ptrtoint (void ()* @func to i32))
; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:func
diff --git a/test/CodeGen/AArch64/i128-align.ll b/test/CodeGen/AArch64/i128-align.ll
index f019ea0a6706..21ca7eda66bb 100644
--- a/test/CodeGen/AArch64/i128-align.ll
+++ b/test/CodeGen/AArch64/i128-align.ll
@@ -5,7 +5,7 @@
@var = global %struct zeroinitializer
define i64 @check_size() {
-; CHECK: check_size:
+; CHECK-LABEL: check_size:
%starti = ptrtoint %struct* @var to i64
%endp = getelementptr %struct* @var, i64 1
@@ -17,7 +17,7 @@ define i64 @check_size() {
}
define i64 @check_field() {
-; CHECK: check_field:
+; CHECK-LABEL: check_field:
%starti = ptrtoint %struct* @var to i64
%endp = getelementptr %struct* @var, i64 0, i32 1
@@ -26,4 +26,4 @@ define i64 @check_field() {
%diff = sub i64 %endi, %starti
ret i64 %diff
; CHECK: movz x0, #16
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/illegal-float-ops.ll b/test/CodeGen/AArch64/illegal-float-ops.ll
index 446151b8ffac..03c6d8d10087 100644
--- a/test/CodeGen/AArch64/illegal-float-ops.ll
+++ b/test/CodeGen/AArch64/illegal-float-ops.ll
@@ -9,7 +9,7 @@ declare double @llvm.cos.f64(double)
declare fp128 @llvm.cos.f128(fp128)
define void @test_cos(float %float, double %double, fp128 %fp128) {
-; CHECK: test_cos:
+; CHECK-LABEL: test_cos:
%cosfloat = call float @llvm.cos.f32(float %float)
store float %cosfloat, float* @varfloat
@@ -31,7 +31,7 @@ declare double @llvm.exp.f64(double)
declare fp128 @llvm.exp.f128(fp128)
define void @test_exp(float %float, double %double, fp128 %fp128) {
-; CHECK: test_exp:
+; CHECK-LABEL: test_exp:
%expfloat = call float @llvm.exp.f32(float %float)
store float %expfloat, float* @varfloat
@@ -53,7 +53,7 @@ declare double @llvm.exp2.f64(double)
declare fp128 @llvm.exp2.f128(fp128)
define void @test_exp2(float %float, double %double, fp128 %fp128) {
-; CHECK: test_exp2:
+; CHECK-LABEL: test_exp2:
%exp2float = call float @llvm.exp2.f32(float %float)
store float %exp2float, float* @varfloat
@@ -75,7 +75,7 @@ declare double @llvm.log.f64(double)
declare fp128 @llvm.log.f128(fp128)
define void @test_log(float %float, double %double, fp128 %fp128) {
-; CHECK: test_log:
+; CHECK-LABEL: test_log:
%logfloat = call float @llvm.log.f32(float %float)
store float %logfloat, float* @varfloat
@@ -97,7 +97,7 @@ declare double @llvm.log2.f64(double)
declare fp128 @llvm.log2.f128(fp128)
define void @test_log2(float %float, double %double, fp128 %fp128) {
-; CHECK: test_log2:
+; CHECK-LABEL: test_log2:
%log2float = call float @llvm.log2.f32(float %float)
store float %log2float, float* @varfloat
@@ -119,7 +119,7 @@ declare double @llvm.log10.f64(double)
declare fp128 @llvm.log10.f128(fp128)
define void @test_log10(float %float, double %double, fp128 %fp128) {
-; CHECK: test_log10:
+; CHECK-LABEL: test_log10:
%log10float = call float @llvm.log10.f32(float %float)
store float %log10float, float* @varfloat
@@ -141,7 +141,7 @@ declare double @llvm.sin.f64(double)
declare fp128 @llvm.sin.f128(fp128)
define void @test_sin(float %float, double %double, fp128 %fp128) {
-; CHECK: test_sin:
+; CHECK-LABEL: test_sin:
%sinfloat = call float @llvm.sin.f32(float %float)
store float %sinfloat, float* @varfloat
@@ -163,7 +163,7 @@ declare double @llvm.pow.f64(double, double)
declare fp128 @llvm.pow.f128(fp128, fp128)
define void @test_pow(float %float, double %double, fp128 %fp128) {
-; CHECK: test_pow:
+; CHECK-LABEL: test_pow:
%powfloat = call float @llvm.pow.f32(float %float, float %float)
store float %powfloat, float* @varfloat
@@ -185,7 +185,7 @@ declare double @llvm.powi.f64(double, i32)
declare fp128 @llvm.powi.f128(fp128, i32)
define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128) {
-; CHECK: test_powi:
+; CHECK-LABEL: test_powi:
%powifloat = call float @llvm.powi.f32(float %float, i32 %exponent)
store float %powifloat, float* @varfloat
@@ -203,7 +203,7 @@ define void @test_powi(float %float, double %double, i32 %exponent, fp128 %fp128
}
define void @test_frem(float %float, double %double, fp128 %fp128) {
-; CHECK: test_frem:
+; CHECK-LABEL: test_frem:
%fremfloat = frem float %float, %float
store float %fremfloat, float* @varfloat
@@ -219,3 +219,29 @@ define void @test_frem(float %float, double %double, fp128 %fp128) {
ret void
}
+
+declare fp128 @llvm.fma.f128(fp128, fp128, fp128)
+
+define void @test_fma(fp128 %fp128) {
+; CHECK-LABEL: test_fma:
+
+ %fmafp128 = call fp128 @llvm.fma.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
+ store fp128 %fmafp128, fp128* @varfp128
+; CHECK: bl fmal
+
+ ret void
+}
+
+declare fp128 @llvm.fmuladd.f128(fp128, fp128, fp128)
+
+define void @test_fmuladd(fp128 %fp128) {
+; CHECK-LABEL: test_fmuladd:
+
+ %fmuladdfp128 = call fp128 @llvm.fmuladd.f128(fp128 %fp128, fp128 %fp128, fp128 %fp128)
+ store fp128 %fmuladdfp128, fp128* @varfp128
+; CHECK-NOT: bl fmal
+; CHECK: bl __multf3
+; CHECK: bl __addtf3
+
+ ret void
+}
diff --git a/test/CodeGen/AArch64/init-array.ll b/test/CodeGen/AArch64/init-array.ll
index d80be8f3a639..3ff1c1a86ec6 100644
--- a/test/CodeGen/AArch64/init-array.ll
+++ b/test/CodeGen/AArch64/init-array.ll
@@ -6,4 +6,4 @@ define internal void @_GLOBAL__I_a() section ".text.startup" {
@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
-; CHECK: .section .init_array
\ No newline at end of file
+; CHECK: .section .init_array
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badI.ll b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
index c39c57f05822..61bbfc201354 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badI.ll
@@ -4,4 +4,4 @@ define void @foo() {
; Out of range immediate for I.
call void asm sideeffect "add x0, x0, $0", "I"(i32 4096)
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
index 47c5f98bf009..40746e1528ce 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK.ll
@@ -4,4 +4,4 @@ define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
call void asm sideeffect "and w0, w0, $0", "K"(i32 13)
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
index 7a5b99e23b3d..2c5338191fde 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badK2.ll
@@ -4,4 +4,4 @@ define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
call void asm sideeffect "and w0, w0, $0", "K"(i64 4294967296)
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/inline-asm-constraints-badL.ll b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
index 4f0039865a35..d82d5a2ee4d0 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints-badL.ll
@@ -4,4 +4,4 @@ define void @foo() {
; 32-bit bitpattern ending in 1101 can't be produced.
call void asm sideeffect "and x0, x0, $0", "L"(i32 13)
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/inline-asm-constraints.ll b/test/CodeGen/AArch64/inline-asm-constraints.ll
index c232f3208cfa..18a3b37b41d1 100644
--- a/test/CodeGen/AArch64/inline-asm-constraints.ll
+++ b/test/CodeGen/AArch64/inline-asm-constraints.ll
@@ -1,21 +1,21 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
define i64 @test_inline_constraint_r(i64 %base, i32 %offset) {
-; CHECK: test_inline_constraint_r:
+; CHECK-LABEL: test_inline_constraint_r:
%val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 %base, i32 %offset)
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
ret i64 %val
}
define i16 @test_small_reg(i16 %lhs, i16 %rhs) {
-; CHECK: test_small_reg:
+; CHECK-LABEL: test_small_reg:
%val = call i16 asm sideeffect "add $0, $1, $2, sxth", "=r,r,r"(i16 %lhs, i16 %rhs)
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
ret i16 %val
}
define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
-; CHECK: test_inline_constraint_r_imm:
+; CHECK-LABEL: test_inline_constraint_r_imm:
%val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 4, i32 12)
; CHECK: movz [[FOUR:x[0-9]+]], #4
; CHECK: movz [[TWELVE:w[0-9]+]], #12
@@ -26,7 +26,7 @@ define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
; m is permitted to have a base/offset form. We don't do that
; currently though.
define i32 @test_inline_constraint_m(i32 *%ptr) {
-; CHECK: test_inline_constraint_m:
+; CHECK-LABEL: test_inline_constraint_m:
%val = call i32 asm "ldr $0, $1", "=r,m"(i32 *%ptr)
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
ret i32 %val
@@ -36,7 +36,7 @@ define i32 @test_inline_constraint_m(i32 *%ptr) {
; Q should *never* have base/offset form even if given the chance.
define i32 @test_inline_constraint_Q(i32 *%ptr) {
-; CHECK: test_inline_constraint_Q:
+; CHECK-LABEL: test_inline_constraint_Q:
%val = call i32 asm "ldr $0, $1", "=r,Q"(i32* getelementptr([8 x i32]* @arr, i32 0, i32 1))
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
ret i32 %val
@@ -44,8 +44,28 @@ define i32 @test_inline_constraint_Q(i32 *%ptr) {
@dump = global fp128 zeroinitializer
+define void @test_inline_constraint_w(<8 x i8> %vec64, <4 x float> %vec128, half %hlf, float %flt, double %dbl, fp128 %quad) {
+; CHECK-LABEL: test_inline_constraint_w:
+ call <8 x i8> asm sideeffect "add $0.8b, $1.8b, $1.8b", "=w,w"(<8 x i8> %vec64)
+ call <8 x i8> asm sideeffect "fadd $0.4s, $1.4s, $1.4s", "=w,w"(<4 x float> %vec128)
+; CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+
+ ; Arguably semantically dodgy to output "vN", but it's what GCC does
+ ; so purely for compatibility we want vector registers to be output.
+ call float asm sideeffect "fcvt ${0:s}, ${1:h}", "=w,w"(half undef)
+ call float asm sideeffect "fadd $0.2s, $0.2s, $0.2s", "=w,w"(float %flt)
+ call double asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(double %dbl)
+ call fp128 asm sideeffect "fadd $0.2d, $0.2d, $0.2d", "=w,w"(fp128 %quad)
+; CHECK: fcvt {{s[0-9]+}}, {{h[0-9]+}}
+; CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+; CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ ret void
+}
+
define void @test_inline_constraint_I() {
-; CHECK: test_inline_constraint_I:
+; CHECK-LABEL: test_inline_constraint_I:
call void asm sideeffect "add x0, x0, $0", "I"(i32 0)
call void asm sideeffect "add x0, x0, $0", "I"(i64 4095)
; CHECK: add x0, x0, #0
@@ -57,7 +77,7 @@ define void @test_inline_constraint_I() {
; Skip J because it's useless
define void @test_inline_constraint_K() {
-; CHECK: test_inline_constraint_K:
+; CHECK-LABEL: test_inline_constraint_K:
call void asm sideeffect "and w0, w0, $0", "K"(i32 2863311530) ; = 0xaaaaaaaa
call void asm sideeffect "and w0, w0, $0", "K"(i32 65535)
; CHECK: and w0, w0, #-1431655766
@@ -67,7 +87,7 @@ define void @test_inline_constraint_K() {
}
define void @test_inline_constraint_L() {
-; CHECK: test_inline_constraint_L:
+; CHECK-LABEL: test_inline_constraint_L:
call void asm sideeffect "and x0, x0, $0", "L"(i64 4294967296) ; = 0xaaaaaaaa
call void asm sideeffect "and x0, x0, $0", "L"(i64 65535)
; CHECK: and x0, x0, #4294967296
@@ -81,7 +101,7 @@ define void @test_inline_constraint_L() {
@var = global i32 0
define void @test_inline_constraint_S() {
-; CHECK: test_inline_constraint_S:
+; CHECK-LABEL: test_inline_constraint_S:
call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
call void asm sideeffect "adrp x0, ${0:A}", "S"(i32* @var)
call void asm sideeffect "add x0, x0, ${0:L}", "S"(i32* @var)
@@ -92,7 +112,7 @@ define void @test_inline_constraint_S() {
}
define i32 @test_inline_constraint_S_label(i1 %in) {
-; CHECK: test_inline_constraint_S_label:
+; CHECK-LABEL: test_inline_constraint_S_label:
call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
; CHECK: adr x0, .Ltmp{{[0-9]+}}
br i1 %in, label %loc, label %loc2
@@ -103,15 +123,15 @@ loc2:
}
define void @test_inline_constraint_Y() {
-; CHECK: test_inline_constraint_Y:
+; CHECK-LABEL: test_inline_constraint_Y:
call void asm sideeffect "fcmp s0, $0", "Y"(float 0.0)
; CHECK: fcmp s0, #0.0
ret void
}
define void @test_inline_constraint_Z() {
-; CHECK: test_inline_constraint_Z:
+; CHECK-LABEL: test_inline_constraint_Z:
call void asm sideeffect "cmp w0, $0", "Z"(i32 0)
; CHECK: cmp w0, #0
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/inline-asm-modifiers.ll b/test/CodeGen/AArch64/inline-asm-modifiers.ll
index 3b55945561eb..b7f4d3c57ba3 100644
--- a/test/CodeGen/AArch64/inline-asm-modifiers.ll
+++ b/test/CodeGen/AArch64/inline-asm-modifiers.ll
@@ -1,5 +1,4 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-none-linux-gnu -relocation-model=pic -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-ELF %s
@var_simple = hidden global i32 0
@var_got = global i32 0
@@ -9,7 +8,7 @@
@var_tlsle = thread_local(localexec) global i32 0
define void @test_inline_modifier_L() nounwind {
-; CHECK: test_inline_modifier_L:
+; CHECK-LABEL: test_inline_modifier_L:
call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_simple)
call void asm sideeffect "ldr x0, [x0, ${0:L}]", "S,~{x0}"(i32* @var_got)
call void asm sideeffect "add x0, x0, ${0:L}", "S,~{x0}"(i32* @var_tlsgd)
@@ -23,31 +22,28 @@ define void @test_inline_modifier_L() nounwind {
; CHECK: ldr x0, [x0, #:gottprel_lo12:var_tlsie]
; CHECK: add x0, x0, #:tprel_lo12:var_tlsle
-; CHECK-ELF: R_AARCH64_ADD_ABS_LO12_NC var_simple
-; CHECK-ELF: R_AARCH64_LD64_GOT_LO12_NC var_got
-; CHECK-ELF: R_AARCH64_TLSDESC_ADD_LO12_NC var_tlsgd
-; CHECK-ELF: R_AARCH64_TLSLD_ADD_DTPREL_LO12 var_tlsld
-; CHECK-ELF: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC var_tlsie
-; CHECK-ELF: R_AARCH64_TLSLE_ADD_TPREL_LO12 var_tlsle
+ call void asm sideeffect "add x0, x0, ${0:L}", "Si,~{x0}"(i32 64)
+ call void asm sideeffect "ldr x0, [x0, ${0:L}]", "Si,~{x0}"(i32 64)
+; CHECK: add x0, x0, #64
+; CHECK: ldr x0, [x0, #64]
ret void
}
define void @test_inline_modifier_G() nounwind {
-; CHECK: test_inline_modifier_G:
+; CHECK-LABEL: test_inline_modifier_G:
call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsld)
call void asm sideeffect "add x0, x0, ${0:G}, lsl #12", "S,~{x0}"(i32* @var_tlsle)
; CHECK: add x0, x0, #:dtprel_hi12:var_tlsld, lsl #12
; CHECK: add x0, x0, #:tprel_hi12:var_tlsle, lsl #12
-; CHECK-ELF: R_AARCH64_TLSLD_ADD_DTPREL_HI12 var_tlsld
-; CHECK-ELF: R_AARCH64_TLSLE_ADD_TPREL_HI12 var_tlsle
-
+ call void asm sideeffect "add x0, x0, ${0:G}", "Si,~{x0}"(i32 42)
+; CHECK: add x0, x0, #42
ret void
}
define void @test_inline_modifier_A() nounwind {
-; CHECK: test_inline_modifier_A:
+; CHECK-LABEL: test_inline_modifier_A:
call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_simple)
call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_got)
call void asm sideeffect "adrp x0, ${0:A}", "S,~{x0}"(i32* @var_tlsgd)
@@ -58,16 +54,14 @@ define void @test_inline_modifier_A() nounwind {
; CHECK: adrp x0, :tlsdesc:var_tlsgd
; CHECK: adrp x0, :gottprel:var_tlsie
-; CHECK-ELF: R_AARCH64_ADR_PREL_PG_HI21 var_simple
-; CHECK-ELF: R_AARCH64_ADR_GOT_PAGE var_got
-; CHECK-ELF: R_AARCH64_TLSDESC_ADR_PAGE var_tlsgd
-; CHECK-ELF: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 var_tlsie
+ call void asm sideeffect "adrp x0, ${0:A}", "Si,~{x0}"(i32 40)
+; CHECK: adrp x0, #40
ret void
}
define void @test_inline_modifier_wx(i32 %small, i64 %big) nounwind {
-; CHECK: test_inline_modifier_wx:
+; CHECK-LABEL: test_inline_modifier_wx:
call i32 asm sideeffect "add $0, $0, $0", "=r,0"(i32 %small)
call i32 asm sideeffect "add ${0:w}, ${0:w}, ${0:w}", "=r,0"(i32 %small)
call i32 asm sideeffect "add ${0:x}, ${0:x}, ${0:x}", "=r,0"(i32 %small)
@@ -87,11 +81,17 @@ define void @test_inline_modifier_wx(i32 %small, i64 %big) nounwind {
call i32 asm sideeffect "add ${0:x}, ${1:x}, ${1:x}", "=r,r"(i32 0)
; CHECK: add {{w[0-9]+}}, wzr, wzr
; CHECK: add {{x[0-9]+}}, xzr, xzr
+
+ call i32 asm sideeffect "add ${0:w}, ${0:w}, ${1:w}", "=r,Ir,0"(i32 123, i32 %small)
+ call i64 asm sideeffect "add ${0:x}, ${0:x}, ${1:x}", "=r,Ir,0"(i32 456, i64 %big)
+; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #123
+; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #456
+
ret void
}
define void @test_inline_modifier_bhsdq() nounwind {
-; CHECK: test_inline_modifier_bhsdq:
+; CHECK-LABEL: test_inline_modifier_bhsdq:
call float asm sideeffect "ldr ${0:b}, [sp]", "=w"()
call float asm sideeffect "ldr ${0:h}, [sp]", "=w"()
call float asm sideeffect "ldr ${0:s}, [sp]", "=w"()
@@ -113,13 +113,35 @@ define void @test_inline_modifier_bhsdq() nounwind {
; CHECK: ldr s0, [sp]
; CHECK: ldr d0, [sp]
; CHECK: ldr q0, [sp]
+
+ call void asm sideeffect "fcmp b0, ${0:b}", "Yw"(float 0.0)
+ call void asm sideeffect "fcmp h0, ${0:h}", "Yw"(float 0.0)
+ call void asm sideeffect "fcmp s0, ${0:s}", "Yw"(float 0.0)
+ call void asm sideeffect "fcmp d0, ${0:d}", "Yw"(float 0.0)
+ call void asm sideeffect "fcmp q0, ${0:q}", "Yw"(float 0.0)
+; CHECK: fcmp b0, #0
+; CHECK: fcmp h0, #0
+; CHECK: fcmp s0, #0
+; CHECK: fcmp d0, #0
+; CHECK: fcmp q0, #0
+
ret void
}
define void @test_inline_modifier_c() nounwind {
-; CHECK: test_inline_modifier_c:
+; CHECK-LABEL: test_inline_modifier_c:
call void asm sideeffect "adr x0, ${0:c}", "i"(i32 3)
; CHECK: adr x0, 3
ret void
-}
\ No newline at end of file
+}
+
+define void @test_inline_modifier_a() nounwind {
+; CHECK-LABEL: test_inline_modifier_a:
+ call void asm sideeffect "prfm pldl1keep, ${0:a}", "r"(i32* @var_simple)
+; CHECK: adrp [[VARHI:x[0-9]+]], var_simple
+; CHECK: add x[[VARADDR:[0-9]+]], [[VARHI]], #:lo12:var_simple
+; CHECK: prfm pldl1keep, [x[[VARADDR]]]
+ ret void
+}
+
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
index 3c7f5f9ec1b0..4bb094217af3 100644
--- a/test/CodeGen/AArch64/jump-table.ll
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -1,6 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -code-model=large -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-ELF
define i32 @test_jumptable(i32 %in) {
; CHECK: test_jumptable
@@ -48,19 +47,3 @@ lbl4:
; CHECK-NEXT: .xword
; CHECK-NEXT: .xword
; CHECK-NEXT: .xword
-
-; ELF tests:
-
-; First make sure we get a page/lo12 pair in .text to pick up the jump-table
-
-; CHECK-ELF: Relocations [
-; CHECK-ELF: Section ({{[0-9]+}}) .text {
-; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ADR_PREL_PG_HI21 .rodata
-; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ADD_ABS_LO12_NC .rodata
-; CHECK-ELF: }
-
-; Also check the targets in .rodata are relocated
-; CHECK-ELF: Section ({{[0-9]+}}) .rodata {
-; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ABS64 .text
-; CHECK-ELF: }
-; CHECK-ELF: ]
diff --git a/test/CodeGen/AArch64/large-consts.ll b/test/CodeGen/AArch64/large-consts.ll
new file mode 100644
index 000000000000..1b769c6e350d
--- /dev/null
+++ b/test/CodeGen/AArch64/large-consts.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -o - %s -code-model=large -show-mc-encoding | FileCheck %s
+
+; Make sure the shift amount is encoded into the instructions by LLVM because
+; it's not the linker's job to put it there.
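+;
+; Roughly, the large code model materializes the 64-bit constant-pool address
+; in four 16-bit pieces (xN and sym below are placeholders):
+;   movz xN, #:abs_g3:sym        ; bits [63:48], written at lsl #48
+;   movk xN, #:abs_g2_nc:sym     ; bits [47:32], written at lsl #32
+;   movk xN, #:abs_g1_nc:sym     ; bits [31:16], written at lsl #16
+;   movk xN, #:abs_g0_nc:sym     ; bits [15:0]
+; The relocations only supply the 16 immediate bits, so the shift has to be
+; present in the emitted instruction itself.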
+
+define double @foo() {
+; CHECK: movz [[CPADDR:x[0-9]+]], #:abs_g3:.LCPI0_0 // encoding: [A,A,0xe0'A',0xd2'A']
+; CHECK: movk [[CPADDR]], #:abs_g2_nc:.LCPI0_0 // encoding: [A,A,0xc0'A',0xf2'A']
+; CHECK: movk [[CPADDR]], #:abs_g1_nc:.LCPI0_0 // encoding: [A,A,0xa0'A',0xf2'A']
+; CHECK: movk [[CPADDR]], #:abs_g0_nc:.LCPI0_0 // encoding: [A,A,0x80'A',0xf2'A']
+
+ ret double 3.14159
+}
diff --git a/test/CodeGen/AArch64/large-frame.ll b/test/CodeGen/AArch64/large-frame.ll
index 2b2e1295c4f6..fde3036aef4a 100644
--- a/test/CodeGen/AArch64/large-frame.ll
+++ b/test/CodeGen/AArch64/large-frame.ll
@@ -4,17 +4,21 @@ declare void @use_addr(i8*)
@addr = global i8* null
define void @test_bigframe() {
-; CHECK: test_bigframe:
+; CHECK-LABEL: test_bigframe:
+; CHECK: .cfi_startproc
%var1 = alloca i8, i32 20000000
%var2 = alloca i8, i32 16
%var3 = alloca i8, i32 20000000
; CHECK: sub sp, sp, #496
+; CHECK: .cfi_def_cfa sp, 496
; CHECK: str x30, [sp, #488]
; Total adjust is 39999536
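; (39999536 = 610 * 65536 + 22576, hence the movz/movk pair checked below.)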
; CHECK: movz [[SUBCONST:x[0-9]+]], #22576
; CHECK: movk [[SUBCONST]], #610, lsl #16
; CHECK: sub sp, sp, [[SUBCONST]]
+; CHECK: .cfi_def_cfa sp, 40000032
+; CHECK: .cfi_offset x30, -8
; Total offset is 20000024
; CHECK: movz [[VAR1OFFSET:x[0-9]+]], #11544
@@ -41,11 +45,12 @@ define void @test_bigframe() {
; CHECK: movz [[ADDCONST:x[0-9]+]], #22576
; CHECK: movk [[ADDCONST]], #610, lsl #16
; CHECK: add sp, sp, [[ADDCONST]]
+; CHECK: .cfi_endproc
ret void
}
define void @test_mediumframe() {
-; CHECK: test_mediumframe:
+; CHECK-LABEL: test_mediumframe:
%var1 = alloca i8, i32 1000000
%var2 = alloca i8, i32 16
%var3 = alloca i8, i32 1000000
@@ -88,7 +93,7 @@ define void @test_mediumframe() {
; If temporary registers are allocated for adjustment, they should *not* clobber
; argument registers.
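; (With an [8 x i64] argument, x0-x7 are all live on entry under AAPCS64, so
; the scratch register used for the large SP adjustment must not be one of them.)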
define void @test_tempallocation([8 x i64] %val) nounwind {
-; CHECK: test_tempallocation:
+; CHECK-LABEL: test_tempallocation:
%var = alloca i8, i32 1000000
; CHECK: sub sp, sp,
diff --git a/test/CodeGen/AArch64/ldst-regoffset.ll b/test/CodeGen/AArch64/ldst-regoffset.ll
index 45935129fd7e..db30fd915fb0 100644
--- a/test/CodeGen/AArch64/ldst-regoffset.ll
+++ b/test/CodeGen/AArch64/ldst-regoffset.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@var_16bit = global i16 0
@@ -9,7 +10,7 @@
@var_double = global double 0.0
define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
%addr8_sxtw = getelementptr i8* %base, i32 %off32
%val8_sxtw = load volatile i8* %addr8_sxtw
@@ -37,7 +38,7 @@ define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) {
define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
%addr8_sxtwN = getelementptr i16* %base, i32 %off32
%val8_sxtwN = load volatile i16* %addr8_sxtwN
@@ -91,7 +92,7 @@ define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) {
}
define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
%addr_sxtwN = getelementptr i32* %base, i32 %off32
%val_sxtwN = load volatile i32* %addr_sxtwN
@@ -143,7 +144,7 @@ define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) {
}
define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_64bit:
+; CHECK-LABEL: ldst_64bit:
%addr_sxtwN = getelementptr i64* %base, i32 %off32
%val_sxtwN = load volatile i64* %addr_sxtwN
@@ -191,17 +192,19 @@ define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) {
}
define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%addr_sxtwN = getelementptr float* %base, i32 %off32
%val_sxtwN = load volatile float* %addr_sxtwN
store volatile float %val_sxtwN, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #2]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%addr_lslN = getelementptr float* %base, i64 %off64
%val_lslN = load volatile float* %addr_lslN
store volatile float %val_lslN, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%addrint_uxtw = ptrtoint float* %base to i64
%offset_uxtw = zext i32 %off32 to i64
@@ -210,6 +213,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%val_uxtw = load volatile float* %addr_uxtw
store volatile float %val_uxtw, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%base_sxtw = ptrtoint float* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -218,6 +222,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%val64_sxtw = load volatile float* %addr_sxtw
store volatile float %val64_sxtw, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%base_lsl = ptrtoint float* %base to i64
%addrint_lsl = add i64 %base_lsl, %off64
@@ -225,6 +230,7 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%val64_lsl = load volatile float* %addr_lsl
store volatile float %val64_lsl, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
%base_uxtwN = ptrtoint float* %base to i64
%offset_uxtwN = zext i32 %off32 to i64
@@ -234,21 +240,24 @@ define void @ldst_float(float* %base, i32 %off32, i64 %off64) {
%val64 = load volatile float* @var_float
store volatile float %val64, float* %addr_uxtwN
; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
ret void
}
define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%addr_sxtwN = getelementptr double* %base, i32 %off32
%val_sxtwN = load volatile double* %addr_sxtwN
store volatile double %val_sxtwN, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #3]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%addr_lslN = getelementptr double* %base, i64 %off64
%val_lslN = load volatile double* %addr_lslN
store volatile double %val_lslN, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%addrint_uxtw = ptrtoint double* %base to i64
%offset_uxtw = zext i32 %off32 to i64
@@ -257,6 +266,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%val_uxtw = load volatile double* %addr_uxtw
store volatile double %val_uxtw, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%base_sxtw = ptrtoint double* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -265,6 +275,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%val64_sxtw = load volatile double* %addr_sxtw
store volatile double %val64_sxtw, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%base_lsl = ptrtoint double* %base to i64
%addrint_lsl = add i64 %base_lsl, %off64
@@ -272,6 +283,7 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%val64_lsl = load volatile double* %addr_lsl
store volatile double %val64_lsl, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
%base_uxtwN = ptrtoint double* %base to i64
%offset_uxtwN = zext i32 %off32 to i64
@@ -281,22 +293,25 @@ define void @ldst_double(double* %base, i32 %off32, i64 %off64) {
%val64 = load volatile double* @var_double
store volatile double %val64, double* %addr_uxtwN
; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
ret void
}
define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
-; CHECK: ldst_128bit:
+; CHECK-LABEL: ldst_128bit:
%addr_sxtwN = getelementptr fp128* %base, i32 %off32
%val_sxtwN = load volatile fp128* %addr_sxtwN
store volatile fp128 %val_sxtwN, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
%addr_lslN = getelementptr fp128* %base, i64 %off64
%val_lslN = load volatile fp128* %addr_lslN
store volatile fp128 %val_lslN, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
%addrint_uxtw = ptrtoint fp128* %base to i64
%offset_uxtw = zext i32 %off32 to i64
@@ -305,6 +320,7 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%val_uxtw = load volatile fp128* %addr_uxtw
store volatile fp128 %val_uxtw, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
%base_sxtw = ptrtoint fp128* %base to i64
%offset_sxtw = sext i32 %off32 to i64
@@ -313,6 +329,7 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%val64_sxtw = load volatile fp128* %addr_sxtw
store volatile fp128 %val64_sxtw, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
%base_lsl = ptrtoint fp128* %base to i64
%addrint_lsl = add i64 %base_lsl, %off64
@@ -320,6 +337,7 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%val64_lsl = load volatile fp128* %addr_lsl
store volatile fp128 %val64_lsl, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
%base_uxtwN = ptrtoint fp128* %base to i64
%offset_uxtwN = zext i32 %off32 to i64
@@ -329,5 +347,6 @@ define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) {
%val64 = load volatile fp128* %base
store volatile fp128 %val64, fp128* %addr_uxtwN
; CHECK: str {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #4]
+; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw #4]
ret void
}
diff --git a/test/CodeGen/AArch64/ldst-unscaledimm.ll b/test/CodeGen/AArch64/ldst-unscaledimm.ll
index 78a3c83c3dd8..bea5bb5d6dd6 100644
--- a/test/CodeGen/AArch64/ldst-unscaledimm.ll
+++ b/test/CodeGen/AArch64/ldst-unscaledimm.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@var_16bit = global i16 0
@@ -11,7 +12,7 @@
@varptr = global i8* null
define void @ldst_8bit() {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
; No architectural support for loads to 16-bit or 8-bit since we
; promote i8 during lowering.
@@ -72,7 +73,7 @@ define void @ldst_8bit() {
}
define void @ldst_16bit() {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
; No architectural support for loads to 16-bit registers since we
; promote i16 during lowering.
@@ -140,7 +141,7 @@ define void @ldst_16bit() {
}
define void @ldst_32bit() {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
%addr_8bit = load i8** @varptr
@@ -186,7 +187,7 @@ define void @ldst_32bit() {
}
define void @ldst_float() {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%addr_8bit = load i8** @varptr
%addrfp_8 = getelementptr i8* %addr_8bit, i64 -5
@@ -194,15 +195,17 @@ define void @ldst_float() {
%valfp = load volatile float* %addrfp
; CHECK: ldur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
+; CHECK-NOFP-NOT: ldur {{s[0-9]+}},
store volatile float %valfp, float* %addrfp
; CHECK: stur {{s[0-9]+}}, [{{x[0-9]+}}, #-5]
+; CHECK-NOFP-NOT: stur {{s[0-9]+}},
ret void
}
define void @ldst_double() {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%addr_8bit = load i8** @varptr
%addrfp_8 = getelementptr i8* %addr_8bit, i64 4
@@ -210,9 +213,11 @@ define void @ldst_double() {
%valfp = load volatile double* %addrfp
; CHECK: ldur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
+; CHECK-NOFP-NOT: ldur {{d[0-9]+}},
store volatile double %valfp, double* %addrfp
; CHECK: stur {{d[0-9]+}}, [{{x[0-9]+}}, #4]
+; CHECK-NOFP-NOT: stur {{d[0-9]+}},
ret void
}
diff --git a/test/CodeGen/AArch64/ldst-unsignedimm.ll b/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 1e7540d9be0a..44c1586e1ec7 100644
--- a/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
@var_8bit = global i8 0
@var_16bit = global i16 0
@@ -9,7 +10,7 @@
@var_double = global double 0.0
define void @ldst_8bit() {
-; CHECK: ldst_8bit:
+; CHECK-LABEL: ldst_8bit:
; No architectural support for loads to 16-bit or 8-bit since we
; promote i8 during lowering.
@@ -63,7 +64,7 @@ define void @ldst_8bit() {
}
define void @ldst_16bit() {
-; CHECK: ldst_16bit:
+; CHECK-LABEL: ldst_16bit:
; No architectural support for volatile loads to 16-bit registers, so we
; promote i16 during lowering.
@@ -117,7 +118,7 @@ define void @ldst_16bit() {
}
define void @ldst_32bit() {
-; CHECK: ldst_32bit:
+; CHECK-LABEL: ldst_32bit:
; Straight 32-bit load/store
%val32_noext = load volatile i32* @var_32bit
@@ -225,27 +226,31 @@ define void @ldst_complex_offsets() {
}
define void @ldst_float() {
-; CHECK: ldst_float:
+; CHECK-LABEL: ldst_float:
%valfp = load volatile float* @var_float
; CHECK: adrp {{x[0-9]+}}, var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
store volatile float %valfp, float* @var_float
; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_float]
+; CHECK-NOFP-NOT: str {{s[0-9]+}},
ret void
}
define void @ldst_double() {
-; CHECK: ldst_double:
+; CHECK-LABEL: ldst_double:
%valfp = load volatile double* @var_double
; CHECK: adrp {{x[0-9]+}}, var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
store volatile double %valfp, double* @var_double
; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:var_double]
+; CHECK-NOFP-NOT: str {{d[0-9]+}},
ret void
}
diff --git a/test/CodeGen/AArch64/lit.local.cfg b/test/CodeGen/AArch64/lit.local.cfg
index c5ce2411ed48..9a66a00189ea 100644
--- a/test/CodeGen/AArch64/lit.local.cfg
+++ b/test/CodeGen/AArch64/lit.local.cfg
@@ -1,5 +1,3 @@
-config.suffixes = ['.ll', '.c', '.cpp']
-
targets = set(config.root.targets_to_build.split())
if not 'AArch64' in targets:
config.unsupported = True
diff --git a/test/CodeGen/AArch64/literal_pools.ll b/test/CodeGen/AArch64/literal_pools.ll
index 9cfa8c5426e4..fc33aee10d84 100644
--- a/test/CodeGen/AArch64/literal_pools.ll
+++ b/test/CodeGen/AArch64/literal_pools.ll
@@ -1,11 +1,13 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP-LARGE %s
@var32 = global i32 0
@var64 = global i64 0
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%val32 = load i32* @var32
%val64 = load i64* @var64
@@ -60,13 +62,13 @@ define void @foo() {
@vardouble = global double 0.0
define void @floating_lits() {
-; CHECK: floating_lits:
+; CHECK-LABEL: floating_lits:
%floatval = load float* @varfloat
%newfloat = fadd float %floatval, 128.0
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr {{s[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK: fadd
+; CHECK: ldr [[LIT128:s[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
@@ -74,20 +76,26 @@ define void @floating_lits() {
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
; CHECK-LARGE: fadd
+; CHECK-NOFP-LARGE-NOT: ldr {{s[0-9]+}},
+; CHECK-NOFP-LARGE-NOT: fadd
store float %newfloat, float* @varfloat
%doubleval = load double* @vardouble
%newdouble = fadd double %doubleval, 129.0
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
-; CHECK: ldr {{d[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
-; CHECK: fadd
+; CHECK: ldr [[LIT129:d[0-9]+]], [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK: fadd {{s[0-9]+}}, {{s[0-9]+}}, [[LIT128]]
+; CHECK: fadd {{d[0-9]+}}, {{d[0-9]+}}, [[LIT129]]
+; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
+; CHECK-NOFP-NOT: fadd
; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
+; CHECK-NOFP-LARGE-NOT: ldr {{d[0-9]+}},
store double %newdouble, double* @vardouble
diff --git a/test/CodeGen/AArch64/local_vars.ll b/test/CodeGen/AArch64/local_vars.ll
index 5cbf5a37ec54..b5cef859e35f 100644
--- a/test/CodeGen/AArch64/local_vars.ll
+++ b/test/CodeGen/AArch64/local_vars.ll
@@ -24,7 +24,7 @@ define void @trivial_func() nounwind {
}
define void @trivial_fp_func() {
-; CHECK-WITHFP: trivial_fp_func:
+; CHECK-WITHFP-LABEL: trivial_fp_func:
; CHECK-WITHFP: sub sp, sp, #16
; CHECK-WITHFP: stp x29, x30, [sp]
@@ -43,7 +43,7 @@ define void @trivial_fp_func() {
define void @stack_local() {
%local_var = alloca i64
-; CHECK: stack_local:
+; CHECK-LABEL: stack_local:
; CHECK: sub sp, sp, #16
%val = load i64* @var
diff --git a/test/CodeGen/AArch64/logical-imm.ll b/test/CodeGen/AArch64/logical-imm.ll
index 5f3f4da0cdad..e04bb510ebf2 100644
--- a/test/CodeGen/AArch64/logical-imm.ll
+++ b/test/CodeGen/AArch64/logical-imm.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @test_and(i32 %in32, i64 %in64) {
-; CHECK: test_and:
+; CHECK-LABEL: test_and:
%val0 = and i32 %in32, 2863311530
store volatile i32 %val0, i32* @var32
@@ -26,7 +26,7 @@ define void @test_and(i32 %in32, i64 %in64) {
}
define void @test_orr(i32 %in32, i64 %in64) {
-; CHECK: test_orr:
+; CHECK-LABEL: test_orr:
%val0 = or i32 %in32, 2863311530
store volatile i32 %val0, i32* @var32
@@ -48,7 +48,7 @@ define void @test_orr(i32 %in32, i64 %in64) {
}
define void @test_eor(i32 %in32, i64 %in64) {
-; CHECK: test_eor:
+; CHECK-LABEL: test_eor:
%val0 = xor i32 %in32, 2863311530
store volatile i32 %val0, i32* @var32
@@ -70,7 +70,7 @@ define void @test_eor(i32 %in32, i64 %in64) {
}
define void @test_mov(i32 %in32, i64 %in64) {
-; CHECK: test_mov:
+; CHECK-LABEL: test_mov:
%val0 = add i32 %in32, 2863311530
store i32 %val0, i32* @var32
; CHECK: orr {{w[0-9]+}}, wzr, #0xaaaaaaaa
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.ll b/test/CodeGen/AArch64/logical_shifted_reg.ll
index bbbfcc1b9118..a08ba20c7f11 100644
--- a/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -7,7 +7,7 @@
@var2_64 = global i64 0
define void @logical_32bit() {
-; CHECK: logical_32bit:
+; CHECK-LABEL: logical_32bit:
%val1 = load i32* @var1_32
%val2 = load i32* @var2_32
@@ -97,7 +97,7 @@ define void @logical_32bit() {
}
define void @logical_64bit() {
-; CHECK: logical_64bit:
+; CHECK-LABEL: logical_64bit:
%val1 = load i64* @var1_64
%val2 = load i64* @var2_64
@@ -190,7 +190,7 @@ define void @logical_64bit() {
}
define void @flag_setting() {
-; CHECK: flag_setting:
+; CHECK-LABEL: flag_setting:
%val1 = load i64* @var1_64
%val2 = load i64* @var2_64
diff --git a/test/CodeGen/AArch64/logical_shifted_reg.s b/test/CodeGen/AArch64/logical_shifted_reg.s
deleted file mode 100644
index 89aea580119b..000000000000
--- a/test/CodeGen/AArch64/logical_shifted_reg.s
+++ /dev/null
@@ -1,208 +0,0 @@
- .file "/home/timnor01/a64-trunk/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll"
- .text
- .globl logical_32bit
- .type logical_32bit,@function
-logical_32bit: // @logical_32bit
- .cfi_startproc
-// BB#0:
- adrp x0, var1_32
- ldr w1, [x0, #:lo12:var1_32]
- adrp x0, var2_32
- ldr w2, [x0, #:lo12:var2_32]
- and w3, w1, w2
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- bic w3, w1, w2
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- orr w3, w1, w2
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- orn w3, w1, w2
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eor w3, w1, w2
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eon w3, w2, w1
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- and w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- bic w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- orr w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- orn w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eor w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eon w3, w1, w2, lsl #31
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- bic w3, w1, w2, asr #10
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eor w3, w1, w2, asr #10
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- orn w3, w1, w2, lsr #1
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eor w3, w1, w2, lsr #1
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- eon w3, w1, w2, ror #20
- adrp x0, var1_32
- str w3, [x0, #:lo12:var1_32]
- and w1, w1, w2, ror #20
- adrp x0, var1_32
- str w1, [x0, #:lo12:var1_32]
- ret
-.Ltmp0:
- .size logical_32bit, .Ltmp0-logical_32bit
- .cfi_endproc
-
- .globl logical_64bit
- .type logical_64bit,@function
-logical_64bit: // @logical_64bit
- .cfi_startproc
-// BB#0:
- adrp x0, var1_64
- ldr x0, [x0, #:lo12:var1_64]
- adrp x1, var2_64
- ldr x1, [x1, #:lo12:var2_64]
- and x2, x0, x1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- bic x2, x0, x1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- orr x2, x0, x1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- orn x2, x0, x1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eor x2, x0, x1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eon x2, x1, x0
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- and x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- bic x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- orr x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- orn x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eor x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eon x2, x0, x1, lsl #63
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- bic x2, x0, x1, asr #10
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eor x2, x0, x1, asr #10
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- orn x2, x0, x1, lsr #1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eor x2, x0, x1, lsr #1
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- eon x2, x0, x1, ror #20
- adrp x3, var1_64
- str x2, [x3, #:lo12:var1_64]
- and x0, x0, x1, ror #20
- adrp x1, var1_64
- str x0, [x1, #:lo12:var1_64]
- ret
-.Ltmp1:
- .size logical_64bit, .Ltmp1-logical_64bit
- .cfi_endproc
-
- .globl flag_setting
- .type flag_setting,@function
-flag_setting: // @flag_setting
- .cfi_startproc
-// BB#0:
- sub sp, sp, #16
- adrp x0, var1_64
- ldr x0, [x0, #:lo12:var1_64]
- adrp x1, var2_64
- ldr x1, [x1, #:lo12:var2_64]
- tst x0, x1
- str x0, [sp, #8] // 8-byte Folded Spill
- str x1, [sp] // 8-byte Folded Spill
- b.gt .LBB2_4
- b .LBB2_1
-.LBB2_1: // %test2
- ldr x0, [sp, #8] // 8-byte Folded Reload
- ldr x1, [sp] // 8-byte Folded Reload
- tst x0, x1, lsl #63
- b.lt .LBB2_4
- b .LBB2_2
-.LBB2_2: // %test3
- ldr x0, [sp, #8] // 8-byte Folded Reload
- ldr x1, [sp] // 8-byte Folded Reload
- tst x0, x1, asr #12
- b.gt .LBB2_4
- b .LBB2_3
-.LBB2_3: // %other_exit
- adrp x0, var1_64
- ldr x1, [sp, #8] // 8-byte Folded Reload
- str x1, [x0, #:lo12:var1_64]
- add sp, sp, #16
- ret
-.LBB2_4: // %ret
- add sp, sp, #16
- ret
-.Ltmp2:
- .size flag_setting, .Ltmp2-flag_setting
- .cfi_endproc
-
- .type var1_32,@object // @var1_32
- .bss
- .globl var1_32
- .align 2
-var1_32:
- .word 0 // 0x0
- .size var1_32, 4
-
- .type var2_32,@object // @var2_32
- .globl var2_32
- .align 2
-var2_32:
- .word 0 // 0x0
- .size var2_32, 4
-
- .type var1_64,@object // @var1_64
- .globl var1_64
- .align 3
-var1_64:
- .xword 0 // 0x0
- .size var1_64, 8
-
- .type var2_64,@object // @var2_64
- .globl var2_64
- .align 3
-var2_64:
- .xword 0 // 0x0
- .size var2_64, 8
-
-
diff --git a/test/CodeGen/AArch64/movw-consts.ll b/test/CodeGen/AArch64/movw-consts.ll
index b8a5fb932202..38e37db7b58c 100644
--- a/test/CodeGen/AArch64/movw-consts.ll
+++ b/test/CodeGen/AArch64/movw-consts.ll
@@ -1,50 +1,50 @@
; RUN: llc -verify-machineinstrs -O0 < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
define i64 @test0() {
-; CHECK: test0:
+; CHECK-LABEL: test0:
; Not produced by move wide instructions, but good to make sure we can return 0 anyway:
; CHECK: mov x0, xzr
ret i64 0
}
define i64 @test1() {
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: movz x0, #1
ret i64 1
}
define i64 @test2() {
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: movz x0, #65535
ret i64 65535
}
define i64 @test3() {
-; CHECK: test3:
+; CHECK-LABEL: test3:
; CHECK: movz x0, #1, lsl #16
ret i64 65536
}
define i64 @test4() {
-; CHECK: test4:
+; CHECK-LABEL: test4:
; CHECK: movz x0, #65535, lsl #16
ret i64 4294901760
}
define i64 @test5() {
-; CHECK: test5:
+; CHECK-LABEL: test5:
; CHECK: movz x0, #1, lsl #32
ret i64 4294967296
}
define i64 @test6() {
-; CHECK: test6:
+; CHECK-LABEL: test6:
; CHECK: movz x0, #65535, lsl #32
ret i64 281470681743360
}
define i64 @test7() {
-; CHECK: test7:
+; CHECK-LABEL: test7:
; CHECK: movz x0, #1, lsl #48
ret i64 281474976710656
}
@@ -52,19 +52,19 @@ define i64 @test7() {
; A 32-bit MOVN can generate some 64-bit patterns that a 64-bit one
; couldn't. Useful even for i64
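; For example, movn w0, #60875 yields w0 = ~0xedcb = 0xffff1234, which
; zero-extends to the i64 value 4294906420; a 64-bit movn would set the top
; 32 bits to ones instead.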
define i64 @test8() {
-; CHECK: test8:
+; CHECK-LABEL: test8:
; CHECK: movn w0, #60875
ret i64 4294906420
}
define i64 @test9() {
-; CHECK: test9:
+; CHECK-LABEL: test9:
; CHECK: movn x0, #0
ret i64 -1
}
define i64 @test10() {
-; CHECK: test10:
+; CHECK-LABEL: test10:
; CHECK: movn x0, #60875, lsl #16
ret i64 18446744069720047615
}
@@ -74,49 +74,49 @@ define i64 @test10() {
@var32 = global i32 0
define void @test11() {
-; CHECK: test11:
+; CHECK-LABEL: test11:
; CHECK: mov {{w[0-9]+}}, wzr
store i32 0, i32* @var32
ret void
}
define void @test12() {
-; CHECK: test12:
+; CHECK-LABEL: test12:
; CHECK: movz {{w[0-9]+}}, #1
store i32 1, i32* @var32
ret void
}
define void @test13() {
-; CHECK: test13:
+; CHECK-LABEL: test13:
; CHECK: movz {{w[0-9]+}}, #65535
store i32 65535, i32* @var32
ret void
}
define void @test14() {
-; CHECK: test14:
+; CHECK-LABEL: test14:
; CHECK: movz {{w[0-9]+}}, #1, lsl #16
store i32 65536, i32* @var32
ret void
}
define void @test15() {
-; CHECK: test15:
+; CHECK-LABEL: test15:
; CHECK: movz {{w[0-9]+}}, #65535, lsl #16
store i32 4294901760, i32* @var32
ret void
}
define void @test16() {
-; CHECK: test16:
+; CHECK-LABEL: test16:
; CHECK: movn {{w[0-9]+}}, #0
store i32 -1, i32* @var32
ret void
}
define i64 @test17() {
-; CHECK: test17:
+; CHECK-LABEL: test17:
; Mustn't MOVN w0 here: that would zero the upper 32 bits of the i64 result.
; CHECK: movn x0, #2
diff --git a/test/CodeGen/AArch64/movw-shift-encoding.ll b/test/CodeGen/AArch64/movw-shift-encoding.ll
new file mode 100644
index 000000000000..ec133bd706b1
--- /dev/null
+++ b/test/CodeGen/AArch64/movw-shift-encoding.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s -show-mc-encoding -code-model=large | FileCheck %s
+
+@var = global i32 0
+
+; CodeGen should ensure that the correct shift bits are set, because the linker
+; isn't going to!
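+; Roughly speaking, the shift ends up in the hw field (bits 22-21) of each
+; movz/movk, which is why the third encoding byte in the CHECK lines steps
+; through 0xe0, 0xc0, 0xa0 and 0x80 while the relocated imm16 stays as 'A'
+; placeholders.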
+
+define i32* @get_var() {
+ ret i32* @var
+; CHECK: movz x0, #:abs_g3:var // encoding: [A,A,0xe0'A',0xd2'A']
+; CHECK: movk x0, #:abs_g2_nc:var // encoding: [A,A,0xc0'A',0xf2'A']
+; CHECK: movk x0, #:abs_g1_nc:var // encoding: [A,A,0xa0'A',0xf2'A']
+; CHECK: movk x0, #:abs_g0_nc:var // encoding: [A,A,0x80'A',0xf2'A']
+}
diff --git a/test/CodeGen/AArch64/neon-2velem-high.ll b/test/CodeGen/AArch64/neon-2velem-high.ll
new file mode 100644
index 000000000000..97031d98b7c0
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-2velem-high.ll
@@ -0,0 +1,331 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i32> @test_vmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK: test_vmull_high_n_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK: test_vmull_high_n_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) {
+; CHECK: test_vmull_high_n_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vmull15.i.i
+}
+
+define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) {
+; CHECK: test_vmull_high_n_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vmull9.i.i
+}
+
+define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) {
+; CHECK: test_vqdmull_high_n_s16:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %b, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %b, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %b, i32 3
+ %vqdmull15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ ret <4 x i32> %vqdmull15.i.i
+}
+
+define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) {
+; CHECK: test_vqdmull_high_n_s32:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %b, i32 1
+ %vqdmull9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ ret <2 x i64> %vqdmull9.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vmlal_high_n_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vmlal_high_n_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vmlal_high_n_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vmlal_high_n_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vqdmlal_high_n_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vqdmlal15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %vqdmlal17.i.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
+ ret <4 x i32> %vqdmlal17.i.i
+}
+
+define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vqdmlal_high_n_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vqdmlal9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %vqdmlal11.i.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
+ ret <2 x i64> %vqdmlal11.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vmlsl_high_n_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vmlsl_high_n_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vmlsl_high_n_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vmlsl_high_n_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) {
+; CHECK: test_vqdmlsl_high_n_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vecinit.i.i = insertelement <4 x i16> undef, i16 %c, i32 0
+ %vecinit1.i.i = insertelement <4 x i16> %vecinit.i.i, i16 %c, i32 1
+ %vecinit2.i.i = insertelement <4 x i16> %vecinit1.i.i, i16 %c, i32 2
+ %vecinit3.i.i = insertelement <4 x i16> %vecinit2.i.i, i16 %c, i32 3
+ %vqdmlsl15.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %vecinit3.i.i)
+ %vqdmlsl17.i.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
+ ret <4 x i32> %vqdmlsl17.i.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) {
+; CHECK: test_vqdmlsl_high_n_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit.i.i = insertelement <2 x i32> undef, i32 %c, i32 0
+ %vecinit1.i.i = insertelement <2 x i32> %vecinit.i.i, i32 %c, i32 1
+ %vqdmlsl9.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %vecinit1.i.i)
+ %vqdmlsl11.i.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
+ ret <2 x i64> %vqdmlsl11.i.i
+}
+
+define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) {
+; CHECK: test_vmul_n_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %b, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %b, i32 1
+ %mul.i = fmul <2 x float> %vecinit1.i, %a
+ ret <2 x float> %mul.i
+}
+
+define <4 x float> @test_vmulq_n_f32(<4 x float> %a, float %b) {
+; CHECK: test_vmulq_n_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %b, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %b, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %b, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %b, i32 3
+ %mul.i = fmul <4 x float> %vecinit3.i, %a
+ ret <4 x float> %mul.i
+}
+
+define <2 x double> @test_vmulq_n_f64(<2 x double> %a, double %b) {
+; CHECK: test_vmulq_n_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %vecinit.i = insertelement <2 x double> undef, double %b, i32 0
+ %vecinit1.i = insertelement <2 x double> %vecinit.i, double %b, i32 1
+ %mul.i = fmul <2 x double> %vecinit1.i, %a
+ ret <2 x double> %mul.i
+}
+
+define <2 x float> @test_vfma_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK: test_vfma_n_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %b, <2 x float> %vecinit1.i, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK: test_vfmaq_n_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %b, <4 x float> %vecinit3.i, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_n_f32(<2 x float> %a, <2 x float> %b, float %n) {
+; CHECK: test_vfms_n_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <2 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <2 x float> %vecinit.i, float %n, i32 1
+ %0 = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %b
+ %1 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %0, <2 x float> %vecinit1.i, <2 x float> %a)
+ ret <2 x float> %1
+}
+
+define <4 x float> @test_vfmsq_n_f32(<4 x float> %a, <4 x float> %b, float %n) {
+; CHECK: test_vfmsq_n_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[{{[0-9]+}}]
+entry:
+ %vecinit.i = insertelement <4 x float> undef, float %n, i32 0
+ %vecinit1.i = insertelement <4 x float> %vecinit.i, float %n, i32 1
+ %vecinit2.i = insertelement <4 x float> %vecinit1.i, float %n, i32 2
+ %vecinit3.i = insertelement <4 x float> %vecinit2.i, float %n, i32 3
+ %0 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %b
+ %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %0, <4 x float> %vecinit3.i, <4 x float> %a)
+ ret <4 x float> %1
+}
diff --git a/test/CodeGen/AArch64/neon-2velem.ll b/test/CodeGen/AArch64/neon-2velem.ll
new file mode 100644
index 000000000000..9d6184243713
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-2velem.ll
@@ -0,0 +1,2550 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
+
+declare <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float>, <4 x float>)
+
+declare <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float>, <2 x float>)
+
+declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32>, <2 x i32>)
+
+declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
+
+declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_vmla_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmla_lane_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlaq_lane_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmla_lane_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlaq_lane_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmla_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmla_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlaq_laneq_s16:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmla_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlaq_laneq_s32:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmls_lane_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsq_lane_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmls_lane_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsq_lane_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmls_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsq_laneq_s16:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmls_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsq_laneq_s32:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmul_lane_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmulq_lane_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmul_lane_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmulq_lane_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmul_lane_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmulq_lane_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmul_lane_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmulq_lane_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmul_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmulq_laneq_s16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmul_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmulq_laneq_s32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmul_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmulq_laneq_u16:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmul_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmulq_laneq_u32:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
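+; Floating-point fused multiply-add/subtract by lane: a shufflevector dup of one lane
+; fed into @llvm.fma is expected to select the fmla/fmls by-element forms. The fmls
+; tests negate %v first via an fsub from -0.0 so the negation folds into fmls.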
+define <2 x float> @test_vfma_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK: test_vfma_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+define <4 x float> @test_vfmaq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK: test_vfmaq_lane_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+define <2 x float> @test_vfma_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK: test_vfma_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK: test_vfmaq_laneq_f32:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK: test_vfms_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK: test_vfmsq_lane_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK: test_vfms_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_laneq_f32(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK: test_vfmsq_laneq_f32:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK: test_vfmaq_lane_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %lane = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x double> @test_vfmaq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK: test_vfmaq_laneq_f64:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+entry:
+ %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_lane_f64(<2 x double> %a, <2 x double> %b, <1 x double> %v) {
+; CHECK: test_vfmsq_lane_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %sub = fsub <1 x double> <double -0.000000e+00>, %v
+ %lane = shufflevector <1 x double> %sub, <1 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK: test_vfmsq_laneq_f64:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+entry:
+ %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+ %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
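+; Widening multiply-accumulate by lane: an llvm.arm.neon.vmulls/vmullu of a dup'ed lane
+; followed by an add or sub is expected to become smlal/umlal/smlsl/umlsl by element;
+; the "2" variants take the high half of the first operand via a shufflevector.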
+define <4 x i32> @test_vmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_lane_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_lane_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_laneq_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_laneq_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_high_lane_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_high_lane_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_high_laneq_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_high_laneq_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_lane_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_lane_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_s16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_laneq_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_s32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_laneq_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_high_lane_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_high_lane_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_high_laneq_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_high_laneq_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlal_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_lane_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_lane_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_laneq_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_laneq_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_high_lane_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_high_lane_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_high_laneq_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_high_laneq_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_lane_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_lane_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_u16(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_laneq_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_u32(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_laneq_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_u16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_high_lane_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_u32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_high_lane_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_high_laneq_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_high_laneq_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
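+; Widening multiplies by lane without accumulation: smull/umull and their "2" high-half forms.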
+define <4 x i32> @test_vmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_lane_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_lane_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_lane_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_lane_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_high_lane_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_high_lane_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_high_lane_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_high_lane_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_laneq_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_laneq_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_laneq_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_laneq_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_high_laneq_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_high_laneq_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_high_laneq_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_high_laneq_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
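+; Saturating doubling multiplies by lane: sqdmull (combined with vqadds/vqsubs for
+; sqdmlal/sqdmlsl), plus sqdmulh and the rounding sqrdmulh.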
+define <4 x i32> @test_vqdmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlal_lane_s16:
+; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlal_lane_s32:
+; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlal_high_lane_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlal_high_lane_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlsl_lane_s16:
+; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_lane_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlsl_lane_s32:
+; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_lane_s16(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlsl_high_lane_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_lane_s32(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlsl_high_lane_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmull_lane_s16:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmull_lane_s32:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_laneq_s16(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vqdmull_laneq_s16:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_laneq_s32(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vqdmull_laneq_s32:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmull_high_lane_s16:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmull_high_lane_s32:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_laneq_s16(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vqdmull_high_laneq_s16:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_laneq_s32(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vqdmull_high_laneq_s32:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> <i32 3, i32 3>
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i16> @test_vqdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmulh_lane_s16:
+; CHECK: sqdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqdmulh2.i
+}
+
+define <8 x i16> @test_vqdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmulhq_lane_s16:
+; CHECK: sqdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %vqdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqdmulh2.i
+}
+
+define <2 x i32> @test_vqdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmulh_lane_s32:
+; CHECK: sqdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqdmulh2.i
+}
+
+define <4 x i32> @test_vqdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmulhq_lane_s32:
+; CHECK: sqdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vqdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqdmulh2.i
+}
+
+define <4 x i16> @test_vqrdmulh_lane_s16(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqrdmulh_lane_s16:
+; CHECK: sqrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vqrdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqrdmulh2.i
+}
+
+define <8 x i16> @test_vqrdmulhq_lane_s16(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqrdmulhq_lane_s16:
+; CHECK: sqrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %vqrdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqrdmulh2.i
+}
+
+define <2 x i32> @test_vqrdmulh_lane_s32(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqrdmulh_lane_s32:
+; CHECK: sqrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ %vqrdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqrdmulh2.i
+}
+
+define <4 x i32> @test_vqrdmulhq_lane_s32(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqrdmulhq_lane_s32:
+; CHECK: sqrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vqrdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqrdmulh2.i
+}
+
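+; Floating-point multiply by lane: plain fmul against a dup'ed lane, including the
+; <1 x double> cases that reduce to a scalar fmul by an extracted element.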
+define <2 x float> @test_vmul_lane_f32(<2 x float> %a, <2 x float> %v) {
+; CHECK: test_vmul_lane_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_lane_f64(<1 x double> %a, <1 x double> %v) {
+; CHECK: test_vmul_lane_f64:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <1 x double> %v, i32 0
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_lane_f32(<4 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulq_lane_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_lane_f64(<2 x double> %a, <1 x double> %v) {
+; CHECK: test_vmulq_lane_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
+define <2 x float> @test_vmul_laneq_f32(<2 x float> %a, <4 x float> %v) {
+; CHECK: test_vmul_laneq_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_laneq_f64(<1 x double> %a, <2 x double> %v) {
+; CHECK: test_vmul_laneq_f64:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <2 x double> %v, i32 1
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_laneq_f32(<4 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulq_laneq_f32:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_laneq_f64(<2 x double> %a, <2 x double> %v) {
+; CHECK: test_vmulq_laneq_f64:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
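+; Extended multiply by lane via the llvm.aarch64.neon.vmulx intrinsics, expected to
+; select fmulx by element.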
+define <2 x float> @test_vmulx_lane_f32(<2 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulx_lane_f32:
+; CHECK: fmulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_lane_f32(<4 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulxq_lane_f32:
+; CHECK: fmulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_lane_f64(<2 x double> %a, <1 x double> %v) {
+; CHECK: test_vmulxq_lane_f64:
+; CHECK: fmulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
+define <2 x float> @test_vmulx_laneq_f32(<2 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulx_laneq_f32:
+; CHECK: fmulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> <i32 3, i32 3>
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_laneq_f32(<4 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulxq_laneq_f32:
+; CHECK: fmulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_laneq_f64(<2 x double> %a, <2 x double> %v) {
+; CHECK: test_vmulxq_laneq_f64:
+; CHECK: fmulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 1>
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
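+; The *_0 variants below repeat the tests with lane 0 (a zeroinitializer shuffle mask),
+; checking that the by-element form is still chosen for the lowest lane.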
+define <4 x i16> @test_vmla_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmla_lane_s16_0:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlaq_lane_s16_0:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmla_lane_s32_0:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlaq_lane_s32_0:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmla_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmla_laneq_s16_0:
+; CHECK: mla {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %add = add <4 x i16> %mul, %a
+ ret <4 x i16> %add
+}
+
+define <8 x i16> @test_vmlaq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlaq_laneq_s16_0:
+; CHECK: mla {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %add = add <8 x i16> %mul, %a
+ ret <8 x i16> %add
+}
+
+define <2 x i32> @test_vmla_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmla_laneq_s32_0:
+; CHECK: mla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %add = add <2 x i32> %mul, %a
+ ret <2 x i32> %add
+}
+
+define <4 x i32> @test_vmlaq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlaq_laneq_s32_0:
+; CHECK: mla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %add = add <4 x i32> %mul, %a
+ ret <4 x i32> %add
+}
+
+define <4 x i16> @test_vmls_lane_s16_0(<4 x i16> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmls_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_lane_s16_0(<8 x i16> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsq_lane_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_lane_s32_0(<2 x i32> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmls_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_lane_s32_0(<4 x i32> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsq_lane_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmls_laneq_s16_0(<4 x i16> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmls_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %b
+ %sub = sub <4 x i16> %a, %mul
+ ret <4 x i16> %sub
+}
+
+define <8 x i16> @test_vmlsq_laneq_s16_0(<8 x i16> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsq_laneq_s16_0:
+; CHECK: mls {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %b
+ %sub = sub <8 x i16> %a, %mul
+ ret <8 x i16> %sub
+}
+
+define <2 x i32> @test_vmls_laneq_s32_0(<2 x i32> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmls_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %b
+ %sub = sub <2 x i32> %a, %mul
+ ret <2 x i32> %sub
+}
+
+define <4 x i32> @test_vmlsq_laneq_s32_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsq_laneq_s32_0:
+; CHECK: mls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %b
+ %sub = sub <4 x i32> %a, %mul
+ ret <4 x i32> %sub
+}
+
+define <4 x i16> @test_vmul_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmul_lane_s16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmulq_lane_s16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmul_lane_s32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmulq_lane_s32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmul_lane_u16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmulq_lane_u16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmul_lane_u32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmulq_lane_u32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmul_laneq_s16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmulq_laneq_s16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmul_laneq_s32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmulq_laneq_s32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <4 x i16> @test_vmul_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmul_laneq_u16_0:
+; CHECK: mul {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i16> %shuffle, %a
+ ret <4 x i16> %mul
+}
+
+define <8 x i16> @test_vmulq_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmulq_laneq_u16_0:
+; CHECK: mul {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <8 x i32> zeroinitializer
+ %mul = mul <8 x i16> %shuffle, %a
+ ret <8 x i16> %mul
+}
+
+define <2 x i32> @test_vmul_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmul_laneq_u32_0:
+; CHECK: mul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %mul = mul <2 x i32> %shuffle, %a
+ ret <2 x i32> %mul
+}
+
+define <4 x i32> @test_vmulq_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmulq_laneq_u32_0:
+; CHECK: mul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
+ %mul = mul <4 x i32> %shuffle, %a
+ ret <4 x i32> %mul
+}
+
+define <2 x float> @test_vfma_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK: test_vfma_lane_f32_0:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK: test_vfmaq_lane_f32_0:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %lane = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfma_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK: test_vfma_laneq_f32_0:
+; CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmaq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK: test_vfmaq_laneq_f32_0:
+; CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %lane = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_lane_f32_0(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK: test_vfms_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_lane_f32_0(<4 x float> %a, <4 x float> %b, <2 x float> %v) {
+; CHECK: test_vfmsq_lane_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %sub = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <2 x float> %sub, <2 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x float> @test_vfms_laneq_f32_0(<2 x float> %a, <2 x float> %b, <4 x float> %v) {
+; CHECK: test_vfms_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x float> @llvm.fma.v2f32(<2 x float> %lane, <2 x float> %b, <2 x float> %a)
+ ret <2 x float> %0
+}
+
+define <4 x float> @test_vfmsq_laneq_f32_0(<4 x float> %a, <4 x float> %b, <4 x float> %v) {
+; CHECK: test_vfmsq_laneq_f32_0:
+; CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %v
+ %lane = shufflevector <4 x float> %sub, <4 x float> undef, <4 x i32> zeroinitializer
+ %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %lane, <4 x float> %b, <4 x float> %a)
+ ret <4 x float> %0
+}
+
+define <2 x double> @test_vfmaq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK: test_vfmaq_laneq_f64_0:
+; CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %lane = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <2 x double> @test_vfmsq_laneq_f64_0(<2 x double> %a, <2 x double> %b, <2 x double> %v) {
+; CHECK: test_vfmsq_laneq_f64_0:
+; CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %v
+ %lane = shufflevector <2 x double> %sub, <2 x double> undef, <2 x i32> zeroinitializer
+ %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %lane, <2 x double> %b, <2 x double> %a)
+ ret <2 x double> %0
+}
+
+define <4 x i32> @test_vmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_lane_s16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_lane_s32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_laneq_s16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_laneq_s32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_high_lane_s16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_high_lane_s32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_high_laneq_s16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_high_laneq_s32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_lane_s16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_lane_s32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_s16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_laneq_s16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_s32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_laneq_s32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_high_lane_s16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_high_lane_s32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_s16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_high_laneq_s16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_s32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_high_laneq_s32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlal_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_lane_u16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_lane_u32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_laneq_u16_0:
+; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_laneq_u32_0:
+; CHECK: mlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlal_high_lane_u16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlal_high_lane_u32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlal_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlal_high_laneq_u16_0:
+; CHECK: mlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %add = add <4 x i32> %vmull2.i, %a
+ ret <4 x i32> %add
+}
+
+define <2 x i64> @test_vmlal_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlal_high_laneq_u32_0:
+; CHECK: mlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %add = add <2 x i64> %vmull2.i, %a
+ ret <2 x i64> %add
+}
+
+define <4 x i32> @test_vmlsl_lane_u16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_lane_u16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_lane_u32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_lane_u32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_laneq_u16_0(<4 x i32> %a, <4 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_laneq_u16_0:
+; CHECK: mlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_laneq_u32_0(<2 x i64> %a, <2 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_laneq_u32_0:
+; CHECK: mlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_lane_u16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vmlsl_high_lane_u16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_lane_u32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vmlsl_high_lane_u32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmlsl_high_laneq_u16_0(<4 x i32> %a, <8 x i16> %b, <8 x i16> %v) {
+; CHECK: test_vmlsl_high_laneq_u16_0:
+; CHECK: mlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %sub = sub <4 x i32> %a, %vmull2.i
+ ret <4 x i32> %sub
+}
+
+define <2 x i64> @test_vmlsl_high_laneq_u32_0(<2 x i64> %a, <4 x i32> %b, <4 x i32> %v) {
+; CHECK: test_vmlsl_high_laneq_u32_0:
+; CHECK: mlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %sub = sub <2 x i64> %a, %vmull2.i
+ ret <2 x i64> %sub
+}
+
+define <4 x i32> @test_vmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_lane_s16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_lane_s32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_lane_u16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_lane_u16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_lane_u32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_lane_u32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_high_lane_s16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_high_lane_s32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_lane_u16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vmull_high_lane_u16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_lane_u32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vmull_high_lane_u32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_laneq_s16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_laneq_s32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_laneq_u16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_laneq_u16_0:
+; CHECK: mull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_laneq_u32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_laneq_u32_0:
+; CHECK: mull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_high_laneq_s16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_high_laneq_s32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vmull_high_laneq_u16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vmull_high_laneq_u16_0:
+; CHECK: mull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_high_laneq_u32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vmull_high_laneq_u32_0:
+; CHECK: mull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vmull2.i
+}
+
+define <4 x i32> @test_vqdmlal_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlal_lane_s16_0:
+; CHECK: qdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlal_lane_s32_0:
+; CHECK: qdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlal_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlal_high_lane_s16_0:
+; CHECK: qdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlal_high_lane_s32_0:
+; CHECK: qdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_lane_s16_0(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlsl_lane_s16_0:
+; CHECK: qdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_lane_s32_0(<2 x i64> %a, <2 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlsl_lane_s32_0:
+; CHECK: qdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_lane_s16_0(<4 x i32> %a, <8 x i16> %b, <4 x i16> %v) {
+; CHECK: test_vqdmlsl_high_lane_s16_0:
+; CHECK: qdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_lane_s32_0(<2 x i64> %a, <4 x i32> %b, <2 x i32> %v) {
+; CHECK: test_vqdmlsl_high_lane_s32_0:
+; CHECK: qdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmull_lane_s16_0:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmull_lane_s32_0:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_laneq_s16_0(<4 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vqdmull_laneq_s16_0:
+; CHECK: qdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_laneq_s32_0(<2 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vqdmull_laneq_s32_0:
+; CHECK: qdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmull_high_lane_s16_0:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmull_high_lane_s32_0:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i32> @test_vqdmull_high_laneq_s16_0(<8 x i16> %a, <8 x i16> %v) {
+; CHECK: test_vqdmull_high_laneq_s16_0:
+; CHECK: qdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i, <4 x i16> %shuffle)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_high_laneq_s32_0(<4 x i32> %a, <4 x i32> %v) {
+; CHECK: test_vqdmull_high_laneq_s32_0:
+; CHECK: qdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle = shufflevector <4 x i32> %v, <4 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i, <2 x i32> %shuffle)
+ ret <2 x i64> %vqdmull2.i
+}
+
+define <4 x i16> @test_vqdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmulh_lane_s16_0:
+; CHECK: qdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqdmulh2.i
+}
+
+define <8 x i16> @test_vqdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqdmulhq_lane_s16_0:
+; CHECK: qdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqdmulh2.i
+}
+
+define <2 x i32> @test_vqdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmulh_lane_s32_0:
+; CHECK: qdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqdmulh2.i
+}
+
+define <4 x i32> @test_vqdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqdmulhq_lane_s32_0:
+; CHECK: qdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %vqdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqdmulh2.i
+}
+
+define <4 x i16> @test_vqrdmulh_lane_s16_0(<4 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqrdmulh_lane_s16_0:
+; CHECK: qrdmulh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <4 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %a, <4 x i16> %shuffle)
+ ret <4 x i16> %vqrdmulh2.i
+}
+
+define <8 x i16> @test_vqrdmulhq_lane_s16_0(<8 x i16> %a, <4 x i16> %v) {
+; CHECK: test_vqrdmulhq_lane_s16_0:
+; CHECK: qrdmulh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[0]
+entry:
+ %shuffle = shufflevector <4 x i16> %v, <4 x i16> undef, <8 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %a, <8 x i16> %shuffle)
+ ret <8 x i16> %vqrdmulh2.i
+}
+
+define <2 x i32> @test_vqrdmulh_lane_s32_0(<2 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqrdmulh_lane_s32_0:
+; CHECK: qrdmulh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %a, <2 x i32> %shuffle)
+ ret <2 x i32> %vqrdmulh2.i
+}
+
+define <4 x i32> @test_vqrdmulhq_lane_s32_0(<4 x i32> %a, <2 x i32> %v) {
+; CHECK: test_vqrdmulhq_lane_s32_0:
+; CHECK: qrdmulh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x i32> %v, <2 x i32> undef, <4 x i32> zeroinitializer
+ %vqrdmulh2.i = tail call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %a, <4 x i32> %shuffle)
+ ret <4 x i32> %vqrdmulh2.i
+}
+
+define <2 x float> @test_vmul_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK: test_vmul_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <4 x float> @test_vmulq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulq_lane_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x float> @test_vmul_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK: test_vmul_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x float> %shuffle, %a
+ ret <2 x float> %mul
+}
+
+define <1 x double> @test_vmul_laneq_f64_0(<1 x double> %a, <2 x double> %v) {
+; CHECK: test_vmul_laneq_f64_0:
+; CHECK: fmul {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+entry:
+ %0 = bitcast <1 x double> %a to <8 x i8>
+ %1 = bitcast <8 x i8> %0 to double
+ %extract = extractelement <2 x double> %v, i32 0
+ %2 = fmul double %1, %extract
+ %3 = insertelement <1 x double> undef, double %2, i32 0
+ ret <1 x double> %3
+}
+
+define <4 x float> @test_vmulq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulq_laneq_f32_0:
+; CHECK: fmul {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %mul = fmul <4 x float> %shuffle, %a
+ ret <4 x float> %mul
+}
+
+define <2 x double> @test_vmulq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK: test_vmulq_laneq_f64_0:
+; CHECK: fmul {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %mul = fmul <2 x double> %shuffle, %a
+ ret <2 x double> %mul
+}
+
+define <2 x float> @test_vmulx_lane_f32_0(<2 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulx_lane_f32_0:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_lane_f32_0(<4 x float> %a, <2 x float> %v) {
+; CHECK: test_vmulxq_lane_f32_0:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <4 x i32> zeroinitializer
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_lane_f64_0(<2 x double> %a, <1 x double> %v) {
+; CHECK: test_vmulxq_lane_f64_0:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle = shufflevector <1 x double> %v, <1 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
+define <2 x float> @test_vmulx_laneq_f32_0(<2 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulx_laneq_f32_0:
+; CHECK: mulx {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %a, <2 x float> %shuffle)
+ ret <2 x float> %vmulx2.i
+}
+
+define <4 x float> @test_vmulxq_laneq_f32_0(<4 x float> %a, <4 x float> %v) {
+; CHECK: test_vmulxq_laneq_f32_0:
+; CHECK: mulx {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[0]
+entry:
+ %shuffle = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+ %vmulx2.i = tail call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %a, <4 x float> %shuffle)
+ ret <4 x float> %vmulx2.i
+}
+
+define <2 x double> @test_vmulxq_laneq_f64_0(<2 x double> %a, <2 x double> %v) {
+; CHECK: test_vmulxq_laneq_f64_0:
+; CHECK: mulx {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+entry:
+ %shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+ %vmulx2.i = tail call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %a, <2 x double> %shuffle)
+ ret <2 x double> %vmulx2.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-3vdiff.ll b/test/CodeGen/AArch64/neon-3vdiff.ll
new file mode 100644
index 000000000000..171e2b2edad0
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-3vdiff.ll
@@ -0,0 +1,1806 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>)
+
+declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>)
+
+declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>)
+
+declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>)
+
+declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)
+
+declare <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16>, <8 x i16>)
+
+declare <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64>, <2 x i64>)
+
+declare <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32>, <4 x i32>)
+
+declare <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_vaddl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vaddl_s8:
+; CHECK: saddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vaddl_s16:
+; CHECK: saddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vaddl_s32:
+; CHECK: saddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vaddl_u8:
+; CHECK: uaddl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vaddl_u16:
+; CHECK: uaddl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vaddl_u32:
+; CHECK: uaddl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vaddl_high_s8:
+; CHECK: saddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %1
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddl_high_s16:
+; CHECK: saddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %1
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddl_high_s32:
+; CHECK: saddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %1
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vaddl_high_u8:
+; CHECK: uaddl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %1
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddl_high_u16:
+; CHECK: uaddl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %1
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddl_high_u32:
+; CHECK: uaddl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %1
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK: test_vaddw_s8:
+; CHECK: saddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK: test_vaddw_s16:
+; CHECK: saddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK: test_vaddw_s32:
+; CHECK: saddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK: test_vaddw_u8:
+; CHECK: uaddw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK: test_vaddw_u16:
+; CHECK: uaddw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK: test_vaddw_u32:
+; CHECK: uaddw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK: test_vaddw_high_s8:
+; CHECK: saddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK: test_vaddw_high_s16:
+; CHECK: saddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK: test_vaddw_high_s32:
+; CHECK: saddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vaddw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK: test_vaddw_high_u8:
+; CHECK: uaddw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %0, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vaddw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK: test_vaddw_high_u16:
+; CHECK: uaddw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %0, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vaddw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK: test_vaddw_high_u32:
+; CHECK: uaddw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %0, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vsubl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsubl_s8:
+; CHECK: ssubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = sext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsubl_s16:
+; CHECK: ssubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = sext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsubl_s32:
+; CHECK: ssubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = sext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsubl_u8:
+; CHECK: usubl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
+ %vmovl.i2.i = zext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %vmovl.i.i, %vmovl.i2.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsubl_u16:
+; CHECK: usubl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
+ %vmovl.i2.i = zext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %vmovl.i.i, %vmovl.i2.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsubl_u32:
+; CHECK: usubl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
+ %vmovl.i2.i = zext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %vmovl.i.i, %vmovl.i2.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsubl_high_s8:
+; CHECK: ssubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = sext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %sub.i = sub <8 x i16> %0, %1
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubl_high_s16:
+; CHECK: ssubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = sext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %sub.i = sub <4 x i32> %0, %1
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubl_high_s32:
+; CHECK: ssubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = sext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %sub.i = sub <2 x i64> %0, %1
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsubl_high_u8:
+; CHECK: usubl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %shuffle.i.i2.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = zext <8 x i8> %shuffle.i.i2.i to <8 x i16>
+ %sub.i = sub <8 x i16> %0, %1
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubl_high_u16:
+; CHECK: usubl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %shuffle.i.i2.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = zext <4 x i16> %shuffle.i.i2.i to <4 x i32>
+ %sub.i = sub <4 x i32> %0, %1
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubl_high_u32:
+; CHECK: usubl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %shuffle.i.i2.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = zext <2 x i32> %shuffle.i.i2.i to <2 x i64>
+ %sub.i = sub <2 x i64> %0, %1
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_s8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK: test_vsubw_s8:
+; CHECK: ssubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = sext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %vmovl.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_s16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK: test_vsubw_s16:
+; CHECK: ssubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = sext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %vmovl.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_s32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK: test_vsubw_s32:
+; CHECK: ssubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = sext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %vmovl.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_u8(<8 x i16> %a, <8 x i8> %b) {
+; CHECK: test_vsubw_u8:
+; CHECK: usubw {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8b
+entry:
+ %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %vmovl.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_u16(<4 x i32> %a, <4 x i16> %b) {
+; CHECK: test_vsubw_u16:
+; CHECK: usubw {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4h
+entry:
+ %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %vmovl.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_u32(<2 x i64> %a, <2 x i32> %b) {
+; CHECK: test_vsubw_u32:
+; CHECK: usubw {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2s
+entry:
+ %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %vmovl.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_s8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK: test_vsubw_high_s8:
+; CHECK: ssubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = sext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %0
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_s16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK: test_vsubw_high_s16:
+; CHECK: ssubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = sext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %0
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_s32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK: test_vsubw_high_s32:
+; CHECK: ssubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = sext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %0
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vsubw_high_u8(<8 x i16> %a, <16 x i8> %b) {
+; CHECK: test_vsubw_high_u8:
+; CHECK: usubw2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %0 = zext <8 x i8> %shuffle.i.i.i to <8 x i16>
+ %sub.i = sub <8 x i16> %a, %0
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vsubw_high_u16(<4 x i32> %a, <8 x i16> %b) {
+; CHECK: test_vsubw_high_u16:
+; CHECK: usubw2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %0 = zext <4 x i16> %shuffle.i.i.i to <4 x i32>
+ %sub.i = sub <4 x i32> %a, %0
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vsubw_high_u32(<2 x i64> %a, <4 x i32> %b) {
+; CHECK: test_vsubw_high_u32:
+; CHECK: usubw2 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %0 = zext <2 x i32> %shuffle.i.i.i to <2 x i64>
+ %sub.i = sub <2 x i64> %a, %0
+ ret <2 x i64> %sub.i
+}
+
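+; Note (annotation, not part of the checked output): the halving-narrow tests
+; below express addhn/subhn in plain IR -- add (or subtract) the wide operands,
+; lshr each lane by half the element width, then trunc to the narrow type. The
+; "_high" variants additionally splice the narrow result onto the passed-in low
+; half via bitcasts to <1 x i64> and a shufflevector, and the CHECK lines expect
+; the backend to match these patterns to addhn/addhn2 and subhn/subhn2.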
+define <8 x i8> @test_vaddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddhn_s16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i = add <8 x i16> %a, %b
+ %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+ ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddhn_s32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i = add <4 x i32> %a, %b
+ %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+ ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vaddhn_s64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i = add <2 x i64> %a, %b
+ %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+ %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+ ret <2 x i32> %vaddhn2.i
+}
+
+define <8 x i8> @test_vaddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddhn_u16:
+; CHECK: addhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i = add <8 x i16> %a, %b
+ %vaddhn1.i = lshr <8 x i16> %vaddhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i = trunc <8 x i16> %vaddhn1.i to <8 x i8>
+ ret <8 x i8> %vaddhn2.i
+}
+
+define <4 x i16> @test_vaddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddhn_u32:
+; CHECK: addhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i = add <4 x i32> %a, %b
+ %vaddhn1.i = lshr <4 x i32> %vaddhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i = trunc <4 x i32> %vaddhn1.i to <4 x i16>
+ ret <4 x i16> %vaddhn2.i
+}
+
+define <2 x i32> @test_vaddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vaddhn_u64:
+; CHECK: addhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i = add <2 x i64> %a, %b
+ %vaddhn1.i = lshr <2 x i64> %vaddhn.i, <i64 32, i64 32>
+ %vaddhn2.i = trunc <2 x i64> %vaddhn1.i to <2 x i32>
+ ret <2 x i32> %vaddhn2.i
+}
+
+define <16 x i8> @test_vaddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddhn_high_s16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i.i = add <8 x i16> %a, %b
+ %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddhn_high_s32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i.i = add <4 x i32> %a, %b
+ %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vaddhn_high_s64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i.i = add <2 x i64> %a, %b
+ %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+ %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vaddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vaddhn_high_u16:
+; CHECK: addhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vaddhn.i.i = add <8 x i16> %a, %b
+ %vaddhn1.i.i = lshr <8 x i16> %vaddhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vaddhn2.i.i = trunc <8 x i16> %vaddhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vaddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vaddhn_high_u32:
+; CHECK: addhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vaddhn.i.i = add <4 x i32> %a, %b
+ %vaddhn1.i.i = lshr <4 x i32> %vaddhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vaddhn2.i.i = trunc <4 x i32> %vaddhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vaddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vaddhn_high_u64:
+; CHECK: addhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vaddhn.i.i = add <2 x i64> %a, %b
+ %vaddhn1.i.i = lshr <2 x i64> %vaddhn.i.i, <i64 32, i64 32>
+ %vaddhn2.i.i = trunc <2 x i64> %vaddhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vaddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
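+; Note: rounding halving-narrow has no plain-IR pattern, so the raddhn/rsubhn
+; tests call the llvm.arm.neon.vraddhn/vrsubhn intrinsics directly; the "_high"
+; variants again concatenate the narrow result into the upper half of the
+; destination register.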
+define <8 x i8> @test_vraddhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vraddhn_s16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vraddhn_s32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vraddhn_s64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vraddhn2.i
+}
+
+define <8 x i8> @test_vraddhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vraddhn_u16:
+; CHECK: raddhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vraddhn2.i
+}
+
+define <4 x i16> @test_vraddhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vraddhn_u32:
+; CHECK: raddhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vraddhn2.i
+}
+
+define <2 x i32> @test_vraddhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vraddhn_u64:
+; CHECK: raddhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vraddhn2.i
+}
+
+define <16 x i8> @test_vraddhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vraddhn_high_s16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vraddhn_high_s32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vraddhn_high_s64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vraddhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vraddhn_high_u16:
+; CHECK: raddhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vraddhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vraddhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vraddhn_high_u32:
+; CHECK: raddhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vraddhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vraddhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vraddhn_high_u64:
+; CHECK: raddhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vraddhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vraddhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubhn_s16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i = sub <8 x i16> %a, %b
+ %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+ ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubhn_s32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i = sub <4 x i32> %a, %b
+ %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
+ ret <4 x i16> %vsubhn2.i
+}
+
+define <2 x i32> @test_vsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsubhn_s64:
+; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i = sub <2 x i64> %a, %b
+ %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
+ %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
+ ret <2 x i32> %vsubhn2.i
+}
+
+define <8 x i8> @test_vsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubhn_u16:
+; CHECK: subhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i = sub <8 x i16> %a, %b
+ %vsubhn1.i = lshr <8 x i16> %vsubhn.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i = trunc <8 x i16> %vsubhn1.i to <8 x i8>
+ ret <8 x i8> %vsubhn2.i
+}
+
+define <4 x i16> @test_vsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubhn_u32:
+; CHECK: subhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i = sub <4 x i32> %a, %b
+ %vsubhn1.i = lshr <4 x i32> %vsubhn.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i = trunc <4 x i32> %vsubhn1.i to <4 x i16>
+ ret <4 x i16> %vsubhn2.i
+}
+
+define <2 x i32> @test_vsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsubhn_u64:
+; CHECK: subhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i = sub <2 x i64> %a, %b
+ %vsubhn1.i = lshr <2 x i64> %vsubhn.i, <i64 32, i64 32>
+ %vsubhn2.i = trunc <2 x i64> %vsubhn1.i to <2 x i32>
+ ret <2 x i32> %vsubhn2.i
+}
+
+define <16 x i8> @test_vsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubhn_high_s16:
+; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i.i = sub <8 x i16> %a, %b
+ %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubhn_high_s32:
+; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i.i = sub <4 x i32> %a, %b
+ %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsubhn_high_s64:
+; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i.i = sub <2 x i64> %a, %b
+ %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
+ %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsubhn_high_u16:
+; CHECK: subhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vsubhn.i.i = sub <8 x i16> %a, %b
+ %vsubhn1.i.i = lshr <8 x i16> %vsubhn.i.i, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %vsubhn2.i.i = trunc <8 x i16> %vsubhn1.i.i to <8 x i8>
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsubhn_high_u32:
+; CHECK: subhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vsubhn.i.i = sub <4 x i32> %a, %b
+ %vsubhn1.i.i = lshr <4 x i32> %vsubhn.i.i, <i32 16, i32 16, i32 16, i32 16>
+ %vsubhn2.i.i = trunc <4 x i32> %vsubhn1.i.i to <4 x i16>
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsubhn_high_u64:
+; CHECK: subhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vsubhn.i.i = sub <2 x i64> %a, %b
+ %vsubhn1.i.i = lshr <2 x i64> %vsubhn.i.i, <i64 32, i64 32>
+ %vsubhn2.i.i = trunc <2 x i64> %vsubhn1.i.i to <2 x i32>
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <8 x i8> @test_vrsubhn_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsubhn_s16:
+; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vrsubhn2.i
+}
+
+define <4 x i16> @test_vrsubhn_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsubhn_s32:
+; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vrsubhn2.i
+}
+
+define <2 x i32> @test_vrsubhn_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsubhn_s64:
+; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vrsubhn2.i
+}
+
+define <8 x i8> @test_vrsubhn_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsubhn_u16:
+; CHECK: rsubhn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ ret <8 x i8> %vrsubhn2.i
+}
+
+define <4 x i16> @test_vrsubhn_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsubhn_u32:
+; CHECK: rsubhn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ ret <4 x i16> %vrsubhn2.i
+}
+
+define <2 x i32> @test_vrsubhn_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsubhn_u64:
+; CHECK: rsubhn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i32> %vrsubhn2.i
+}
+
+define <16 x i8> @test_vrsubhn_high_s16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsubhn_high_s16:
+; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vrsubhn_high_s32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsubhn_high_s32:
+; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vrsubhn_high_s64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsubhn_high_s64:
+; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
+define <16 x i8> @test_vrsubhn_high_u16(<8 x i8> %r, <8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsubhn_high_u16:
+; CHECK: rsubhn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vrsubhn2.i.i = tail call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %a, <8 x i16> %b)
+ %0 = bitcast <8 x i8> %r to <1 x i64>
+ %1 = bitcast <8 x i8> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <16 x i8>
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vrsubhn_high_u32(<4 x i16> %r, <4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsubhn_high_u32:
+; CHECK: rsubhn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vrsubhn2.i.i = tail call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %a, <4 x i32> %b)
+ %0 = bitcast <4 x i16> %r to <1 x i64>
+ %1 = bitcast <4 x i16> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <8 x i16>
+ ret <8 x i16> %2
+}
+
+define <4 x i32> @test_vrsubhn_high_u64(<2 x i32> %r, <2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsubhn_high_u64:
+; CHECK: rsubhn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+entry:
+ %vrsubhn2.i.i = tail call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %a, <2 x i64> %b)
+ %0 = bitcast <2 x i32> %r to <1 x i64>
+ %1 = bitcast <2 x i32> %vrsubhn2.i.i to <1 x i64>
+ %shuffle.i.i = shufflevector <1 x i64> %0, <1 x i64> %1, <2 x i32> <i32 0, i32 1>
+ %2 = bitcast <2 x i64> %shuffle.i.i to <4 x i32>
+ ret <4 x i32> %2
+}
+
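+; Note: the absolute-difference-long tests call the vabds/vabdu intrinsics on
+; the narrow operands and then zero-extend; zext is valid for the signed form
+; as well because an absolute difference is never negative. The abal variants
+; accumulate the widened difference into the first operand.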
+define <8 x i16> @test_vabdl_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vabdl_s8:
+; CHECK: sabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %a, <8 x i8> %b)
+ %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i
+}
+
+define <4 x i32> @test_vabdl_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vabdl_s16:
+; CHECK: sabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %a, <4 x i16> %b)
+ %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i
+}
+
+define <2 x i64> @test_vabdl_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vabdl_s32:
+; CHECK: sabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %a, <2 x i32> %b)
+ %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i
+}
+
+define <8 x i16> @test_vabdl_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vabdl_u8:
+; CHECK: uabdl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %a, <8 x i8> %b)
+ %vmovl.i.i = zext <8 x i8> %vabd.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i
+}
+
+define <4 x i32> @test_vabdl_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vabdl_u16:
+; CHECK: uabdl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %a, <4 x i16> %b)
+ %vmovl.i.i = zext <4 x i16> %vabd2.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i
+}
+
+define <2 x i64> @test_vabdl_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vabdl_u32:
+; CHECK: uabdl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %a, <2 x i32> %b)
+ %vmovl.i.i = zext <2 x i32> %vabd2.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i
+}
+
+define <8 x i16> @test_vabal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vabal_s8:
+; CHECK: sabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %b, <8 x i8> %c)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vabal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vabal_s16:
+; CHECK: sabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %b, <4 x i16> %c)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vabal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vabal_s32:
+; CHECK: sabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %b, <2 x i32> %c)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vabal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vabal_u8:
+; CHECK: uabal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %b, <8 x i8> %c)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ %add.i = add <8 x i16> %vmovl.i.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vabal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vabal_u16:
+; CHECK: uabal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %b, <4 x i16> %c)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ %add.i = add <4 x i32> %vmovl.i.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vabal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vabal_u32:
+; CHECK: uabal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %b, <2 x i32> %c)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ %add.i = add <2 x i64> %vmovl.i.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vabdl_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vabdl_high_s8:
+; CHECK: sabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i.i
+}
+
+define <4 x i32> @test_vabdl_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vabdl_high_s16:
+; CHECK: sabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i.i
+}
+
+define <2 x i64> @test_vabdl_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vabdl_high_s32:
+; CHECK: sabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i.i
+}
+
+define <8 x i16> @test_vabdl_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vabdl_high_u8:
+; CHECK: uabdl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <8 x i8> %vabd.i.i.i to <8 x i16>
+ ret <8 x i16> %vmovl.i.i.i
+}
+
+define <4 x i32> @test_vabdl_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vabdl_high_u16:
+; CHECK: uabdl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <4 x i16> %vabd2.i.i.i to <4 x i32>
+ ret <4 x i32> %vmovl.i.i.i
+}
+
+define <2 x i64> @test_vabdl_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vabdl_high_u32:
+; CHECK: uabdl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i = zext <2 x i32> %vabd2.i.i.i to <2 x i64>
+ ret <2 x i64> %vmovl.i.i.i
+}
+
+define <8 x i16> @test_vabal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vabal_high_s8:
+; CHECK: sabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
+ %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vabal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vabal_high_s16:
+; CHECK: sabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
+ %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vabal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vabal_high_s32:
+; CHECK: sabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
+ %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vabal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vabal_high_u8:
+; CHECK: uabal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vabd.i.i.i.i = tail call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <8 x i8> %vabd.i.i.i.i to <8 x i16>
+ %add.i.i = add <8 x i16> %vmovl.i.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vabal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vabal_high_u16:
+; CHECK: uabal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vabd2.i.i.i.i = tail call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <4 x i16> %vabd2.i.i.i.i to <4 x i32>
+ %add.i.i = add <4 x i32> %vmovl.i.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vabal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vabal_high_u32:
+; CHECK: uabal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vabd2.i.i.i.i = tail call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vmovl.i.i.i.i = zext <2 x i32> %vabd2.i.i.i.i to <2 x i64>
+ %add.i.i = add <2 x i64> %vmovl.i.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
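+; Note: widening multiplies use the llvm.arm.neon.vmulls/vmullu intrinsics;
+; mlal/mlsl are the same multiply followed by an ordinary vector add or sub,
+; and the "_high" variants first extract the upper halves with shufflevector.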
+define <8 x i16> @test_vmull_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vmull_s8:
+; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <4 x i32> @test_vmull_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vmull_s16:
+; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vmull_s32:
+; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vmull2.i
+}
+
+define <8 x i16> @test_vmull_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vmull_u8:
+; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <4 x i32> @test_vmull_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vmull_u16:
+; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vmull2.i
+}
+
+define <2 x i64> @test_vmull_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vmull_u32:
+; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vmull2.i
+}
+
+define <8 x i16> @test_vmull_high_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vmull_high_s8:
+; CHECK: smull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @test_vmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vmull_high_s16:
+; CHECK: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @test_vmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vmull_high_s32:
+; CHECK: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <8 x i16> @test_vmull_high_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vmull_high_u8:
+; CHECK: umull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
+define <4 x i32> @test_vmull_high_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vmull_high_u16:
+; CHECK: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vmull2.i.i
+}
+
+define <2 x i64> @test_vmull_high_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vmull_high_u32:
+; CHECK: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vmull2.i.i
+}
+
+define <8 x i16> @test_vmlal_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vmlal_s8:
+; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %add.i = add <8 x i16> %vmull.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vmlal_s16:
+; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %add.i = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vmlal_s32:
+; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %add.i = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vmlal_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vmlal_u8:
+; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %add.i = add <8 x i16> %vmull.i.i, %a
+ ret <8 x i16> %add.i
+}
+
+define <4 x i32> @test_vmlal_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vmlal_u16:
+; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %add.i = add <4 x i32> %vmull2.i.i, %a
+ ret <4 x i32> %add.i
+}
+
+define <2 x i64> @test_vmlal_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vmlal_u32:
+; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %add.i = add <2 x i64> %vmull2.i.i, %a
+ ret <2 x i64> %add.i
+}
+
+define <8 x i16> @test_vmlal_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vmlal_high_s8:
+; CHECK: smlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %add.i.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vmlal_high_s16:
+; CHECK: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vmlal_high_s32:
+; CHECK: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vmlal_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vmlal_high_u8:
+; CHECK: umlal2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %add.i.i = add <8 x i16> %vmull.i.i.i, %a
+ ret <8 x i16> %add.i.i
+}
+
+define <4 x i32> @test_vmlal_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vmlal_high_u16:
+; CHECK: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+ ret <4 x i32> %add.i.i
+}
+
+define <2 x i64> @test_vmlal_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vmlal_high_u32:
+; CHECK: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+ ret <2 x i64> %add.i.i
+}
+
+define <8 x i16> @test_vmlsl_s8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vmlsl_s8:
+; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %sub.i = sub <8 x i16> %a, %vmull.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vmlsl_s16:
+; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %sub.i = sub <4 x i32> %a, %vmull2.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vmlsl_s32:
+; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %sub.i = sub <2 x i64> %a, %vmull2.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vmlsl_u8(<8 x i16> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vmlsl_u8:
+; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %b, <8 x i8> %c)
+ %sub.i = sub <8 x i16> %a, %vmull.i.i
+ ret <8 x i16> %sub.i
+}
+
+define <4 x i32> @test_vmlsl_u16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vmlsl_u16:
+; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %sub.i = sub <4 x i32> %a, %vmull2.i.i
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vmlsl_u32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vmlsl_u32:
+; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %sub.i = sub <2 x i64> %a, %vmull2.i.i
+ ret <2 x i64> %sub.i
+}
+
+define <8 x i16> @test_vmlsl_high_s8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vmlsl_high_s8:
+; CHECK: smlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
+ ret <8 x i16> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vmlsl_high_s16:
+; CHECK: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vmlsl_high_s32:
+; CHECK: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <8 x i16> @test_vmlsl_high_u8(<8 x i16> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vmlsl_high_u8:
+; CHECK: umlsl2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %c, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ %sub.i.i = sub <8 x i16> %a, %vmull.i.i.i
+ ret <8 x i16> %sub.i.i
+}
+
+define <4 x i32> @test_vmlsl_high_u16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vmlsl_high_u16:
+; CHECK: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vmull2.i.i.i = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+ ret <4 x i32> %sub.i.i
+}
+
+define <2 x i64> @test_vmlsl_high_u32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vmlsl_high_u32:
+; CHECK: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vmull2.i.i.i = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+ ret <2 x i64> %sub.i.i
+}
+
+define <4 x i32> @test_vqdmull_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vqdmull_s16:
+; CHECK: sqdmull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmull2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %a, <4 x i16> %b)
+ ret <4 x i32> %vqdmull2.i
+}
+
+define <2 x i64> @test_vqdmull_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vqdmull_s32:
+; CHECK: sqdmull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmull2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i64> %vqdmull2.i
+}
+
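+; The saturating multiply-accumulate forms below are written as a vqdmull
+; followed by a saturating add/sub; the expectation is that the pair of
+; intrinsic calls is matched to a single sqdmlal/sqdmlsl instruction.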
+define <4 x i32> @test_vqdmlal_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vqdmlal_s16:
+; CHECK: sqdmlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmlal2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %vqdmlal4.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i)
+ ret <4 x i32> %vqdmlal4.i
+}
+
+define <2 x i64> @test_vqdmlal_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vqdmlal_s32:
+; CHECK: sqdmlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmlal2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %vqdmlal4.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i)
+ ret <2 x i64> %vqdmlal4.i
+}
+
+define <4 x i32> @test_vqdmlsl_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %c) {
+; CHECK: test_vqdmlsl_s16:
+; CHECK: sqdmlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vqdmlsl2.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %b, <4 x i16> %c)
+ %vqdmlsl4.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i)
+ ret <4 x i32> %vqdmlsl4.i
+}
+
+define <2 x i64> @test_vqdmlsl_s32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) {
+; CHECK: test_vqdmlsl_s32:
+; CHECK: sqdmlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %vqdmlsl2.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %b, <2 x i32> %c)
+ %vqdmlsl4.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i)
+ ret <2 x i64> %vqdmlsl4.i
+}
+
+define <4 x i32> @test_vqdmull_high_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vqdmull_high_s16:
+; CHECK: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmull2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ ret <4 x i32> %vqdmull2.i.i
+}
+
+define <2 x i64> @test_vqdmull_high_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vqdmull_high_s32:
+; CHECK: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmull2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ ret <2 x i64> %vqdmull2.i.i
+}
+
+define <4 x i32> @test_vqdmlal_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vqdmlal_high_s16:
+; CHECK: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmlal2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vqdmlal4.i.i = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal2.i.i)
+ ret <4 x i32> %vqdmlal4.i.i
+}
+
+define <2 x i64> @test_vqdmlal_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vqdmlal_high_s32:
+; CHECK: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmlal2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vqdmlal4.i.i = tail call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal2.i.i)
+ ret <2 x i64> %vqdmlal4.i.i
+}
+
+define <4 x i32> @test_vqdmlsl_high_s16(<4 x i32> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK: test_vqdmlsl_high_s16:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i3.i = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmlsl2.i.i = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+ %vqdmlsl4.i.i = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl2.i.i)
+ ret <4 x i32> %vqdmlsl4.i.i
+}
+
+define <2 x i64> @test_vqdmlsl_high_s32(<2 x i64> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK: test_vqdmlsl_high_s32:
+; CHECK: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %shuffle.i3.i = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %vqdmlsl2.i.i = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> %shuffle.i3.i)
+ %vqdmlsl4.i.i = tail call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl2.i.i)
+ ret <2 x i64> %vqdmlsl4.i.i
+}
+
+define <8 x i16> @test_vmull_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vmull_p8:
+; CHECK: pmull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %a, <8 x i8> %b)
+ ret <8 x i16> %vmull.i
+}
+
+define <8 x i16> @test_vmull_high_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vmull_high_p8:
+; CHECK: pmull2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %shuffle.i3.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vmull.i.i = tail call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %shuffle.i.i, <8 x i8> %shuffle.i3.i)
+ ret <8 x i16> %vmull.i.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-aba-abd.ll b/test/CodeGen/AArch64/neon-aba-abd.ll
new file mode 100644
index 000000000000..54009849ef60
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-aba-abd.ll
@@ -0,0 +1,236 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uabd_v8i8:
+ %abd = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uabd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %abd
+}
+
+define <8 x i8> @test_uaba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uaba_v8i8:
+ %abd = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+ %aba = add <8 x i8> %lhs, %abd
+; CHECK: uaba v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %aba
+}
+
+define <8 x i8> @test_sabd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sabd_v8i8:
+ %abd = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sabd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %abd
+}
+
+define <8 x i8> @test_saba_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_saba_v8i8:
+ %abd = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+ %aba = add <8 x i8> %lhs, %abd
+; CHECK: saba v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %aba
+}
+
+declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uabd_v16i8:
+ %abd = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uabd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %abd
+}
+
+define <16 x i8> @test_uaba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uaba_v16i8:
+ %abd = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %aba = add <16 x i8> %lhs, %abd
+; CHECK: uaba v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %aba
+}
+
+define <16 x i8> @test_sabd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sabd_v16i8:
+ %abd = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sabd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %abd
+}
+
+define <16 x i8> @test_saba_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_saba_v16i8:
+ %abd = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+ %aba = add <16 x i8> %lhs, %abd
+; CHECK: saba v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %aba
+}
+
+declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uabd_v4i16:
+ %abd = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uabd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %abd
+}
+
+define <4 x i16> @test_uaba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uaba_v4i16:
+ %abd = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %aba = add <4 x i16> %lhs, %abd
+; CHECK: uaba v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %aba
+}
+
+define <4 x i16> @test_sabd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sabd_v4i16:
+ %abd = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sabd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %abd
+}
+
+define <4 x i16> @test_saba_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_saba_v4i16:
+ %abd = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %aba = add <4 x i16> %lhs, %abd
+; CHECK: saba v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %aba
+}
+
+declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uabd_v8i16:
+ %abd = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uabd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %abd
+}
+
+define <8 x i16> @test_uaba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uaba_v8i16:
+ %abd = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %aba = add <8 x i16> %lhs, %abd
+; CHECK: uaba v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %aba
+}
+
+define <8 x i16> @test_sabd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sabd_v8i16:
+ %abd = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sabd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %abd
+}
+
+define <8 x i16> @test_saba_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_saba_v8i16:
+ %abd = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %aba = add <8 x i16> %lhs, %abd
+; CHECK: saba v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %aba
+}
+
+declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uabd_v2i32:
+ %abd = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uabd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %abd
+}
+
+define <2 x i32> @test_uaba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uaba_v2i32:
+ %abd = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ %aba = add <2 x i32> %lhs, %abd
+; CHECK: uaba v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %aba
+}
+
+define <2 x i32> @test_sabd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sabd_v2i32:
+ %abd = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sabd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %abd
+}
+
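+; With constant operands the intrinsic is not expected to be folded away:
+; one operand is materialized with movi and sabd is still emitted.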
+define <2 x i32> @test_sabd_v2i32_const() {
+; CHECK: test_sabd_v2i32_const:
+; CHECK: movi d1, #0xffffffff0000
+; CHECK-NEXT: sabd v0.2s, v0.2s, v1.2s
+ %1 = tail call <2 x i32> @llvm.arm.neon.vabds.v2i32(
+ <2 x i32> <i32 -2147483648, i32 2147450880>,
+ <2 x i32> <i32 -65536, i32 65535>)
+ ret <2 x i32> %1
+}
+
+define <2 x i32> @test_saba_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_saba_v2i32:
+ %abd = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ %aba = add <2 x i32> %lhs, %abd
+; CHECK: saba v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %aba
+}
+
+declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uabd_v4i32:
+ %abd = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uabd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %abd
+}
+
+define <4 x i32> @test_uaba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uaba_v4i32:
+ %abd = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ %aba = add <4 x i32> %lhs, %abd
+; CHECK: uaba v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %aba
+}
+
+define <4 x i32> @test_sabd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sabd_v4i32:
+ %abd = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sabd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %abd
+}
+
+define <4 x i32> @test_saba_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_saba_v4i32:
+ %abd = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ %aba = add <4 x i32> %lhs, %abd
+; CHECK: saba v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %aba
+}
+
+declare <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float>, <2 x float>)
+
+define <2 x float> @test_fabd_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fabd_v2f32:
+ %abd = call <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fabd v0.2s, v0.2s, v1.2s
+ ret <2 x float> %abd
+}
+
+declare <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float>, <4 x float>)
+
+define <4 x float> @test_fabd_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fabd_v4f32:
+ %abd = call <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fabd v0.4s, v0.4s, v1.4s
+ ret <4 x float> %abd
+}
+
+declare <2 x double> @llvm.arm.neon.vabds.v2f64(<2 x double>, <2 x double>)
+
+define <2 x double> @test_fabd_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fabd_v2f64:
+ %abd = call <2 x double> @llvm.arm.neon.vabds.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fabd v0.2d, v0.2d, v1.2d
+ ret <2 x double> %abd
+}
diff --git a/test/CodeGen/AArch64/neon-across.ll b/test/CodeGen/AArch64/neon-across.ll
new file mode 100644
index 000000000000..733db970cf33
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-across.ll
@@ -0,0 +1,476 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
+
+declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
+
+define i16 @test_vaddlv_s8(<8 x i8> %a) {
+; CHECK: test_vaddlv_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_s16(<4 x i16> %a) {
+; CHECK: test_vaddlv_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlv_u8(<8 x i8> %a) {
+; CHECK: test_vaddlv_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_u16(<4 x i16> %a) {
+; CHECK: test_vaddlv_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_s32:
+; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %saddlv.i, i32 0
+ ret i64 %0
+}
+
+define i16 @test_vaddlvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_u32:
+; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %uaddlv.i, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vmaxv_s8(<8 x i8> %a) {
+; CHECK: test_vmaxv_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_s16(<4 x i16> %a) {
+; CHECK: test_vmaxv_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxv_u8(<8 x i8> %a) {
+; CHECK: test_vmaxv_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_u16(<4 x i16> %a) {
+; CHECK: test_vmaxv_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxvq_s8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_s16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_s32:
+; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %smaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vmaxvq_u8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_u16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_u32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_u32:
+; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %umaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminv_s8(<8 x i8> %a) {
+; CHECK: test_vminv_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_s16(<4 x i16> %a) {
+; CHECK: test_vminv_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminv_u8(<8 x i8> %a) {
+; CHECK: test_vminv_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_u16(<4 x i16> %a) {
+; CHECK: test_vminv_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminvq_s8(<16 x i8> %a) {
+; CHECK: test_vminvq_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_s16(<8 x i16> %a) {
+; CHECK: test_vminvq_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a) {
+; CHECK: test_vminvq_s32:
+; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %sminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminvq_u8(<16 x i8> %a) {
+; CHECK: test_vminvq_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_u16(<8 x i16> %a) {
+; CHECK: test_vminvq_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_u32(<4 x i32> %a) {
+; CHECK: test_vminvq_u32:
+; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %uminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddv_s8(<8 x i8> %a) {
+; CHECK: test_vaddv_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_s16(<4 x i16> %a) {
+; CHECK: test_vaddv_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddv_u8(<8 x i8> %a) {
+; CHECK: test_vaddv_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_u16(<4 x i16> %a) {
+; CHECK: test_vaddv_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddvq_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddvq_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddvq_s32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddvq_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddvq_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddvq_u32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define float @test_vmaxvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxvq_f32:
+; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminvq_f32(<4 x float> %a) {
+; CHECK: test_vminvq_f32:
+; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminv.i, i32 0
+ ret float %0
+}
+
+define float @test_vmaxnmvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxnmvq_f32:
+; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminnmvq_f32(<4 x float> %a) {
+; CHECK: test_vminnmvq_f32:
+; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminnmv.i, i32 0
+ ret float %0
+}
+
diff --git a/test/CodeGen/AArch64/neon-add-pairwise.ll b/test/CodeGen/AArch64/neon-add-pairwise.ll
new file mode 100644
index 000000000000..1abfed31908c
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-add-pairwise.ll
@@ -0,0 +1,92 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_addp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0 and v1 is possible, but would be odd.
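+; (Presumably the AArch64 PCS assigns the two vector arguments to v0 and v1
+; and the result to v0, so the hard-coded registers in the CHECK lines are
+; what a straightforward lowering of the intrinsic call should produce.)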
+; CHECK: test_addp_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: addp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vpadd.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_addp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_addp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vpadd.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: addp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_addp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_addp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: addp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vpadd.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_addp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_addp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vpadd.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: addp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_addp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_addp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: addp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vpadd.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_addp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_addp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vpadd.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: addp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+
+declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_addp_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_addp_v2i64:
+ %val = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: addp v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %val
+}
+
+declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vpadd.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vpadd.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_faddp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_faddp_v2f32:
+ %val = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: faddp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_faddp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_faddp_v4f32:
+ %val = call <4 x float> @llvm.arm.neon.vpadd.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: faddp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_faddp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_faddp_v2f64:
+ %val = call <2 x double> @llvm.arm.neon.vpadd.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: faddp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
diff --git a/test/CodeGen/AArch64/neon-add-sub.ll b/test/CodeGen/AArch64/neon-add-sub.ll
new file mode 100644
index 000000000000..078ba14bd87a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-add-sub.ll
@@ -0,0 +1,237 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: add {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp3 = add <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: add {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp3 = add <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: add {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+ %tmp3 = add <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: add {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+ %tmp3 = add <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: add {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = add <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: add {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = add <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: add {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = add <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fadd {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = fadd <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fadd {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = fadd <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: add {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = fadd <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: sub {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp3 = sub <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: sub {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp3 = sub <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: sub {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+ %tmp3 = sub <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: sub {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+ %tmp3 = sub <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: sub {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = sub <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: sub {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = sub <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: sub {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = sub <2 x i64> %A, %B;
+ ret <2 x i64> %tmp3
+}
+
+define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fsub {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = fsub <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fsub {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = fsub <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: sub {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = fsub <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
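+; The <1 x double> cases below are expected to lower to the scalar
+; floating-point forms (d-register operands) rather than to vector
+; instructions.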
+define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vadd_f64
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fadd <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmul_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vdiv_f64
+; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fdiv <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmla_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %b, %c
+ %2 = fadd <1 x double> %1, %a
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vmls_f64
+; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fmul <1 x double> %b, %c
+ %2 = fsub <1 x double> %a, %1
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfms_f64
+; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> <double -0.000000e+00>, %b
+ %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
+ ret <1 x double> %2
+}
+
+define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
+; CHECK-LABEL: test_vfma_f64
+; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vsub_f64
+; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> %a, %b
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vabd_f64
+; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmax_f64
+; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmin_f64
+; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vmaxnm_f64
+; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vminnm_f64
+; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vabs_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vabs_f64
+; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vneg_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vneg_f64
+; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fsub <1 x double> <double -0.000000e+00>, %a
+ ret <1 x double> %1
+}
+
+declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
+declare <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-bitcast.ll b/test/CodeGen/AArch64/neon-bitcast.ll
new file mode 100644
index 000000000000..f9ec70484024
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-bitcast.ll
@@ -0,0 +1,574 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon -verify-machineinstrs < %s | FileCheck %s
+
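+; All of the bitcasts in this file are between vector types of the same total
+; size, so each function is expected to need no instructions: the CHECK-NEXT
+; lines only allow the entry-block label followed immediately by ret.
+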
+; From <8 x i8>
+
+define <1 x i64> @test_v8i8_to_v1i64(<8 x i8> %in) nounwind {
+; CHECK: test_v8i8_to_v1i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i8> %in to <1 x i64>
+ ret <1 x i64> %val
+}
+
+define <2 x i32> @test_v8i8_to_v2i32(<8 x i8> %in) nounwind {
+; CHECK: test_v8i8_to_v2i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i8> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v8i8_to_v1f32(<8 x i8> %in) nounwind{
+; CHECK: test_v8i8_to_v1f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i8> %in to <2 x float>
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v8i8_to_v4i16(<8 x i8> %in) nounwind{
+; CHECK: test_v8i8_to_v4i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i8> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <8 x i8> @test_v8i8_to_v8i8(<8 x i8> %in) nounwind{
+; CHECK: test_v8i8_to_v8i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i8> %in to <8 x i8>
+ ret <8 x i8> %val
+}
+
+; From <4 x i16>
+
+define <1 x i64> @test_v4i16_to_v1i64(<4 x i16> %in) nounwind {
+; CHECK: test_v4i16_to_v1i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i16> %in to <1 x i64>
+ ret <1 x i64> %val
+}
+
+define <2 x i32> @test_v4i16_to_v2i32(<4 x i16> %in) nounwind {
+; CHECK: test_v4i16_to_v2i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i16> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v4i16_to_v1f32(<4 x i16> %in) nounwind{
+; CHECK: test_v4i16_to_v1f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i16> %in to <2 x float>
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v4i16_to_v4i16(<4 x i16> %in) nounwind{
+; CHECK: test_v4i16_to_v4i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i16> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <8 x i8> @test_v4i16_to_v8i8(<4 x i16> %in) nounwind{
+; CHECK: test_v4i16_to_v8i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i16> %in to <8 x i8>
+ ret <8 x i8> %val
+}
+
+; From <2 x i32>
+
+define <1 x i64> @test_v2i32_to_v1i64(<2 x i32> %in) nounwind {
+; CHECK: test_v2i32_to_v1i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i32> %in to <1 x i64>
+ ret <1 x i64> %val
+}
+
+define <2 x i32> @test_v2i32_to_v2i32(<2 x i32> %in) nounwind {
+; CHECK: test_v2i32_to_v2i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i32> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v2i32_to_v1f32(<2 x i32> %in) nounwind{
+; CHECK: test_v2i32_to_v1f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i32> %in to <2 x float>
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v2i32_to_v4i16(<2 x i32> %in) nounwind{
+; CHECK: test_v2i32_to_v4i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i32> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <8 x i8> @test_v2i32_to_v8i8(<2 x i32> %in) nounwind{
+; CHECK: test_v2i32_to_v8i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i32> %in to <8 x i8>
+ ret <8 x i8> %val
+}
+
+; From <2 x float>
+
+define <1 x i64> @test_v2f32_to_v1i64(<2 x float> %in) nounwind {
+; CHECK: test_v2f32_to_v1i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x float> %in to <1 x i64>
+ ret <1 x i64> %val
+}
+
+define <2 x i32> @test_v2f32_to_v2i32(<2 x float> %in) nounwind {
+; CHECK: test_v2f32_to_v2i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x float> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v2f32_to_v2f32(<2 x float> %in) nounwind{
+; CHECK: test_v2f32_to_v2f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x float> %in to <2 x float>
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v2f32_to_v4i16(<2 x float> %in) nounwind{
+; CHECK: test_v2f32_to_v4i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x float> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <8 x i8> @test_v2f32_to_v8i8(<2 x float> %in) nounwind{
+; CHECK: test_v2f32_to_v8i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x float> %in to <8 x i8>
+ ret <8 x i8> %val
+}
+
+; From <1 x i64>
+
+define <1 x i64> @test_v1i64_to_v1i64(<1 x i64> %in) nounwind {
+; CHECK: test_v1i64_to_v1i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <1 x i64> %in to <1 x i64>
+ ret <1 x i64> %val
+}
+
+define <2 x i32> @test_v1i64_to_v2i32(<1 x i64> %in) nounwind {
+; CHECK: test_v1i64_to_v2i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <1 x i64> %in to <2 x i32>
+ ret <2 x i32> %val
+}
+
+define <2 x float> @test_v1i64_to_v2f32(<1 x i64> %in) nounwind{
+; CHECK: test_v1i64_to_v2f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <1 x i64> %in to <2 x float>
+ ret <2 x float> %val
+}
+
+define <4 x i16> @test_v1i64_to_v4i16(<1 x i64> %in) nounwind{
+; CHECK: test_v1i64_to_v4i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <1 x i64> %in to <4 x i16>
+ ret <4 x i16> %val
+}
+
+define <8 x i8> @test_v1i64_to_v8i8(<1 x i64> %in) nounwind{
+; CHECK: test_v1i64_to_v8i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <1 x i64> %in to <8 x i8>
+ ret <8 x i8> %val
+}
+
+
+; From <16 x i8>
+
+define <2 x double> @test_v16i8_to_v2f64(<16 x i8> %in) nounwind {
+; CHECK: test_v16i8_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v16i8_to_v2i64(<16 x i8> %in) nounwind {
+; CHECK: test_v16i8_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v16i8_to_v4i32(<16 x i8> %in) nounwind {
+; CHECK: test_v16i8_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v16i8_to_v2f32(<16 x i8> %in) nounwind{
+; CHECK: test_v16i8_to_v2f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v16i8_to_v8i16(<16 x i8> %in) nounwind{
+; CHECK: test_v16i8_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v16i8_to_v16i8(<16 x i8> %in) nounwind{
+; CHECK: test_v16i8_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <16 x i8> %in to <16 x i8>
+ ret <16 x i8> %val
+}
+
+; From <8 x i16>
+
+define <2 x double> @test_v8i16_to_v2f64(<8 x i16> %in) nounwind {
+; CHECK: test_v8i16_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v8i16_to_v2i64(<8 x i16> %in) nounwind {
+; CHECK: test_v8i16_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v8i16_to_v4i32(<8 x i16> %in) nounwind {
+; CHECK: test_v8i16_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v8i16_to_v2f32(<8 x i16> %in) nounwind{
+; CHECK: test_v8i16_to_v2f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v8i16_to_v8i16(<8 x i16> %in) nounwind{
+; CHECK: test_v8i16_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v8i16_to_v16i8(<8 x i16> %in) nounwind{
+; CHECK: test_v8i16_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <8 x i16> %in to <16 x i8>
+ ret <16 x i8> %val
+}
+
+; From <4 x i32>
+
+define <2 x double> @test_v4i32_to_v2f64(<4 x i32> %in) nounwind {
+; CHECK: test_v4i32_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v4i32_to_v2i64(<4 x i32> %in) nounwind {
+; CHECK: test_v4i32_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v4i32_to_v4i32(<4 x i32> %in) nounwind {
+; CHECK: test_v4i32_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v4i32_to_v2f32(<4 x i32> %in) nounwind{
+; CHECK: test_v4i32_to_v2f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v4i32_to_v8i16(<4 x i32> %in) nounwind{
+; CHECK: test_v4i32_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v4i32_to_v16i8(<4 x i32> %in) nounwind{
+; CHECK: test_v4i32_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x i32> %in to <16 x i8>
+ ret <16 x i8> %val
+}
+
+; From <4 x float>
+
+define <2 x double> @test_v4f32_to_v2f64(<4 x float> %in) nounwind {
+; CHECK: test_v4f32_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v4f32_to_v2i64(<4 x float> %in) nounwind {
+; CHECK: test_v4f32_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v4f32_to_v4i32(<4 x float> %in) nounwind {
+; CHECK: test_v4f32_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v4f32_to_v4f32(<4 x float> %in) nounwind{
+; CHECK: test_v4f32_to_v4f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v4f32_to_v8i16(<4 x float> %in) nounwind{
+; CHECK: test_v4f32_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v4f32_to_v16i8(<4 x float> %in) nounwind{
+; CHECK: test_v4f32_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <4 x float> %in to <16 x i8>
+ ret <16 x i8> %val
+}
+
+; From <2 x i64>
+
+define <2 x double> @test_v2i64_to_v2f64(<2 x i64> %in) nounwind {
+; CHECK: test_v2i64_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v2i64_to_v2i64(<2 x i64> %in) nounwind {
+; CHECK: test_v2i64_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v2i64_to_v4i32(<2 x i64> %in) nounwind {
+; CHECK: test_v2i64_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v2i64_to_v4f32(<2 x i64> %in) nounwind{
+; CHECK: test_v2i64_to_v4f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v2i64_to_v8i16(<2 x i64> %in) nounwind {
+; CHECK: test_v2i64_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v2i64_to_v16i8(<2 x i64> %in) nounwind {
+; CHECK: test_v2i64_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x i64> %in to <16 x i8>
+ ret <16 x i8> %val
+}
+
+; From <2 x double>
+
+define <2 x double> @test_v2f64_to_v2f64(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v2f64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <2 x double>
+ ret <2 x double> %val
+}
+
+define <2 x i64> @test_v2f64_to_v2i64(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v2i64:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <2 x i64>
+ ret <2 x i64> %val
+}
+
+define <4 x i32> @test_v2f64_to_v4i32(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v4i32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <4 x i32>
+ ret <4 x i32> %val
+}
+
+define <4 x float> @test_v2f64_to_v4f32(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v4f32:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <4 x float>
+ ret <4 x float> %val
+}
+
+define <8 x i16> @test_v2f64_to_v8i16(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v8i16:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <8 x i16>
+ ret <8 x i16> %val
+}
+
+define <16 x i8> @test_v2f64_to_v16i8(<2 x double> %in) nounwind {
+; CHECK: test_v2f64_to_v16i8:
+; CHECK-NEXT: // BB#0:
+; CHECK-NEXT: ret
+
+ %val = bitcast <2 x double> %in to <16 x i8>
+ ret <16 x i8> %val
+}
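+
+; All of the bitcasts in this file are expected to be no-ops at the machine
+; level: the CHECK-NEXT pairs above require that nothing but the ret appears
+; after the entry-block label.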
+
diff --git a/test/CodeGen/AArch64/neon-bitwise-instructions.ll b/test/CodeGen/AArch64/neon-bitwise-instructions.ll
new file mode 100644
index 000000000000..1c43b979fc44
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-bitwise-instructions.ll
@@ -0,0 +1,594 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+
+define <8 x i8> @and8xi8(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <8 x i8> %a, %b;
+ ret <8 x i8> %tmp1
+}
+
+define <16 x i8> @and16xi8(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <16 x i8> %a, %b;
+ ret <16 x i8> %tmp1
+}
+
+
+define <8 x i8> @orr8xi8(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = or <8 x i8> %a, %b;
+ ret <8 x i8> %tmp1
+}
+
+define <16 x i8> @orr16xi8(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = or <16 x i8> %a, %b;
+ ret <16 x i8> %tmp1
+}
+
+
+define <8 x i8> @xor8xi8(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <8 x i8> %a, %b;
+ ret <8 x i8> %tmp1
+}
+
+define <16 x i8> @xor16xi8(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <16 x i8> %a, %b;
+ ret <16 x i8> %tmp1
+}
+
+define <8 x i8> @bsl8xi8_const(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <8 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = and <8 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+ %tmp3 = or <8 x i8> %tmp1, %tmp2
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @bsl16xi8_const(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <16 x i8> %a, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = and <16 x i8> %b, < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0 >
+ %tmp3 = or <16 x i8> %tmp1, %tmp2
+ ret <16 x i8> %tmp3
+}
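+
+; The two constant-mask tests above use all-ones/all-zero masks; the general
+; (a & mask) | (b & ~mask) select pattern with a register mask is exercised by
+; the bsl*xi* tests later in this file.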
+
+define <8 x i8> @orn8xi8(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <8 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = or <8 x i8> %a, %tmp1
+ ret <8 x i8> %tmp2
+}
+
+define <16 x i8> @orn16xi8(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <16 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = or <16 x i8> %a, %tmp1
+ ret <16 x i8> %tmp2
+}
+
+define <8 x i8> @bic8xi8(<8 x i8> %a, <8 x i8> %b) {
+;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <8 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = and <8 x i8> %a, %tmp1
+ ret <8 x i8> %tmp2
+}
+
+define <16 x i8> @bic16xi8(<16 x i8> %a, <16 x i8> %b) {
+;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <16 x i8> %b, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
+ %tmp2 = and <16 x i8> %a, %tmp1
+ ret <16 x i8> %tmp2
+}
+
+define <2 x i32> @orrimm2s_lsl0(<2 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.2s, #0xff
+ %tmp1 = or <2 x i32> %a, < i32 255, i32 255>
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @orrimm2s_lsl8(<2 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #8
+ %tmp1 = or <2 x i32> %a, < i32 65280, i32 65280>
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @orrimm2s_lsl16(<2 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #16
+ %tmp1 = or <2 x i32> %a, < i32 16711680, i32 16711680>
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @orrimm2s_lsl24(<2 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.2s, #0xff, lsl #24
+ %tmp1 = or <2 x i32> %a, < i32 4278190080, i32 4278190080>
+ ret <2 x i32> %tmp1
+}
+
+define <4 x i32> @orrimm4s_lsl0(<4 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.4s, #0xff
+ %tmp1 = or <4 x i32> %a, < i32 255, i32 255, i32 255, i32 255>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @orrimm4s_lsl8(<4 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #8
+ %tmp1 = or <4 x i32> %a, < i32 65280, i32 65280, i32 65280, i32 65280>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @orrimm4s_lsl16(<4 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #16
+ %tmp1 = or <4 x i32> %a, < i32 16711680, i32 16711680, i32 16711680, i32 16711680>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @orrimm4s_lsl24(<4 x i32> %a) {
+;CHECK: orr {{v[0-31]+}}.4s, #0xff, lsl #24
+ %tmp1 = or <4 x i32> %a, < i32 4278190080, i32 4278190080, i32 4278190080, i32 4278190080>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i16> @orrimm4h_lsl0(<4 x i16> %a) {
+;CHECK: orr {{v[0-31]+}}.4h, #0xff
+ %tmp1 = or <4 x i16> %a, < i16 255, i16 255, i16 255, i16 255 >
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @orrimm4h_lsl8(<4 x i16> %a) {
+;CHECK: orr {{v[0-31]+}}.4h, #0xff, lsl #8
+ %tmp1 = or <4 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280 >
+ ret <4 x i16> %tmp1
+}
+
+define <8 x i16> @orrimm8h_lsl0(<8 x i16> %a) {
+;CHECK: orr {{v[0-31]+}}.8h, #0xff
+ %tmp1 = or <8 x i16> %a, < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255 >
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @orrimm8h_lsl8(<8 x i16> %a) {
+;CHECK: orr {{v[0-31]+}}.8h, #0xff, lsl #8
+ %tmp1 = or <8 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i32> @bicimm2s_lsl0(<2 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.2s, #0x10
+ %tmp1 = and <2 x i32> %a, < i32 4294967279, i32 4294967279 >
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @bicimm2s_lsl8(<2 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #8
+ %tmp1 = and <2 x i32> %a, < i32 4294963199, i32 4294963199 >
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @bicimm2s_lsl16(<2 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #16
+ %tmp1 = and <2 x i32> %a, < i32 4293918719, i32 4293918719 >
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @bicimm2s_lsl24(<2 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.2s, #0x10, lsl #24
+ %tmp1 = and <2 x i32> %a, < i32 4026531839, i32 4026531839>
+ ret <2 x i32> %tmp1
+}
+
+define <4 x i32> @bicimm4s_lsl0(<4 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.4s, #0x10
+ %tmp1 = and <4 x i32> %a, < i32 4294967279, i32 4294967279, i32 4294967279, i32 4294967279 >
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @bicimm4s_lsl8(<4 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #8
+ %tmp1 = and <4 x i32> %a, < i32 4294963199, i32 4294963199, i32 4294963199, i32 4294963199 >
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @bicimm4s_lsl16(<4 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #16
+ %tmp1 = and <4 x i32> %a, < i32 4293918719, i32 4293918719, i32 4293918719, i32 4293918719 >
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @bicimm4s_lsl24(<4 x i32> %a) {
+;CHECK: bic {{v[0-31]+}}.4s, #0x10, lsl #24
+ %tmp1 = and <4 x i32> %a, < i32 4026531839, i32 4026531839, i32 4026531839, i32 4026531839>
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i16> @bicimm4h_lsl0_a(<4 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.4h, #0x10
+ %tmp1 = and <4 x i16> %a, < i16 65519, i16 65519, i16 65519, i16 65519 >
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @bicimm4h_lsl0_b(<4 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.4h, #0x0
+ %tmp1 = and <4 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280 >
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @bicimm4h_lsl8_a(<4 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.4h, #0x10, lsl #8
+ %tmp1 = and <4 x i16> %a, < i16 61439, i16 61439, i16 61439, i16 61439>
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @bicimm4h_lsl8_b(<4 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.4h, #0x0, lsl #8
+ %tmp1 = and <4 x i16> %a, < i16 255, i16 255, i16 255, i16 255>
+ ret <4 x i16> %tmp1
+}
+
+define <8 x i16> @bicimm8h_lsl0_a(<8 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.8h, #0x10
+ %tmp1 = and <8 x i16> %a, < i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519 >
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @bicimm8h_lsl0_b(<8 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.8h, #0x0
+ %tmp1 = and <8 x i16> %a, < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @bicimm8h_lsl8_a(<8 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.8h, #0x10, lsl #8
+ %tmp1 = and <8 x i16> %a, < i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439>
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @bicimm8h_lsl8_b(<8 x i16> %a) {
+;CHECK: bic {{v[0-31]+}}.8h, #0x0, lsl #8
+ %tmp1 = and <8 x i16> %a, < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i32> @and2xi32(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <2 x i32> %a, %b;
+ ret <2 x i32> %tmp1
+}
+
+define <4 x i16> @and4xi16(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <4 x i16> %a, %b;
+ ret <4 x i16> %tmp1
+}
+
+define <1 x i64> @and1xi64(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: and {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <1 x i64> %a, %b;
+ ret <1 x i64> %tmp1
+}
+
+define <4 x i32> @and4xi32(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <4 x i32> %a, %b;
+ ret <4 x i32> %tmp1
+}
+
+define <8 x i16> @and8xi16(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <8 x i16> %a, %b;
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @and2xi64(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: and {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <2 x i64> %a, %b;
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i32> @orr2xi32(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = or <2 x i32> %a, %b;
+ ret <2 x i32> %tmp1
+}
+
+define <4 x i16> @orr4xi16(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = or <4 x i16> %a, %b;
+ ret <4 x i16> %tmp1
+}
+
+define <1 x i64> @orr1xi64(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: orr {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = or <1 x i64> %a, %b;
+ ret <1 x i64> %tmp1
+}
+
+define <4 x i32> @orr4xi32(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = or <4 x i32> %a, %b;
+ ret <4 x i32> %tmp1
+}
+
+define <8 x i16> @orr8xi16(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = or <8 x i16> %a, %b;
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @orr2xi64(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: orr {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = or <2 x i64> %a, %b;
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i32> @eor2xi32(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <2 x i32> %a, %b;
+ ret <2 x i32> %tmp1
+}
+
+define <4 x i16> @eor4xi16(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <4 x i16> %a, %b;
+ ret <4 x i16> %tmp1
+}
+
+define <1 x i64> @eor1xi64(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: eor {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <1 x i64> %a, %b;
+ ret <1 x i64> %tmp1
+}
+
+define <4 x i32> @eor4xi32(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <4 x i32> %a, %b;
+ ret <4 x i32> %tmp1
+}
+
+define <8 x i16> @eor8xi16(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <8 x i16> %a, %b;
+ ret <8 x i16> %tmp1
+}
+
+define <2 x i64> @eor2xi64(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: eor {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <2 x i64> %a, %b;
+ ret <2 x i64> %tmp1
+}
+
+
+define <2 x i32> @bic2xi32(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <2 x i32> %b, < i32 -1, i32 -1 >
+ %tmp2 = and <2 x i32> %a, %tmp1
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i16> @bic4xi16(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <4 x i16> %b, < i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = and <4 x i16> %a, %tmp1
+ ret <4 x i16> %tmp2
+}
+
+define <1 x i64> @bic1xi64(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: bic {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <1 x i64> %b, < i64 -1>
+ %tmp2 = and <1 x i64> %a, %tmp1
+ ret <1 x i64> %tmp2
+}
+
+define <4 x i32> @bic4xi32(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <4 x i32> %b, < i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp2 = and <4 x i32> %a, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+define <8 x i16> @bic8xi16(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <8 x i16> %b, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = and <8 x i16> %a, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+define <2 x i64> @bic2xi64(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: bic {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <2 x i64> %b, < i64 -1, i64 -1>
+ %tmp2 = and <2 x i64> %a, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i32> @orn2xi32(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <2 x i32> %b, < i32 -1, i32 -1 >
+ %tmp2 = or <2 x i32> %a, %tmp1
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i16> @orn4xi16(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <4 x i16> %b, < i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = or <4 x i16> %a, %tmp1
+ ret <4 x i16> %tmp2
+}
+
+define <1 x i64> @orn1xi64(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: orn {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = xor <1 x i64> %b, < i64 -1>
+ %tmp2 = or <1 x i64> %a, %tmp1
+ ret <1 x i64> %tmp2
+}
+
+define <4 x i32> @orn4xi32(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <4 x i32> %b, < i32 -1, i32 -1, i32 -1, i32 -1>
+ %tmp2 = or <4 x i32> %a, %tmp1
+ ret <4 x i32> %tmp2
+}
+
+define <8 x i16> @orn8xi16(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <8 x i16> %b, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = or <8 x i16> %a, %tmp1
+ ret <8 x i16> %tmp2
+}
+
+define <2 x i64> @orn2xi64(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: orn {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = xor <2 x i64> %b, < i64 -1, i64 -1>
+ %tmp2 = or <2 x i64> %a, %tmp1
+ ret <2 x i64> %tmp2
+}
+
+define <2 x i32> @bsl2xi32_const(<2 x i32> %a, <2 x i32> %b) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <2 x i32> %a, < i32 -1, i32 -1 >
+ %tmp2 = and <2 x i32> %b, < i32 0, i32 0 >
+ %tmp3 = or <2 x i32> %tmp1, %tmp2
+ ret <2 x i32> %tmp3
+}
+
+
+define <4 x i16> @bsl4xi16_const(<4 x i16> %a, <4 x i16> %b) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <4 x i16> %a, < i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = and <4 x i16> %b, < i16 0, i16 0, i16 0, i16 0 >
+ %tmp3 = or <4 x i16> %tmp1, %tmp2
+ ret <4 x i16> %tmp3
+}
+
+define <1 x i64> @bsl1xi64_const(<1 x i64> %a, <1 x i64> %b) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = and <1 x i64> %a, < i64 -1 >
+ %tmp2 = and <1 x i64> %b, < i64 0 >
+ %tmp3 = or <1 x i64> %tmp1, %tmp2
+ ret <1 x i64> %tmp3
+}
+
+define <4 x i32> @bsl4xi32_const(<4 x i32> %a, <4 x i32> %b) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <4 x i32> %a, < i32 -1, i32 -1, i32 -1, i32 -1 >
+ %tmp2 = and <4 x i32> %b, < i32 0, i32 0, i32 0, i32 0 >
+ %tmp3 = or <4 x i32> %tmp1, %tmp2
+ ret <4 x i32> %tmp3
+}
+
+define <8 x i16> @bsl8xi16_const(<8 x i16> %a, <8 x i16> %b) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <8 x i16> %a, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
+ %tmp2 = and <8 x i16> %b, < i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0 >
+ %tmp3 = or <8 x i16> %tmp1, %tmp2
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i64> @bsl2xi64_const(<2 x i64> %a, <2 x i64> %b) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = and <2 x i64> %a, < i64 -1, i64 -1 >
+ %tmp2 = and <2 x i64> %b, < i64 0, i64 0 >
+ %tmp3 = or <2 x i64> %tmp1, %tmp2
+ ret <2 x i64> %tmp3
+}
+
+
+define <8 x i8> @bsl8xi8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %1 = and <8 x i8> %v1, %v2
+ %2 = xor <8 x i8> %v1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <8 x i8> %2, %v3
+ %4 = or <8 x i8> %1, %3
+ ret <8 x i8> %4
+}
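+
+; The and/xor/and/or chain above is the usual masked-select idiom,
+; (v1 & v2) | (~v1 & v3); the checks expect it to collapse to a single bsl.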
+
+define <4 x i16> @bsl4xi16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %1 = and <4 x i16> %v1, %v2
+ %2 = xor <4 x i16> %v1, <i16 -1, i16 -1, i16 -1, i16 -1>
+ %3 = and <4 x i16> %2, %v3
+ %4 = or <4 x i16> %1, %3
+ ret <4 x i16> %4
+}
+
+define <2 x i32> @bsl2xi32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %1 = and <2 x i32> %v1, %v2
+ %2 = xor <2 x i32> %v1, <i32 -1, i32 -1>
+ %3 = and <2 x i32> %2, %v3
+ %4 = or <2 x i32> %1, %3
+ ret <2 x i32> %4
+}
+
+define <1 x i64> @bsl1xi64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
+;CHECK: bsl {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %1 = and <1 x i64> %v1, %v2
+ %2 = xor <1 x i64> %v1, <i64 -1>
+ %3 = and <1 x i64> %2, %v3
+ %4 = or <1 x i64> %1, %3
+ ret <1 x i64> %4
+}
+
+define <16 x i8> @bsl16xi8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %1 = and <16 x i8> %v1, %v2
+ %2 = xor <16 x i8> %v1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %3 = and <16 x i8> %2, %v3
+ %4 = or <16 x i8> %1, %3
+ ret <16 x i8> %4
+}
+
+define <8 x i16> @bsl8xi16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %1 = and <8 x i16> %v1, %v2
+ %2 = xor <8 x i16> %v1, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %3 = and <8 x i16> %2, %v3
+ %4 = or <8 x i16> %1, %3
+ ret <8 x i16> %4
+}
+
+define <4 x i32> @bsl4xi32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %1 = and <4 x i32> %v1, %v2
+ %2 = xor <4 x i32> %v1, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %3 = and <4 x i32> %2, %v3
+ %4 = or <4 x i32> %1, %3
+ ret <4 x i32> %4
+}
+
+define <2 x i64> @bsl2xi64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
+;CHECK: bsl {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %1 = and <2 x i64> %v1, %v2
+ %2 = xor <2 x i64> %v1, <i64 -1, i64 -1>
+ %3 = and <2 x i64> %2, %v3
+ %4 = or <2 x i64> %1, %3
+ ret <2 x i64> %4
+}
+
+define <8 x i8> @orrimm8b_as_orrimm4h_lsl0(<8 x i8> %a) {
+;CHECK: orr {{v[0-31]+}}.4h, #0xff
+ %val = or <8 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+ ret <8 x i8> %val
+}
+
+define <8 x i8> @orrimm8b_as_orrimm4h_lsl8(<8 x i8> %a) {
+;CHECK: orr {{v[0-31]+}}.4h, #0xff, lsl #8
+ %val = or <8 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <8 x i8> %val
+}
+
+define <16 x i8> @orrimm16b_as_orrimm8h_lsl0(<16 x i8> %a) {
+;CHECK: orr {{v[0-31]+}}.8h, #0xff
+ %val = or <16 x i8> %a, <i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0>
+ ret <16 x i8> %val
+}
+
+define <16 x i8> @orrimm16b_as_orrimm8h_lsl8(<16 x i8> %a) {
+;CHECK: orr {{v[0-31]+}}.8h, #0xff, lsl #8
+ %val = or <16 x i8> %a, <i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255, i8 0, i8 255>
+ ret <16 x i8> %val
+}
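+
+; The four tests above check that a byte-vector OR whose constant alternates
+; 0x00 and 0xff bytes is expected to be recognised as a halfword orr-immediate.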
+
+
diff --git a/test/CodeGen/AArch64/neon-bsl.ll b/test/CodeGen/AArch64/neon-bsl.ll
new file mode 100644
index 000000000000..6bd923dc2cca
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-bsl.ll
@@ -0,0 +1,222 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <2 x double> @llvm.arm.neon.vbsl.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+declare <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
+
+declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float>, <4 x float>, <4 x float>)
+
+declare <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+
+declare <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16>, <4 x i16>, <4 x i16>)
+
+declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)
+
+declare <1 x double> @llvm.arm.neon.vbsl.v1f64(<1 x double>, <1 x double>, <1 x double>)
+
+declare <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float>, <2 x float>, <2 x float>)
+
+declare <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64>, <1 x i64>, <1 x i64>)
+
+declare <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>)
+
+define <8 x i8> @test_vbsl_s8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+; CHECK-LABEL: test_vbsl_s8:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vbsl_s16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
+; CHECK-LABEL: test_vbsl_s16:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
+ %0 = bitcast <4 x i16> %vbsl3.i to <8 x i8>
+ ret <8 x i8> %0
+}
+
+define <2 x i32> @test_vbsl_s32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+; CHECK-LABEL: test_vbsl_s32:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3)
+ ret <2 x i32> %vbsl3.i
+}
+
+define <1 x i64> @test_vbsl_s64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_vbsl_s64:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3)
+ ret <1 x i64> %vbsl3.i
+}
+
+define <8 x i8> @test_vbsl_u8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+; CHECK-LABEL: test_vbsl_u8:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
+ ret <8 x i8> %vbsl.i
+}
+
+define <4 x i16> @test_vbsl_u16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
+; CHECK-LABEL: test_vbsl_u16:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
+ ret <4 x i16> %vbsl3.i
+}
+
+define <2 x i32> @test_vbsl_u32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3) {
+; CHECK-LABEL: test_vbsl_u32:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %v1, <2 x i32> %v2, <2 x i32> %v3)
+ ret <2 x i32> %vbsl3.i
+}
+
+define <1 x i64> @test_vbsl_u64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3) {
+; CHECK-LABEL: test_vbsl_u64:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %v1, <1 x i64> %v2, <1 x i64> %v3)
+ ret <1 x i64> %vbsl3.i
+}
+
+define <2 x float> @test_vbsl_f32(<2 x float> %v1, <2 x float> %v2, <2 x float> %v3) {
+; CHECK-LABEL: test_vbsl_f32:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %v1, <2 x float> %v2, <2 x float> %v3)
+ ret <2 x float> %vbsl3.i
+}
+
+define <1 x double> @test_vbsl_f64(<1 x i64> %v1, <1 x double> %v2, <1 x double> %v3) {
+; CHECK-LABEL: test_vbsl_f64:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl.i = bitcast <1 x i64> %v1 to <1 x double>
+ %vbsl3.i = tail call <1 x double> @llvm.arm.neon.vbsl.v1f64(<1 x double> %vbsl.i, <1 x double> %v2, <1 x double> %v3)
+ ret <1 x double> %vbsl3.i
+}
+
+define <8 x i8> @test_vbsl_p8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3) {
+; CHECK-LABEL: test_vbsl_p8:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3)
+ ret <8 x i8> %vbsl.i
+}
+
+define <4 x i16> @test_vbsl_p16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3) {
+; CHECK-LABEL: test_vbsl_p16:
+; CHECK: bsl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %v1, <4 x i16> %v2, <4 x i16> %v3)
+ ret <4 x i16> %vbsl3.i
+}
+
+define <16 x i8> @test_vbslq_s8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
+; CHECK-LABEL: test_vbslq_s8:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
+ ret <16 x i8> %vbsl.i
+}
+
+define <8 x i16> @test_vbslq_s16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
+; CHECK-LABEL: test_vbslq_s16:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
+ ret <8 x i16> %vbsl3.i
+}
+
+define <4 x i32> @test_vbslq_s32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+; CHECK-LABEL: test_vbslq_s32:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3)
+ ret <4 x i32> %vbsl3.i
+}
+
+define <2 x i64> @test_vbslq_s64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
+; CHECK-LABEL: test_vbslq_s64:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3)
+ ret <2 x i64> %vbsl3.i
+}
+
+define <16 x i8> @test_vbslq_u8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
+; CHECK-LABEL: test_vbslq_u8:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
+ ret <16 x i8> %vbsl.i
+}
+
+define <8 x i16> @test_vbslq_u16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
+; CHECK-LABEL: test_vbslq_u16:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
+ ret <8 x i16> %vbsl3.i
+}
+
+define <4 x i32> @test_vbslq_u32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+; CHECK-LABEL: test_vbslq_u32:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3)
+ ret <4 x i32> %vbsl3.i
+}
+
+define <2 x i64> @test_vbslq_u64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3) {
+; CHECK-LABEL: test_vbslq_u64:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %v3)
+ ret <2 x i64> %vbsl3.i
+}
+
+define <4 x float> @test_vbslq_f32(<4 x i32> %v1, <4 x float> %v2, <4 x float> %v3) {
+; CHECK-LABEL: test_vbslq_f32:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl.i = bitcast <4 x i32> %v1 to <4 x float>
+ %vbsl3.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %vbsl.i, <4 x float> %v2, <4 x float> %v3)
+ ret <4 x float> %vbsl3.i
+}
+
+define <16 x i8> @test_vbslq_p8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3) {
+; CHECK-LABEL: test_vbslq_p8:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %v1, <16 x i8> %v2, <16 x i8> %v3)
+ ret <16 x i8> %vbsl.i
+}
+
+define <8 x i16> @test_vbslq_p16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3) {
+; CHECK-LABEL: test_vbslq_p16:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %v1, <8 x i16> %v2, <8 x i16> %v3)
+ ret <8 x i16> %vbsl3.i
+}
+
+define <2 x double> @test_vbslq_f64(<2 x i64> %v1, <2 x double> %v2, <2 x double> %v3) {
+; CHECK-LABEL: test_vbslq_f64:
+; CHECK: bsl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vbsl.i = bitcast <2 x i64> %v1 to <2 x double>
+ %vbsl3.i = tail call <2 x double> @llvm.arm.neon.vbsl.v2f64(<2 x double> %vbsl.i, <2 x double> %v2, <2 x double> %v3)
+ ret <2 x double> %vbsl3.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-compare-instructions.ll b/test/CodeGen/AArch64/neon-compare-instructions.ll
new file mode 100644
index 000000000000..68f03425b276
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-compare-instructions.ll
@@ -0,0 +1,1926 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define <8 x i8> @cmeq8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp eq <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmeq16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp eq <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmeq4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp eq <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmeq8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp eq <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmeq2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp eq <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmeq4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp eq <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmeq2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp eq <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmne8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmne16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmne4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmne8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmne2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmne4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmne2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
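+
+; There is no vector compare-not-equal instruction, so the cmne tests above
+; expect a cmeq followed by a not of the result.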
+
+define <8 x i8> @cmgt8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp sgt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgt16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp sgt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgt4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp sgt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgt8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp sgt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgt2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp sgt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgt4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp sgt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgt2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp sgt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlt8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp slt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlt16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp slt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlt4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp slt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlt8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp slt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlt2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp slt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlt4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp slt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlt2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LT implemented as GT, so check reversed operands.
+;CHECK: cmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp slt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmge8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp sge <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmge16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp sge <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmge4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp sge <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmge8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp sge <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmge2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp sge <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmge4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp sge <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmge2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp sge <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmle8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp sle <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmle16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp sle <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmle4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp sle <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmle8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp sle <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmle2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp sle <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmle4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp sle <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmle2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LE implemented as GE, so check reversed operands.
+;CHECK: cmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp sle <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhi8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ugt <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhi16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ugt <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhi4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp ugt <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhi8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp ugt <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhi2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp ugt <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhi4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp ugt <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhi2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp ugt <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlo8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp ult <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlo16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ult <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlo4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ult <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlo8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ult <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlo2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ult <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlo4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ult <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlo2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ult <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhs8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp uge <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhs16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp uge <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhs4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp uge <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhs8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp uge <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhs2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp uge <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhs4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp uge <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhs2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp uge <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmls8xi8(<8 x i8> %A, <8 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp ule <8 x i8> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmls16xi8(<16 x i8> %A, <16 x i8> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ule <16 x i8> %A, %B;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmls4xi16(<4 x i16> %A, <4 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ule <4 x i16> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmls8xi16(<8 x i16> %A, <8 x i16> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ule <8 x i16> %A, %B;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmls2xi32(<2 x i32> %A, <2 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ule <2 x i32> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmls4xi32(<4 x i32> %A, <4 x i32> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ule <4 x i32> %A, %B;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmls2xi64(<2 x i64> %A, <2 x i64> %B) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ule <2 x i64> %A, %B;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmtst8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: cmtst {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = and <8 x i8> %A, %B
+ %tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
+ %tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
+ ret <8 x i8> %tmp5
+}
+
+define <16 x i8> @cmtst16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: cmtst {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = and <16 x i8> %A, %B
+ %tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
+ %tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
+ ret <16 x i8> %tmp5
+}
+
+define <4 x i16> @cmtst4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: cmtst {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = and <4 x i16> %A, %B
+ %tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
+ %tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
+ ret <4 x i16> %tmp5
+}
+
+define <8 x i16> @cmtst8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: cmtst {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = and <8 x i16> %A, %B
+ %tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
+ %tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
+ ret <8 x i16> %tmp5
+}
+
+define <2 x i32> @cmtst2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: cmtst {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = and <2 x i32> %A, %B
+ %tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
+ %tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
+ ret <2 x i32> %tmp5
+}
+
+define <4 x i32> @cmtst4xi32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: cmtst {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = and <4 x i32> %A, %B
+ %tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
+ %tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
+ ret <4 x i32> %tmp5
+}
+
+define <2 x i64> @cmtst2xi64(<2 x i64> %A, <2 x i64> %B) {
+;CHECK: cmtst {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = and <2 x i64> %A, %B
+ %tmp4 = icmp ne <2 x i64> %tmp3, zeroinitializer
+ %tmp5 = sext <2 x i1> %tmp4 to <2 x i64>
+ ret <2 x i64> %tmp5
+}
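+
+; cmtst is expected to be selected from the (and A, B) != 0 idiom used in the
+; tests above.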
+
+
+
+define <8 x i8> @cmeqz8xi8(<8 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+ %tmp3 = icmp eq <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmeqz16xi8(<16 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+ %tmp3 = icmp eq <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmeqz4xi16(<4 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+ %tmp3 = icmp eq <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmeqz8xi16(<8 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+ %tmp3 = icmp eq <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmeqz2xi32(<2 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+ %tmp3 = icmp eq <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmeqz4xi32(<4 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+ %tmp3 = icmp eq <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmeqz2xi64(<2 x i64> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+ %tmp3 = icmp eq <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmgez8xi8(<8 x i8> %A) {
+;CHECK: cmge {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+ %tmp3 = icmp sge <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgez16xi8(<16 x i8> %A) {
+;CHECK: cmge {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+ %tmp3 = icmp sge <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgez4xi16(<4 x i16> %A) {
+;CHECK: cmge {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+ %tmp3 = icmp sge <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgez8xi16(<8 x i16> %A) {
+;CHECK: cmge {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+ %tmp3 = icmp sge <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgez2xi32(<2 x i32> %A) {
+;CHECK: cmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+ %tmp3 = icmp sge <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgez4xi32(<4 x i32> %A) {
+;CHECK: cmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+ %tmp3 = icmp sge <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgez2xi64(<2 x i64> %A) {
+;CHECK: cmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+ %tmp3 = icmp sge <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmgtz8xi8(<8 x i8> %A) {
+;CHECK: cmgt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+ %tmp3 = icmp sgt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmgtz16xi8(<16 x i8> %A) {
+;CHECK: cmgt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+ %tmp3 = icmp sgt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmgtz4xi16(<4 x i16> %A) {
+;CHECK: cmgt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+ %tmp3 = icmp sgt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmgtz8xi16(<8 x i16> %A) {
+;CHECK: cmgt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+ %tmp3 = icmp sgt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmgtz2xi32(<2 x i32> %A) {
+;CHECK: cmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+ %tmp3 = icmp sgt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmgtz4xi32(<4 x i32> %A) {
+;CHECK: cmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+ %tmp3 = icmp sgt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmgtz2xi64(<2 x i64> %A) {
+;CHECK: cmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+ %tmp3 = icmp sgt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlez8xi8(<8 x i8> %A) {
+;CHECK: cmle {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+ %tmp3 = icmp sle <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlez16xi8(<16 x i8> %A) {
+;CHECK: cmle {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+ %tmp3 = icmp sle <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlez4xi16(<4 x i16> %A) {
+;CHECK: cmle {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+ %tmp3 = icmp sle <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlez8xi16(<8 x i16> %A) {
+;CHECK: cmle {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+ %tmp3 = icmp sle <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlez2xi32(<2 x i32> %A) {
+;CHECK: cmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+ %tmp3 = icmp sle <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlez4xi32(<4 x i32> %A) {
+;CHECK: cmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+ %tmp3 = icmp sle <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlez2xi64(<2 x i64> %A) {
+;CHECK: cmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+ %tmp3 = icmp sle <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmltz8xi8(<8 x i8> %A) {
+;CHECK: cmlt {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+ %tmp3 = icmp slt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmltz16xi8(<16 x i8> %A) {
+;CHECK: cmlt {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+ %tmp3 = icmp slt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmltz4xi16(<4 x i16> %A) {
+;CHECK: cmlt {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+ %tmp3 = icmp slt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmltz8xi16(<8 x i16> %A) {
+;CHECK: cmlt {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+ %tmp3 = icmp slt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmltz2xi32(<2 x i32> %A) {
+;CHECK: cmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+ %tmp3 = icmp slt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmltz4xi32(<4 x i32> %A) {
+;CHECK: cmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+ %tmp3 = icmp slt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmltz2xi64(<2 x i64> %A) {
+;CHECK: cmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+ %tmp3 = icmp slt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmneqz8xi8(<8 x i8> %A) {
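+; There is no vector CMNE instruction, so inequality with zero is checked as
+; CMEQ #0 followed by NOT (as the checks below verify).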
+;CHECK: cmeq {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmneqz16xi8(<16 x i8> %A) {
+;CHECK: cmeq {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmneqz4xi16(<4 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmneqz8xi16(<8 x i16> %A) {
+;CHECK: cmeq {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmneqz2xi32(<2 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ne <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmneqz4xi32(<4 x i32> %A) {
+;CHECK: cmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmneqz2xi64(<2 x i64> %A) {
+;CHECK: cmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0x0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ne <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmhsz8xi8(<8 x i8> %A) {
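+; CMHS has no immediate-zero form, so the zero vector is materialized with MOVI first.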
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp uge <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhsz16xi8(<16 x i8> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp uge <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhsz4xi16(<4 x i16> %A) {
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp uge <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhsz8xi16(<8 x i16> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp uge <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhsz2xi32(<2 x i32> %A) {
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp uge <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhsz4xi32(<4 x i32> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp uge <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhsz2xi64(<2 x i64> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp uge <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <8 x i8> @cmhiz8xi8(<8 x i8> %A) {
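+; CMHI has no immediate-zero form, so the zero vector is materialized with MOVI first.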
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ugt <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmhiz16xi8(<16 x i8> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = icmp ugt <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmhiz4xi16(<4 x i16> %A) {
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+ %tmp3 = icmp ugt <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmhiz8xi16(<8 x i16> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+ %tmp3 = icmp ugt <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmhiz2xi32(<2 x i32> %A) {
+;CHECK: movi {{v[0-9]+}}.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = icmp ugt <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmhiz4xi32(<4 x i32> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = icmp ugt <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmhiz2xi64(<2 x i64> %A) {
+;CHECK: movi {{v[0-9]+}}.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = icmp ugt <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmlsz8xi8(<8 x i8> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8b, v1.8b, v0.8b
+ %tmp3 = icmp ule <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmlsz16xi8(<16 x i8> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ule <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmlsz4xi16(<4 x i16> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ule <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmlsz8xi16(<8 x i16> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ule <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmlsz2xi32(<2 x i32> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ule <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmlsz4xi32(<4 x i32> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ule <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmlsz2xi64(<2 x i64> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LS implemented as HS, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhs {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ule <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <8 x i8> @cmloz8xi8(<8 x i8> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8b, v1.8b, {{v[0-9]+}}.8b
+ %tmp3 = icmp ult <8 x i8> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
+ ret <8 x i8> %tmp4
+}
+
+define <16 x i8> @cmloz16xi8(<16 x i8> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.16b, v1.16b, v0.16b
+ %tmp3 = icmp ult <16 x i8> %A, zeroinitializer;
+ %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
+ ret <16 x i8> %tmp4
+}
+
+define <4 x i16> @cmloz4xi16(<4 x i16> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4h, v1.4h, v0.4h
+ %tmp3 = icmp ult <4 x i16> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
+ ret <4 x i16> %tmp4
+}
+
+define <8 x i16> @cmloz8xi16(<8 x i16> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.8h, v1.8h, v0.8h
+ %tmp3 = icmp ult <8 x i16> %A, zeroinitializer;
+ %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
+ ret <8 x i16> %tmp4
+}
+
+define <2 x i32> @cmloz2xi32(<2 x i32> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.8b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = icmp ult <2 x i32> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @cmloz4xi32(<4 x i32> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = icmp ult <4 x i32> %A, zeroinitializer;
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @cmloz2xi64(<2 x i64> %A) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; LO implemented as HI, so check reversed operands.
+;CHECK: movi v1.16b, #0x0
+;CHECK-NEXT: cmhi {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = icmp ult <2 x i64> %A, zeroinitializer;
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmoeq2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fcmp oeq <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmoeq4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fcmp oeq <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmoeq2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fcmp oeq <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmoge2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fcmp oge <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmoge4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fcmp oge <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmoge2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fcmp oge <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmogt2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp3 = fcmp ogt <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmogt4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp3 = fcmp ogt <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmogt2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp3 = fcmp ogt <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmole2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLE implemented as OGE, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = fcmp ole <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmole4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLE implemented as OGE, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = fcmp ole <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmole2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLE implemented as OGE, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = fcmp ole <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmolt2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+ %tmp3 = fcmp olt <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmolt4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+ %tmp3 = fcmp olt <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmolt2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+ %tmp3 = fcmp olt <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmone2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp one <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmone4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp one <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmone2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ONE = OGT | OLT, OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp one <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmord2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ord <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+
+define <4 x i32> @fcmord4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ord <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmord2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ORD = OGE | OLT, OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ord <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmuno2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp uno <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmuno4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uno <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmuno2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNO = !(OGE | OLT), OLT implemented as OGT, so check reversed operands.
+;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uno <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmueq2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ueq <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmueq4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ueq <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmueq2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UEQ = !ONE = !(OGT | OLT), OLT implemented as OGT so check reversed operands
+;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ueq <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmuge2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGE = ULE with swapped operands, ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp uge <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmuge4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGE = ULE with swapped operands, ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uge <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmuge2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGE = ULE with swapped operands, ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uge <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmugt2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGT = ULT with swapped operands, ULT implemented as !OGE.
+;CHECK: fcmge {{v[0-9]+}}.2s, v1.2s, v0.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ugt <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmugt4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGT = ULT with swapped operands, ULT implemented as !OGE.
+;CHECK: fcmge {{v[0-9]+}}.4s, v1.4s, v0.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ugt <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmugt2xdouble(<2 x double> %A, <2 x double> %B) {
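+; Using registers other than v0 and v1 is possible, but would be odd.
+; UGT = ULT with swapped operands, ULT implemented as !OGE.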
+;CHECK: fcmge {{v[0-9]+}}.2d, v1.2d, v0.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ugt <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmule2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ule <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmule4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ule <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmule2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULE implemented as !OGT.
+;CHECK: fcmgt {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ule <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmult2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULT implemented as !OGE.
+;CHECK: fcmge {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ult <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmult4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULT implemented as !OGE.
+;CHECK: fcmge {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ult <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmult2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; ULT implemented as !OGE.
+;CHECK: fcmge {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ult <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmune2xfloat(<2 x float> %A, <2 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNE = !OEQ.
+;CHECK: fcmeq {{v[0-9]+}}.2s, v0.2s, v1.2s
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp une <2 x float> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmune4xfloat(<4 x float> %A, <4 x float> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNE = !OEQ.
+;CHECK: fcmeq {{v[0-9]+}}.4s, v0.4s, v1.4s
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp une <4 x float> %A, %B
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmune2xdouble(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0 and v1 is possible, but would be odd.
+; UNE = !OEQ.
+;CHECK: fcmeq {{v[0-9]+}}.2d, v0.2d, v1.2d
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp une <2 x double> %A, %B
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmoeqz2xfloat(<2 x float> %A) {
+;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+ %tmp3 = fcmp oeq <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmoeqz4xfloat(<4 x float> %A) {
+;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+ %tmp3 = fcmp oeq <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmoeqz2xdouble(<2 x double> %A) {
+;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+ %tmp3 = fcmp oeq <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmogez2xfloat(<2 x float> %A) {
+;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+ %tmp3 = fcmp oge <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmogez4xfloat(<4 x float> %A) {
+;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+ %tmp3 = fcmp oge <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmogez2xdouble(<2 x double> %A) {
+;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+ %tmp3 = fcmp oge <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmogtz2xfloat(<2 x float> %A) {
+;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+ %tmp3 = fcmp ogt <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmogtz4xfloat(<4 x float> %A) {
+;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+ %tmp3 = fcmp ogt <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmogtz2xdouble(<2 x double> %A) {
+;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+ %tmp3 = fcmp ogt <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmoltz2xfloat(<2 x float> %A) {
+;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+ %tmp3 = fcmp olt <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmoltz4xfloat(<4 x float> %A) {
+;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+ %tmp3 = fcmp olt <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmoltz2xdouble(<2 x double> %A) {
+;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+ %tmp3 = fcmp olt <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmolez2xfloat(<2 x float> %A) {
+;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+ %tmp3 = fcmp ole <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmolez4xfloat(<4 x float> %A) {
+;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+ %tmp3 = fcmp ole <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmolez2xdouble(<2 x double> %A) {
+;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+ %tmp3 = fcmp ole <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmonez2xfloat(<2 x float> %A) {
+; ONE with zero = OLT | OGT
+;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp one <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmonez4xfloat(<4 x float> %A) {
+; ONE with zero = OLT | OGT
+;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp one <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmonez2xdouble(<2 x double> %A) {
+; ONE with zero = OLT | OGT
+;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp one <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmordz2xfloat(<2 x float> %A) {
+; ORD with zero = OLT | OGE
+;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ord <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmordz4xfloat(<4 x float> %A) {
+; ORD with zero = OLT | OGE
+;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ord <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmordz2xdouble(<2 x double> %A) {
+; ORD with zero = OLT | OGE
+;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ord <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmueqz2xfloat(<2 x float> %A) {
+; UEQ with zero = !ONE = !(OLT | OGT)
+;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ueq <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmueqz4xfloat(<4 x float> %A) {
+; UEQ with zero = !ONE = !(OLT | OGT)
+;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ueq <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmueqz2xdouble(<2 x double> %A) {
+; UEQ with zero = !ONE = !(OLT | OGT)
+;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ueq <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmugez2xfloat(<2 x float> %A) {
+; UGE with zero = !OLT
+;CHECK: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp uge <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmugez4xfloat(<4 x float> %A) {
+; UGE with zero = !OLT
+;CHECK: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uge <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmugez2xdouble(<2 x double> %A) {
+; UGE with zero = !OLT
+;CHECK: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uge <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmugtz2xfloat(<2 x float> %A) {
+; UGT with zero = !OLE
+;CHECK: fcmle {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ugt <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmugtz4xfloat(<4 x float> %A) {
+; UGT with zero = !OLE
+;CHECK: fcmle {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ugt <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmugtz2xdouble(<2 x double> %A) {
+; UGT with zero = !OLE
+;CHECK: fcmle {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ugt <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmultz2xfloat(<2 x float> %A) {
+; ULT with zero = !OGE
+;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ult <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmultz4xfloat(<4 x float> %A) {
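+; ULT with zero = !OGE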
+;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ult <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmultz2xdouble(<2 x double> %A) {
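+; ULT with zero = !OGE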
+;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ult <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmulez2xfloat(<2 x float> %A) {
+; ULE with zero = !OGT
+;CHECK: fcmgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp ule <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmulez4xfloat(<4 x float> %A) {
+; ULE with zero = !OGT
+;CHECK: fcmgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ule <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmulez2xdouble(<2 x double> %A) {
+; ULE with zero = !OGT
+;CHECK: fcmgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp ule <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+define <2 x i32> @fcmunez2xfloat(<2 x float> %A) {
+; UNE with zero = !OEQ with zero
+;CHECK: fcmeq {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp une <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmunez4xfloat(<4 x float> %A) {
+; UNE with zero = !OEQ with zero
+;CHECK: fcmeq {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp une <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+define <2 x i64> @fcmunez2xdouble(<2 x double> %A) {
+; UNE with zero = !OEQ with zero
+;CHECK: fcmeq {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp une <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
+
+
+define <2 x i32> @fcmunoz2xfloat(<2 x float> %A) {
+; UNO with zero = !ORD = !(OLT | OGE)
+;CHECK: fcmge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+;CHECK-NEXT: not {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+ %tmp3 = fcmp uno <2 x float> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
+}
+
+define <4 x i32> @fcmunoz4xfloat(<4 x float> %A) {
+; UNO with zero = !ORD = !(OLT | OGE)
+;CHECK: fcmge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uno <4 x float> %A, zeroinitializer
+ %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @fcmunoz2xdouble(<2 x double> %A) {
+; UNO with zero = !ORD = !(OLT | OGE)
+;CHECK: fcmge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: fcmlt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0.0
+;CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+;CHECK-NEXT: not {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+ %tmp3 = fcmp uno <2 x double> %A, zeroinitializer
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i64>
+ ret <2 x i64> %tmp4
+}
diff --git a/test/CodeGen/AArch64/neon-copy.ll b/test/CodeGen/AArch64/neon-copy.ll
new file mode 100644
index 000000000000..e18530e6ff8e
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-copy.ll
@@ -0,0 +1,615 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+
+define <16 x i8> @ins16bw(<16 x i8> %tmp1, i8 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[15], {{w[0-31]+}}
+ %tmp3 = insertelement <16 x i8> %tmp1, i8 %tmp2, i32 15
+ ret <16 x i8> %tmp3
+}
+
+define <8 x i16> @ins8hw(<8 x i16> %tmp1, i16 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[6], {{w[0-31]+}}
+ %tmp3 = insertelement <8 x i16> %tmp1, i16 %tmp2, i32 6
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @ins4sw(<4 x i32> %tmp1, i32 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[2], {{w[0-31]+}}
+ %tmp3 = insertelement <4 x i32> %tmp1, i32 %tmp2, i32 2
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @ins2dw(<2 x i64> %tmp1, i64 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[1], {{x[0-31]+}}
+ %tmp3 = insertelement <2 x i64> %tmp1, i64 %tmp2, i32 1
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i8> @ins8bw(<8 x i8> %tmp1, i8 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[5], {{w[0-31]+}}
+ %tmp3 = insertelement <8 x i8> %tmp1, i8 %tmp2, i32 5
+ ret <8 x i8> %tmp3
+}
+
+define <4 x i16> @ins4hw(<4 x i16> %tmp1, i16 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[3], {{w[0-31]+}}
+ %tmp3 = insertelement <4 x i16> %tmp1, i16 %tmp2, i32 3
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @ins2sw(<2 x i32> %tmp1, i32 %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{w[0-31]+}}
+ %tmp3 = insertelement <2 x i32> %tmp1, i32 %tmp2, i32 1
+ ret <2 x i32> %tmp3
+}
+
+define <16 x i8> @ins16b16(<16 x i8> %tmp1, <16 x i8> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[15], {{v[0-31]+}}.b[2]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins8h8(<8 x i16> %tmp1, <8 x i16> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[7], {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins4s4(<4 x i32> %tmp1, <4 x i32> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins2d2(<2 x i64> %tmp1, <2 x i64> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+ ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins4f4(<4 x float> %tmp1, <4 x float> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x float> %tmp1, i32 2
+ %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins2df2(<2 x double> %tmp1, <2 x double> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <2 x double> %tmp1, i32 0
+ %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+ ret <2 x double> %tmp4
+}
+
+define <16 x i8> @ins8b16(<8 x i8> %tmp1, <16 x i8> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[15], {{v[0-31]+}}.b[2]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <16 x i8> %tmp2, i8 %tmp3, i32 15
+ ret <16 x i8> %tmp4
+}
+
+define <8 x i16> @ins4h8(<4 x i16> %tmp1, <8 x i16> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[7], {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i16> %tmp2, i16 %tmp3, i32 7
+ ret <8 x i16> %tmp4
+}
+
+define <4 x i32> @ins2s4(<2 x i32> %tmp1, <4 x i32> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ %tmp4 = insertelement <4 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <4 x i32> %tmp4
+}
+
+define <2 x i64> @ins1d2(<1 x i64> %tmp1, <2 x i64> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i64> %tmp2, i64 %tmp3, i32 1
+ ret <2 x i64> %tmp4
+}
+
+define <4 x float> @ins2f4(<2 x float> %tmp1, <4 x float> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[1]
+ %tmp3 = extractelement <2 x float> %tmp1, i32 1
+ %tmp4 = insertelement <4 x float> %tmp2, float %tmp3, i32 1
+ ret <4 x float> %tmp4
+}
+
+define <2 x double> @ins1f2(<1 x double> %tmp1, <2 x double> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[1], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <1 x double> %tmp1, i32 0
+ %tmp4 = insertelement <2 x double> %tmp2, double %tmp3, i32 1
+ ret <2 x double> %tmp4
+}
+
+define <8 x i8> @ins16b8(<16 x i8> %tmp1, <8 x i8> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[7], {{v[0-31]+}}.b[2]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 7
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins8h4(<8 x i16> %tmp1, <4 x i16> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[3], {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins4s2(<4 x i32> %tmp1, <2 x i32> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins2d1(<2 x i64> %tmp1, <1 x i64> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+ ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x float> %tmp1, i32 2
+ %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+ ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <2 x double> %tmp1, i32 0
+ %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+ ret <1 x double> %tmp4
+}
+
+define <8 x i8> @ins8b8(<8 x i8> %tmp1, <8 x i8> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.b[4], {{v[0-31]+}}.b[2]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 2
+ %tmp4 = insertelement <8 x i8> %tmp2, i8 %tmp3, i32 4
+ ret <8 x i8> %tmp4
+}
+
+define <4 x i16> @ins4h4(<4 x i16> %tmp1, <4 x i16> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.h[3], {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = insertelement <4 x i16> %tmp2, i16 %tmp3, i32 3
+ ret <4 x i16> %tmp4
+}
+
+define <2 x i32> @ins2s2(<2 x i32> %tmp1, <2 x i32> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[0]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 0
+ %tmp4 = insertelement <2 x i32> %tmp2, i32 %tmp3, i32 1
+ ret <2 x i32> %tmp4
+}
+
+define <1 x i64> @ins1d1(<1 x i64> %tmp1, <1 x i64> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ %tmp4 = insertelement <1 x i64> %tmp2, i64 %tmp3, i32 0
+ ret <1 x i64> %tmp4
+}
+
+define <2 x float> @ins2f2(<2 x float> %tmp1, <2 x float> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.s[1], {{v[0-31]+}}.s[0]
+ %tmp3 = extractelement <2 x float> %tmp1, i32 0
+ %tmp4 = insertelement <2 x float> %tmp2, float %tmp3, i32 1
+ ret <2 x float> %tmp4
+}
+
+define <1 x double> @ins1df1(<1 x double> %tmp1, <1 x double> %tmp2) {
+;CHECK: ins {{v[0-31]+}}.d[0], {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <1 x double> %tmp1, i32 0
+ %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
+ ret <1 x double> %tmp4
+}
+
+define i32 @umovw16b(<16 x i8> %tmp1) {
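+; A zero-extended lane extract maps to UMOV; the sign-extended extracts further
+; below use SMOV instead.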
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = zext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw8h(<8 x i16> %tmp1) {
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = zext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw4s(<4 x i32> %tmp1) {
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ ret i32 %tmp3
+}
+
+define i64 @umovx2d(<2 x i64> %tmp1) {
+;CHECK: umov {{x[0-31]+}}, {{v[0-31]+}}.d[0]
+ %tmp3 = extractelement <2 x i64> %tmp1, i32 0
+ ret i64 %tmp3
+}
+
+define i32 @umovw8b(<8 x i8> %tmp1) {
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.b[7]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 7
+ %tmp4 = zext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw4h(<4 x i16> %tmp1) {
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = zext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @umovw2s(<2 x i32> %tmp1) {
+;CHECK: umov {{w[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ ret i32 %tmp3
+}
+
+define i64 @umovx1d(<1 x i64> %tmp1) {
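+; A <1 x i64> lives entirely in a d register, so extracting lane 0 is just an FMOV.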
+;CHECK: fmov {{x[0-31]+}}, {{d[0-31]+}}
+ %tmp3 = extractelement <1 x i64> %tmp1, i32 0
+ ret i64 %tmp3
+}
+
+define i32 @smovw16b(<16 x i8> %tmp1) {
+;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = add i32 5, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovw8h(<8 x i16> %tmp1) {
+;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ %tmp5 = add i32 5, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovx16b(<16 x i8> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.b[8]
+ %tmp3 = extractelement <16 x i8> %tmp1, i32 8
+ %tmp4 = sext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @smovx8h(<8 x i16> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <8 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i64 @smovx4s(<4 x i32> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.s[2]
+ %tmp3 = extractelement <4 x i32> %tmp1, i32 2
+ %tmp4 = sext i32 %tmp3 to i64
+ ret i64 %tmp4
+}
+
+define i32 @smovw8b(<8 x i8> %tmp1) {
+;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.b[4]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 4
+ %tmp4 = sext i8 %tmp3 to i32
+ %tmp5 = add i32 5, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovw4h(<4 x i16> %tmp1) {
+;CHECK: smov {{w[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ %tmp5 = add i32 5, %tmp4
+ ret i32 %tmp5
+}
+
+define i32 @smovx8b(<8 x i8> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.b[6]
+ %tmp3 = extractelement <8 x i8> %tmp1, i32 6
+ %tmp4 = sext i8 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i32 @smovx4h(<4 x i16> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.h[2]
+ %tmp3 = extractelement <4 x i16> %tmp1, i32 2
+ %tmp4 = sext i16 %tmp3 to i32
+ ret i32 %tmp4
+}
+
+define i64 @smovx2s(<2 x i32> %tmp1) {
+;CHECK: smov {{x[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp3 = extractelement <2 x i32> %tmp1, i32 1
+ %tmp4 = sext i32 %tmp3 to i64
+ ret i64 %tmp4
+}
+
+define <8 x i8> @test_vcopy_lane_s8(<8 x i8> %v1, <8 x i8> %v2) {
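+; A shufflevector that replaces a single lane with a lane from the second vector
+; is selected as one INS (element) instruction.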
+;CHECK: ins {{v[0-9]+}}.b[5], {{v[0-9]+}}.b[3]
+ %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 11, i32 6, i32 7>
+ ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_s8(<16 x i8> %v1, <16 x i8> %v2) {
+;CHECK: ins {{v[0-9]+}}.b[14], {{v[0-9]+}}.b[6]
+ %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 22, i32 15>
+ ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vcopy_lane_swap_s8(<8 x i8> %v1, <8 x i8> %v2) {
+;CHECK: ins {{v[0-9]+}}.b[7], {{v[0-9]+}}.b[0]
+ %vset_lane = shufflevector <8 x i8> %v1, <8 x i8> %v2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 0>
+ ret <8 x i8> %vset_lane
+}
+
+define <16 x i8> @test_vcopyq_laneq_swap_s8(<16 x i8> %v1, <16 x i8> %v2) {
+;CHECK: ins {{v[0-9]+}}.b[0], {{v[0-9]+}}.b[15]
+ %vset_lane = shufflevector <16 x i8> %v1, <16 x i8> %v2, <16 x i32> <i32 15, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ ret <16 x i8> %vset_lane
+}
+
+define <8 x i8> @test_vdup_n_u8(i8 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8b, {{w[0-9]+}}
+ %vecinit.i = insertelement <8 x i8> undef, i8 %v1, i32 0
+ %vecinit1.i = insertelement <8 x i8> %vecinit.i, i8 %v1, i32 1
+ %vecinit2.i = insertelement <8 x i8> %vecinit1.i, i8 %v1, i32 2
+ %vecinit3.i = insertelement <8 x i8> %vecinit2.i, i8 %v1, i32 3
+ %vecinit4.i = insertelement <8 x i8> %vecinit3.i, i8 %v1, i32 4
+ %vecinit5.i = insertelement <8 x i8> %vecinit4.i, i8 %v1, i32 5
+ %vecinit6.i = insertelement <8 x i8> %vecinit5.i, i8 %v1, i32 6
+ %vecinit7.i = insertelement <8 x i8> %vecinit6.i, i8 %v1, i32 7
+ ret <8 x i8> %vecinit7.i
+}
+
+define <4 x i16> @test_vdup_n_u16(i16 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4h, {{w[0-9]+}}
+ %vecinit.i = insertelement <4 x i16> undef, i16 %v1, i32 0
+ %vecinit1.i = insertelement <4 x i16> %vecinit.i, i16 %v1, i32 1
+ %vecinit2.i = insertelement <4 x i16> %vecinit1.i, i16 %v1, i32 2
+ %vecinit3.i = insertelement <4 x i16> %vecinit2.i, i16 %v1, i32 3
+ ret <4 x i16> %vecinit3.i
+}
+
+define <2 x i32> @test_vdup_n_u32(i32 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2s, {{w[0-9]+}}
+ %vecinit.i = insertelement <2 x i32> undef, i32 %v1, i32 0
+ %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %v1, i32 1
+ ret <2 x i32> %vecinit1.i
+}
+
+define <1 x i64> @test_vdup_n_u64(i64 %v1) #0 {
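+; Building a <1 x i64> from a scalar needs no DUP; the value is moved into the
+; d register with FMOV.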
+;CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ %vecinit.i = insertelement <1 x i64> undef, i64 %v1, i32 0
+ ret <1 x i64> %vecinit.i
+}
+
+define <16 x i8> @test_vdupq_n_u8(i8 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.16b, {{w[0-9]+}}
+ %vecinit.i = insertelement <16 x i8> undef, i8 %v1, i32 0
+ %vecinit1.i = insertelement <16 x i8> %vecinit.i, i8 %v1, i32 1
+ %vecinit2.i = insertelement <16 x i8> %vecinit1.i, i8 %v1, i32 2
+ %vecinit3.i = insertelement <16 x i8> %vecinit2.i, i8 %v1, i32 3
+ %vecinit4.i = insertelement <16 x i8> %vecinit3.i, i8 %v1, i32 4
+ %vecinit5.i = insertelement <16 x i8> %vecinit4.i, i8 %v1, i32 5
+ %vecinit6.i = insertelement <16 x i8> %vecinit5.i, i8 %v1, i32 6
+ %vecinit7.i = insertelement <16 x i8> %vecinit6.i, i8 %v1, i32 7
+ %vecinit8.i = insertelement <16 x i8> %vecinit7.i, i8 %v1, i32 8
+ %vecinit9.i = insertelement <16 x i8> %vecinit8.i, i8 %v1, i32 9
+ %vecinit10.i = insertelement <16 x i8> %vecinit9.i, i8 %v1, i32 10
+ %vecinit11.i = insertelement <16 x i8> %vecinit10.i, i8 %v1, i32 11
+ %vecinit12.i = insertelement <16 x i8> %vecinit11.i, i8 %v1, i32 12
+ %vecinit13.i = insertelement <16 x i8> %vecinit12.i, i8 %v1, i32 13
+ %vecinit14.i = insertelement <16 x i8> %vecinit13.i, i8 %v1, i32 14
+ %vecinit15.i = insertelement <16 x i8> %vecinit14.i, i8 %v1, i32 15
+ ret <16 x i8> %vecinit15.i
+}
+
+define <8 x i16> @test_vdupq_n_u16(i16 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8h, {{w[0-9]+}}
+ %vecinit.i = insertelement <8 x i16> undef, i16 %v1, i32 0
+ %vecinit1.i = insertelement <8 x i16> %vecinit.i, i16 %v1, i32 1
+ %vecinit2.i = insertelement <8 x i16> %vecinit1.i, i16 %v1, i32 2
+ %vecinit3.i = insertelement <8 x i16> %vecinit2.i, i16 %v1, i32 3
+ %vecinit4.i = insertelement <8 x i16> %vecinit3.i, i16 %v1, i32 4
+ %vecinit5.i = insertelement <8 x i16> %vecinit4.i, i16 %v1, i32 5
+ %vecinit6.i = insertelement <8 x i16> %vecinit5.i, i16 %v1, i32 6
+ %vecinit7.i = insertelement <8 x i16> %vecinit6.i, i16 %v1, i32 7
+ ret <8 x i16> %vecinit7.i
+}
+
+define <4 x i32> @test_vdupq_n_u32(i32 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4s, {{w[0-9]+}}
+ %vecinit.i = insertelement <4 x i32> undef, i32 %v1, i32 0
+ %vecinit1.i = insertelement <4 x i32> %vecinit.i, i32 %v1, i32 1
+ %vecinit2.i = insertelement <4 x i32> %vecinit1.i, i32 %v1, i32 2
+ %vecinit3.i = insertelement <4 x i32> %vecinit2.i, i32 %v1, i32 3
+ ret <4 x i32> %vecinit3.i
+}
+
+define <2 x i64> @test_vdupq_n_u64(i64 %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2d, {{x[0-9]+}}
+ %vecinit.i = insertelement <2 x i64> undef, i64 %v1, i32 0
+ %vecinit1.i = insertelement <2 x i64> %vecinit.i, i64 %v1, i32 1
+ ret <2 x i64> %vecinit1.i
+}
+
+define <8 x i8> @test_vdup_lane_s8(<8 x i8> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_lane_s16(<4 x i16> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_lane_s32(<2 x i32> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_lane_s8(<8 x i8> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <8 x i8> %v1, <8 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_lane_s16(<4 x i16> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <4 x i16> %v1, <4 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_lane_s32(<2 x i32> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_lane_s64(<1 x i64> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+ %shuffle = shufflevector <1 x i64> %v1, <1 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %shuffle
+}
+
+define <8 x i8> @test_vdup_laneq_s8(<16 x i8> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <8 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <8 x i8> %shuffle
+}
+
+define <4 x i16> @test_vdup_laneq_s16(<8 x i16> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+ ret <4 x i16> %shuffle
+}
+
+define <2 x i32> @test_vdup_laneq_s32(<4 x i32> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <2 x i32> <i32 1, i32 1>
+ ret <2 x i32> %shuffle
+}
+
+define <16 x i8> @test_vdupq_laneq_s8(<16 x i8> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.16b, {{v[0-9]+}}.b[5]
+ %shuffle = shufflevector <16 x i8> %v1, <16 x i8> undef, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
+ ret <16 x i8> %shuffle
+}
+
+define <8 x i16> @test_vdupq_laneq_s16(<8 x i16> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.8h, {{v[0-9]+}}.h[2]
+ %shuffle = shufflevector <8 x i16> %v1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+ ret <8 x i16> %shuffle
+}
+
+define <4 x i32> @test_vdupq_laneq_s32(<4 x i32> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
+ %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %shuffle
+}
+
+define <2 x i64> @test_vdupq_laneq_s64(<2 x i64> %v1) #0 {
+;CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[0]
+ %shuffle = shufflevector <2 x i64> %v1, <2 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %shuffle
+}
+
+define i64 @test_bitcastv8i8toi64(<8 x i8> %in) {
+; CHECK-LABEL: test_bitcastv8i8toi64:
+ %res = bitcast <8 x i8> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv4i16toi64(<4 x i16> %in) {
+; CHECK-LABEL: test_bitcastv4i16toi64:
+ %res = bitcast <4 x i16> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv2i32toi64(<2 x i32> %in) {
+; CHECK-LABEL: test_bitcastv2i32toi64:
+ %res = bitcast <2 x i32> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv2f32toi64(<2 x float> %in) {
+; CHECK-LABEL: test_bitcastv2f32toi64:
+ %res = bitcast <2 x float> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv1i64toi64(<1 x i64> %in) {
+; CHECK-LABEL: test_bitcastv1i64toi64:
+ %res = bitcast <1 x i64> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define i64 @test_bitcastv1f64toi64(<1 x double> %in) {
+; CHECK-LABEL: test_bitcastv1f64toi64:
+ %res = bitcast <1 x double> %in to i64
+; CHECK: fmov {{x[0-9]+}}, {{d[0-9]+}}
+ ret i64 %res
+}
+
+define <8 x i8> @test_bitcasti64tov8i8(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov8i8:
+ %res = bitcast i64 %in to <8 x i8>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <8 x i8> %res
+}
+
+define <4 x i16> @test_bitcasti64tov4i16(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov4i16:
+ %res = bitcast i64 %in to <4 x i16>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <4 x i16> %res
+}
+
+define <2 x i32> @test_bitcasti64tov2i32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2i32:
+ %res = bitcast i64 %in to <2 x i32>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <2 x i32> %res
+}
+
+define <2 x float> @test_bitcasti64tov2f32(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov2f32:
+ %res = bitcast i64 %in to <2 x float>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <2 x float> %res
+}
+
+define <1 x i64> @test_bitcasti64tov1i64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1i64:
+ %res = bitcast i64 %in to <1 x i64>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <1 x i64> %res
+}
+
+define <1 x double> @test_bitcasti64tov1f64(i64 %in) {
+; CHECK-LABEL: test_bitcasti64tov1f64:
+ %res = bitcast i64 %in to <1 x double>
+; CHECK: fmov {{d[0-9]+}}, {{x[0-9]+}}
+ ret <1 x double> %res
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-crypto.ll b/test/CodeGen/AArch64/neon-crypto.ll
new file mode 100644
index 000000000000..0283e0e7ca2e
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-crypto.ll
@@ -0,0 +1,149 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -mattr=+crypto | FileCheck %s
+; RUN: not llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon 2>&1 | FileCheck --check-prefix=CHECK-NO-CRYPTO %s
+
+declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32>, <1 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32>, <1 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32>, <1 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>) #1
+
+declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>) #1
+
+declare <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32>) #1
+
+declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>) #1
+
+declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>) #1
+
+declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>) #1
+
+declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>) #1
+
+define <16 x i8> @test_vaeseq_u8(<16 x i8> %data, <16 x i8> %key) {
+; CHECK: test_vaeseq_u8:
+; CHECK: aese {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK-NO-CRYPTO: Cannot select: intrinsic %llvm.arm.neon.aese
+entry:
+ %aese.i = tail call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %data, <16 x i8> %key)
+ ret <16 x i8> %aese.i
+}
+
+define <16 x i8> @test_vaesdq_u8(<16 x i8> %data, <16 x i8> %key) {
+; CHECK: test_vaesdq_u8:
+; CHECK: aesd {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %aesd.i = tail call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %data, <16 x i8> %key)
+ ret <16 x i8> %aesd.i
+}
+
+define <16 x i8> @test_vaesmcq_u8(<16 x i8> %data) {
+; CHECK: test_vaesmcq_u8:
+; CHECK: aesmc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %aesmc.i = tail call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %data)
+ ret <16 x i8> %aesmc.i
+}
+
+define <16 x i8> @test_vaesimcq_u8(<16 x i8> %data) {
+; CHECK: test_vaesimcq_u8:
+; CHECK: aesimc {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %aesimc.i = tail call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %data)
+ ret <16 x i8> %aesimc.i
+}
+
+define i32 @test_vsha1h_u32(i32 %hash_e) {
+; CHECK: test_vsha1h_u32:
+; CHECK: sha1h {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %sha1h.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
+ %sha1h1.i = tail call <1 x i32> @llvm.arm.neon.sha1h.v1i32(<1 x i32> %sha1h.i)
+ %0 = extractelement <1 x i32> %sha1h1.i, i32 0
+ ret i32 %0
+}
+
+define <4 x i32> @test_vsha1su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w12_15) {
+; CHECK: test_vsha1su1q_u32:
+; CHECK: sha1su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %sha1su12.i = tail call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w12_15)
+ ret <4 x i32> %sha1su12.i
+}
+
+define <4 x i32> @test_vsha256su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7) {
+; CHECK: test_vsha256su0q_u32:
+; CHECK: sha256su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %sha256su02.i = tail call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7)
+ ret <4 x i32> %sha256su02.i
+}
+
+define <4 x i32> @test_vsha1cq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK: test_vsha1cq_u32:
+; CHECK: sha1c {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sha1c.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
+ %sha1c1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1c(<4 x i32> %hash_abcd, <1 x i32> %sha1c.i, <4 x i32> %wk)
+ ret <4 x i32> %sha1c1.i
+}
+
+define <4 x i32> @test_vsha1pq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK: test_vsha1pq_u32:
+; CHECK: sha1p {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sha1p.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
+ %sha1p1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1p(<4 x i32> %hash_abcd, <1 x i32> %sha1p.i, <4 x i32> %wk)
+ ret <4 x i32> %sha1p1.i
+}
+
+define <4 x i32> @test_vsha1mq_u32(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
+; CHECK: test_vsha1mq_u32:
+; CHECK: sha1m {{q[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sha1m.i = insertelement <1 x i32> undef, i32 %hash_e, i32 0
+ %sha1m1.i = tail call <4 x i32> @llvm.aarch64.neon.sha1m(<4 x i32> %hash_abcd, <1 x i32> %sha1m.i, <4 x i32> %wk)
+ ret <4 x i32> %sha1m1.i
+}
+
+define <4 x i32> @test_vsha1su0q_u32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11) {
+; CHECK: test_vsha1su0q_u32:
+; CHECK: sha1su0 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %sha1su03.i = tail call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %w0_3, <4 x i32> %w4_7, <4 x i32> %w8_11)
+ ret <4 x i32> %sha1su03.i
+}
+
+define <4 x i32> @test_vsha256hq_u32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
+; CHECK: test_vsha256hq_u32:
+; CHECK: sha256h {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sha256h3.i = tail call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
+ ret <4 x i32> %sha256h3.i
+}
+
+define <4 x i32> @test_vsha256h2q_u32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
+; CHECK: test_vsha256h2q_u32:
+; CHECK: sha256h2 {{q[0-9]+}}, {{q[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sha256h23.i = tail call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
+ ret <4 x i32> %sha256h23.i
+}
+
+define <4 x i32> @test_vsha256su1q_u32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
+; CHECK: test_vsha256su1q_u32:
+; CHECK: sha256su1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %sha256su13.i = tail call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %tw0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
+ ret <4 x i32> %sha256su13.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-diagnostics.ll b/test/CodeGen/AArch64/neon-diagnostics.ll
new file mode 100644
index 000000000000..f546aa7d3341
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-diagnostics.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <2 x float> @test_vfma_lane_f32(<2 x float> %a, <2 x float> %b, <2 x float> %v) {
+; CHECK: test_vfma_lane_f32:
+; CHECK: fmul {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[{{[0-9]+}}]
+; CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+ %shuffle = shufflevector <2 x float> %v, <2 x float> undef, <2 x i32> <i32 1, i32 1>
+ %mul = fmul <2 x float> %shuffle, %b
+ %add = fadd <2 x float> %mul, %a
+ ret <2 x float> %add
+}
+
+define <4 x i32> @test_vshrn_not_match(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_not_match
+; CHECK-NOT: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #35
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %2 = ashr <2 x i64> %b, <i64 35, i64 35>
+ %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+ %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %4
+} \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-extract.ll b/test/CodeGen/AArch64/neon-extract.ll
new file mode 100644
index 000000000000..5c52cd30676a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-extract.ll
@@ -0,0 +1,190 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vext_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_s8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_s16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <2 x i32> @test_vext_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vext_s32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i32> %vext
+}
+
+define <1 x i64> @test_vext_s64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK: test_vext_s64:
+entry:
+ %vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
+ ret <1 x i64> %vext
+}
+
+define <16 x i8> @test_vextq_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_s8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_s16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
+
+define <4 x i32> @test_vextq_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vextq_s32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %vext
+}
+
+define <2 x i64> @test_vextq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vextq_s64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %vext
+}
+
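+; The ext immediate is a byte offset: the lowest shuffle index times the element
+; size.  In test_vext_s16 above the shuffle starts at element 3, so 3 * 2 = 6
+; bytes, i.e. #0x6.  An unchecked sketch starting one element in, which would
+; presumably give #0x2:
+define <4 x i16> @test_vext_s16_off1_sketch(<4 x i16> %a, <4 x i16> %b) {
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i16> %vext
+}
+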
+define <8 x i8> @test_vext_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_u8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_u16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <2 x i32> @test_vext_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vext_u32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i32> %vext
+}
+
+define <1 x i64> @test_vext_u64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK: test_vext_u64:
+entry:
+ %vext = shufflevector <1 x i64> %a, <1 x i64> %b, <1 x i32> <i32 0>
+ ret <1 x i64> %vext
+}
+
+define <16 x i8> @test_vextq_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_u8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_u16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
+
+define <4 x i32> @test_vextq_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vextq_u32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %vext
+}
+
+define <2 x i64> @test_vextq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vextq_u64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x i64> %vext
+}
+
+define <2 x float> @test_vext_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vext_f32:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x4
+entry:
+ %vext = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x float> %vext
+}
+
+define <1 x double> @test_vext_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK: test_vext_f64:
+entry:
+ %vext = shufflevector <1 x double> %a, <1 x double> %b, <1 x i32> <i32 0>
+ ret <1 x double> %vext
+}
+
+define <4 x float> @test_vextq_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vextq_f32:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x4
+entry:
+ %vext = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x float> %vext
+}
+
+define <2 x double> @test_vextq_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vextq_f64:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x8
+entry:
+ %vext = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
+ ret <2 x double> %vext
+}
+
+define <8 x i8> @test_vext_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vext_p8:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2
+entry:
+ %vext = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+ ret <8 x i8> %vext
+}
+
+define <4 x i16> @test_vext_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vext_p16:
+; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x6
+entry:
+ %vext = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %vext
+}
+
+define <16 x i8> @test_vextq_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vextq_p8:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x2
+entry:
+ %vext = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
+ ret <16 x i8> %vext
+}
+
+define <8 x i16> @test_vextq_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vextq_p16:
+; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6
+entry:
+ %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i16> %vext
+}
diff --git a/test/CodeGen/AArch64/neon-facge-facgt.ll b/test/CodeGen/AArch64/neon-facge-facgt.ll
new file mode 100644
index 000000000000..146256e4be11
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-facge-facgt.ll
@@ -0,0 +1,56 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>)
+declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>)
+declare <2 x i64> @llvm.aarch64.neon.vacgeq(<2 x double>, <2 x double>)
+
+define <2 x i32> @facge_from_intr_v2i32(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facge_from_intr_v2i32:
+ %val = call <2 x i32> @llvm.arm.neon.vacged(<2 x float> %A, <2 x float> %B)
+; CHECK: facge {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ ret <2 x i32> %val
+}
+define <4 x i32> @facge_from_intr_v4i32( <4 x float> %A, <4 x float> %B) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facge_from_intr_v4i32:
+ %val = call <4 x i32> @llvm.arm.neon.vacgeq(<4 x float> %A, <4 x float> %B)
+; CHECK: facge {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ ret <4 x i32> %val
+}
+
+define <2 x i64> @facge_from_intr_v2i64(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facge_from_intr_v2i64:
+ %val = call <2 x i64> @llvm.aarch64.neon.vacgeq(<2 x double> %A, <2 x double> %B)
+; CHECK: facge {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ ret <2 x i64> %val
+}
+
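+; facge is an absolute compare: each result lane is all-ones when |A| >= |B|.
+; A plain-IR model of the same predicate (assumed semantics, not FileCheck'd
+; here and not necessarily what the backend selects) is fabs + fcmp + sext:
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
+
+define <2 x i32> @facge_model_v2i32(<2 x float> %A, <2 x float> %B) {
+ %absA = call <2 x float> @llvm.fabs.v2f32(<2 x float> %A)
+ %absB = call <2 x float> @llvm.fabs.v2f32(<2 x float> %B)
+ %cmp = fcmp oge <2 x float> %absA, %absB
+ %res = sext <2 x i1> %cmp to <2 x i32>
+ ret <2 x i32> %res
+}
+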
+declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>)
+declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>)
+declare <2 x i64> @llvm.aarch64.neon.vacgtq(<2 x double>, <2 x double>)
+
+define <2 x i32> @facgt_from_intr_v2i32(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facgt_from_intr_v2i32:
+ %val = call <2 x i32> @llvm.arm.neon.vacgtd(<2 x float> %A, <2 x float> %B)
+; CHECK: facgt {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ ret <2 x i32> %val
+}
+define <4 x i32> @facgt_from_intr_v4i32( <4 x float> %A, <4 x float> %B) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facgt_from_intr_v4i32:
+ %val = call <4 x i32> @llvm.arm.neon.vacgtq(<4 x float> %A, <4 x float> %B)
+; CHECK: facgt {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ ret <4 x i32> %val
+}
+
+define <2 x i64> @facgt_from_intr_v2i64(<2 x double> %A, <2 x double> %B) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: facgt_from_intr_v2i64:
+ %val = call <2 x i64> @llvm.aarch64.neon.vacgtq(<2 x double> %A, <2 x double> %B)
+; CHECK: facgt {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ ret <2 x i64> %val
+}
+
diff --git a/test/CodeGen/AArch64/neon-fma.ll b/test/CodeGen/AArch64/neon-fma.ll
new file mode 100644
index 000000000000..dcf4e2878068
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-fma.ll
@@ -0,0 +1,112 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define <2 x float> @fmla2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp1 = fmul <2 x float> %A, %B
+ %tmp2 = fadd <2 x float> %C, %tmp1
+ ret <2 x float> %tmp2
+}
+
+define <4 x float> @fmla4xfloat(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp1 = fmul <4 x float> %A, %B
+ %tmp2 = fadd <4 x float> %C, %tmp1
+ ret <4 x float> %tmp2
+}
+
+define <2 x double> @fmla2xdouble(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp1 = fmul <2 x double> %A, %B
+ %tmp2 = fadd <2 x double> %C, %tmp1
+ ret <2 x double> %tmp2
+}
+
+
+define <2 x float> @fmls2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %tmp1 = fmul <2 x float> %A, %B
+ %tmp2 = fsub <2 x float> %C, %tmp1
+ ret <2 x float> %tmp2
+}
+
+define <4 x float> @fmls4xfloat(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
+;CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %tmp1 = fmul <4 x float> %A, %B
+ %tmp2 = fsub <4 x float> %C, %tmp1
+ ret <4 x float> %tmp2
+}
+
+define <2 x double> @fmls2xdouble(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %tmp1 = fmul <2 x double> %A, %B
+ %tmp2 = fsub <2 x double> %C, %tmp1
+ ret <2 x double> %tmp2
+}
+
+
+; Another set of tests for when the intrinsic is used.
+
+declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x float> @fmla2xfloat_fused(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %val = call <2 x float> @llvm.fma.v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C)
+ ret <2 x float> %val
+}
+
+define <4 x float> @fmla4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %val = call <4 x float> @llvm.fma.v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C)
+ ret <4 x float> %val
+}
+
+define <2 x double> @fmla2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %val = call <2 x double> @llvm.fma.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
+ ret <2 x double> %val
+}
+
+define <2 x float> @fmls2xfloat_fused(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK: fmls {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %negA = fsub <2 x float> <float -0.0, float -0.0>, %A
+ %val = call <2 x float> @llvm.fma.v2f32(<2 x float> %negA, <2 x float> %B, <2 x float> %C)
+ ret <2 x float> %val
+}
+
+define <4 x float> @fmls4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
+;CHECK: fmls {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %negA = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %A
+ %val = call <4 x float> @llvm.fma.v4f32(<4 x float> %negA, <4 x float> %B, <4 x float> %C)
+ ret <4 x float> %val
+}
+
+define <2 x double> @fmls2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK: fmls {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %negA = fsub <2 x double> <double -0.0, double -0.0>, %A
+ %val = call <2 x double> @llvm.fma.v2f64(<2 x double> %negA, <2 x double> %B, <2 x double> %C)
+ ret <2 x double> %val
+}
+
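+; Negating either multiplicand gives the same product sign (-a*b == a*-b), so
+; an input that negates %B instead of %A ought to form the same fmls.  An
+; unchecked sketch of that variant, reusing the llvm.fma declaration above:
+define <2 x float> @fmls2xfloat_fused_negB_sketch(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+ %negB = fsub <2 x float> <float -0.0, float -0.0>, %B
+ %val = call <2 x float> @llvm.fma.v2f32(<2 x float> %A, <2 x float> %negB, <2 x float> %C)
+ ret <2 x float> %val
+}
+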
+declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>)
+declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define <2 x float> @fmuladd2xfloat(<2 x float> %A, <2 x float> %B, <2 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+ %val = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %A, <2 x float> %B, <2 x float> %C)
+ ret <2 x float> %val
+}
+
+define <4 x float> @fmuladd4xfloat_fused(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
+;CHECK: fmla {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+ %val = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %A, <4 x float> %B, <4 x float> %C)
+ ret <4 x float> %val
+}
+
+define <2 x double> @fmuladd2xdouble_fused(<2 x double> %A, <2 x double> %B, <2 x double> %C) {
+;CHECK: fmla {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %val = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %A, <2 x double> %B, <2 x double> %C)
+ ret <2 x double> %val
+}
diff --git a/test/CodeGen/AArch64/neon-frsqrt-frecp.ll b/test/CodeGen/AArch64/neon-frsqrt-frecp.ll
new file mode 100644
index 000000000000..46fe25d74d9d
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-frsqrt-frecp.ll
@@ -0,0 +1,54 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+; Set of tests for when the intrinsic is used.
+
+declare <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @frsqrts_from_intr_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frsqrts v0.2s, v0.2s, v1.2s
+ %val = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+ ret <2 x float> %val
+}
+
+define <4 x float> @frsqrts_from_intr_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frsqrts v0.4s, v0.4s, v1.4s
+ %val = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+ ret <4 x float> %val
+}
+
+define <2 x double> @frsqrts_from_intr_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frsqrts v0.2d, v0.2d, v1.2d
+ %val = call <2 x double> @llvm.arm.neon.vrsqrts.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+ ret <2 x double> %val
+}
+
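+; frsqrts produces the Newton-Raphson step value (3 - a*b) / 2 used to refine
+; an frsqrte estimate (frecps is the analogous 2 - a*b).  A plain-IR model of
+; the frsqrts arithmetic (assumed semantics, not FileCheck'd here):
+define <2 x float> @frsqrts_step_model(<2 x float> %a, <2 x float> %b) {
+ %prod = fmul <2 x float> %a, %b
+ %sub = fsub <2 x float> <float 3.0, float 3.0>, %prod
+ %res = fmul <2 x float> %sub, <float 0.5, float 0.5>
+ ret <2 x float> %res
+}
+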
+declare <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @frecps_from_intr_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frecps v0.2s, v0.2s, v1.2s
+ %val = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+ ret <2 x float> %val
+}
+
+define <4 x float> @frecps_from_intr_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frecps v0.4s, v0.4s, v1.4s
+ %val = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+ ret <4 x float> %val
+}
+
+define <2 x double> @frecps_from_intr_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: frecps v0.2d, v0.2d, v1.2d
+ %val = call <2 x double> @llvm.arm.neon.vrecps.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+ ret <2 x double> %val
+}
+
diff --git a/test/CodeGen/AArch64/neon-halving-add-sub.ll b/test/CodeGen/AArch64/neon-halving-add-sub.ll
new file mode 100644
index 000000000000..a8f59dbdb0ad
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-halving-add-sub.ll
@@ -0,0 +1,207 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uhadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uhadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_shadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_shadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: shadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
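+; uhadd is a truncating halving add: lanes are widened, summed, and shifted
+; right by one, so the carry out of the top bit is not lost.  A scalar model of
+; one u8 lane (an illustration only, not part of the checked output):
+define i8 @uhadd_lane_model(i8 %a, i8 %b) {
+ %a.wide = zext i8 %a to i16
+ %b.wide = zext i8 %b to i16
+ %sum = add i16 %a.wide, %b.wide
+ %half = lshr i16 %sum, 1
+ %res = trunc i16 %half to i8
+ ret i8 %res
+}
+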
+declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uhadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uhadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_shadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_shadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: shadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uhadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uhadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_shadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_shadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: shadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uhadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uhadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_shadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_shadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: shadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uhadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uhadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_shadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_shadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: shadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uhadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uhadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_shadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_shadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: shadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+
+declare <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uhsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uhsub_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uhsub v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_shsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_shsub_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: shsub v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uhsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uhsub_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uhsub v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_shsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_shsub_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: shsub v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uhsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uhsub_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uhsub v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_shsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_shsub_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: shsub v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uhsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uhsub_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uhsub v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_shsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_shsub_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: shsub v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uhsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uhsub_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uhsub v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_shsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_shsub_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: shsub v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uhsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uhsub_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uhsub v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_shsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_shsub_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: shsub v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
diff --git a/test/CodeGen/AArch64/neon-max-min-pairwise.ll b/test/CodeGen/AArch64/neon-max-min-pairwise.ll
new file mode 100644
index 000000000000..d757aca86a69
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-max-min-pairwise.ll
@@ -0,0 +1,310 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_smaxp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: test_smaxp_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: smaxp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_umaxp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: umaxp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
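+; The "p" forms are pairwise: result lane i is the max of lanes 2i and 2i+1 of
+; the two operands laid end to end, so result lanes 0-3 come from %lhs and 4-7
+; from %rhs.  A scalar model of result lane 0 (illustrative, not checked):
+define i8 @smaxp_lane0_model(<8 x i8> %lhs) {
+ %e0 = extractelement <8 x i8> %lhs, i32 0
+ %e1 = extractelement <8 x i8> %lhs, i32 1
+ %cmp = icmp sgt i8 %e0, %e1
+ %max = select i1 %cmp, i8 %e0, i8 %e1
+ ret i8 %max
+}
+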
+declare <16 x i8> @llvm.arm.neon.vpmaxs.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vpmaxu.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_smaxp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_smaxp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vpmaxs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: smaxp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_umaxp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_umaxp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vpmaxu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: umaxp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_smaxp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_smaxp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: smaxp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_umaxp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_umaxp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: umaxp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+
+declare <8 x i16> @llvm.arm.neon.vpmaxs.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vpmaxu.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_smaxp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_smaxp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vpmaxs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: smaxp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_umaxp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_umaxp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vpmaxu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: umaxp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+
+declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_smaxp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_smaxp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: smaxp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_umaxp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_umaxp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: umaxp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vpmaxs.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vpmaxu.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_smaxp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_smaxp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vpmaxs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: smaxp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_umaxp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_umaxp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vpmaxu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: umaxp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_sminp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: test_sminp_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sminp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_uminp_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uminp v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vpmins.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vpminu.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_sminp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sminp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vpmins.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sminp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_uminp_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uminp_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vpminu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uminp v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_sminp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sminp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sminp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_uminp_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uminp_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uminp v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+
+declare <8 x i16> @llvm.arm.neon.vpmins.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vpminu.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_sminp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sminp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vpmins.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sminp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_uminp_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uminp_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vpminu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uminp v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+
+declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_sminp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sminp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sminp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_uminp_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uminp_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uminp v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vpmins.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vpminu.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_sminp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sminp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vpmins.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sminp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_uminp_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uminp_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vpminu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uminp v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vpmaxs.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fmaxp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fmaxp_v2f32:
+ %val = call <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fmaxp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fmaxp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fmaxp_v4f32:
+ %val = call <4 x float> @llvm.arm.neon.vpmaxs.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fmaxp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fmaxp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fmaxp_v2f64:
+ %val = call <2 x double> @llvm.arm.neon.vpmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fmaxp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vpmins.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fminp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fminp_v2f32:
+ %val = call <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fminp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fminp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fminp_v4f32:
+ %val = call <4 x float> @llvm.arm.neon.vpmins.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fminp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fminp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fminp_v2f64:
+ %val = call <2 x double> @llvm.arm.neon.vpmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fminp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+declare <2 x float> @llvm.aarch64.neon.vpmaxnm.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vpmaxnm.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fmaxnmp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fmaxnmp_v2f32:
+ %val = call <2 x float> @llvm.aarch64.neon.vpmaxnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fmaxnmp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fmaxnmp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fmaxnmp_v4f32:
+ %val = call <4 x float> @llvm.aarch64.neon.vpmaxnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fmaxnmp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fmaxnmp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fmaxnmp_v2f64:
+ %val = call <2 x double> @llvm.aarch64.neon.vpmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fmaxnmp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+declare <2 x float> @llvm.aarch64.neon.vpminnm.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vpminnm.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fminnmp_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fminnmp_v2f32:
+ %val = call <2 x float> @llvm.aarch64.neon.vpminnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fminnmp v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fminnmp_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fminnmp_v4f32:
+ %val = call <4 x float> @llvm.aarch64.neon.vpminnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fminnmp v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fminnmp_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fminnmp_v2f64:
+ %val = call <2 x double> @llvm.aarch64.neon.vpminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fminnmp v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
diff --git a/test/CodeGen/AArch64/neon-max-min.ll b/test/CodeGen/AArch64/neon-max-min.ll
new file mode 100644
index 000000000000..7889c77e37f1
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-max-min.ll
@@ -0,0 +1,310 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_smax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: test_smax_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: smax v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_umax_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: umax v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_smax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_smax_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: smax v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_umax_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_umax_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: umax v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_smax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_smax_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: smax v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_umax_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_umax_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: umax v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+
+declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_smax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_smax_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: smax v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_umax_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_umax_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: umax v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+
+declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_smax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_smax_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: smax v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_umax_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_umax_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: umax v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_smax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_smax_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: smax v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_umax_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_umax_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: umax v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_smin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; Using registers other than v0, v1 is possible, but would be odd.
+; CHECK: test_smin_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: smin v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_umin_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: umin v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_smin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_smin_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: smin v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_umin_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_umin_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: umin v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_smin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_smin_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: smin v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_umin_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_umin_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: umin v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+
+declare <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_smin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_smin_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: smin v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_umin_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_umin_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: umin v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+
+declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_smin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_smin_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: smin v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_umin_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_umin_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: umin v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_smin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_smin_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: smin v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_umin_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_umin_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: umin v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fmax_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fmax_v2f32:
+ %val = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fmax v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fmax_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fmax_v4f32:
+ %val = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fmax v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fmax_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fmax_v2f64:
+ %val = call <2 x double> @llvm.arm.neon.vmaxs.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fmax v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fmin_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fmin_v2f32:
+ %val = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fmin v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fmin_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fmin_v4f32:
+ %val = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fmin v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fmin_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fmin_v2f64:
+ %val = call <2 x double> @llvm.arm.neon.vmins.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fmin v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+
+declare <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fmaxnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fmaxnm_v2f32:
+ %val = call <2 x float> @llvm.aarch64.neon.vmaxnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fmaxnm v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fmaxnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fmaxnm_v4f32:
+ %val = call <4 x float> @llvm.aarch64.neon.vmaxnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fmaxnm v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fmaxnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fmaxnm_v2f64:
+ %val = call <2 x double> @llvm.aarch64.neon.vmaxnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fmaxnm v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
+
+declare <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @test_fminnm_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; CHECK: test_fminnm_v2f32:
+ %val = call <2 x float> @llvm.aarch64.neon.vminnm.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+; CHECK: fminnm v0.2s, v0.2s, v1.2s
+ ret <2 x float> %val
+}
+
+define <4 x float> @test_fminnm_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; CHECK: test_fminnm_v4f32:
+ %val = call <4 x float> @llvm.aarch64.neon.vminnm.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+; CHECK: fminnm v0.4s, v0.4s, v1.4s
+ ret <4 x float> %val
+}
+
+define <2 x double> @test_fminnm_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; CHECK: test_fminnm_v2f64:
+ %val = call <2 x double> @llvm.aarch64.neon.vminnm.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+; CHECK: fminnm v0.2d, v0.2d, v1.2d
+ ret <2 x double> %val
+}
diff --git a/test/CodeGen/AArch64/neon-misc-scalar.ll b/test/CodeGen/AArch64/neon-misc-scalar.ll
new file mode 100644
index 000000000000..cca8deb45cba
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-misc-scalar.ll
@@ -0,0 +1,60 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64>)
+
+declare <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64>, <1 x i64>)
+
+declare <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_vuqadd_s64(<1 x i64> %a, <1 x i64> %b) {
+entry:
+ ; CHECK: test_vuqadd_s64
+ %vuqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.suqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
+ ; CHECK: suqadd d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %vuqadd2.i
+}
+
+define <1 x i64> @test_vsqadd_u64(<1 x i64> %a, <1 x i64> %b) {
+entry:
+ ; CHECK: test_vsqadd_u64
+ %vsqadd2.i = tail call <1 x i64> @llvm.aarch64.neon.usqadd.v1i64(<1 x i64> %a, <1 x i64> %b)
+ ; CHECK: usqadd d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %vsqadd2.i
+}
+
+define <1 x i64> @test_vabs_s64(<1 x i64> %a) {
+ ; CHECK: test_vabs_s64
+entry:
+ %vabs1.i = tail call <1 x i64> @llvm.arm.neon.vabs.v1i64(<1 x i64> %a)
+ ; CHECK: abs d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %vabs1.i
+}
+
+define <1 x i64> @test_vqabs_s64(<1 x i64> %a) {
+ ; CHECK: test_vqabs_s64
+entry:
+ %vqabs1.i = tail call <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64> %a)
+ ; CHECK: sqabs d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %vqabs1.i
+}
+
+define <1 x i64> @test_vqneg_s64(<1 x i64> %a) {
+ ; CHECK: test_vqneg_s64
+entry:
+ %vqneg1.i = tail call <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64> %a)
+ ; CHECK: sqneg d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %vqneg1.i
+}
+
+define <1 x i64> @test_vneg_s64(<1 x i64> %a) {
+ ; CHECK: test_vneg_s64
+entry:
+ %sub.i = sub <1 x i64> zeroinitializer, %a
+ ; CHECK: neg d{{[0-9]+}}, d{{[0-9]+}}
+ ret <1 x i64> %sub.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-misc.ll b/test/CodeGen/AArch64/neon-misc.ll
new file mode 100644
index 000000000000..9660bf2c7a30
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-misc.ll
@@ -0,0 +1,1799 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+
+define <8 x i8> @test_vrev16_s8(<8 x i8> %a) #0 {
+; CHECK: rev16 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vrev16q_s8(<16 x i8> %a) #0 {
+; CHECK: rev16 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i8> @test_vrev32_s8(<8 x i8> %a) #0 {
+; CHECK: rev32 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vrev32_s16(<4 x i16> %a) #0 {
+; CHECK: rev32 v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x i16> %shuffle.i
+}
+
+define <16 x i8> @test_vrev32q_s8(<16 x i8> %a) #0 {
+; CHECK: rev32 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vrev32q_s16(<8 x i16> %a) #0 {
+; CHECK: rev32 v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vrev64_s8(<8 x i8> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vrev64_s16(<4 x i16> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vrev64_s32(<2 x i32> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %shuffle.i
+}
+
+define <2 x float> @test_vrev64_f32(<2 x float> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x float> %shuffle.i
+}
+
+define <16 x i8> @test_vrev64q_s8(<16 x i8> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vrev64q_s16(<8 x i16> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_vrev64q_s32(<4 x i32> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x i32> %shuffle.i
+}
+
+define <4 x float> @test_vrev64q_f32(<4 x float> %a) #0 {
+; CHECK: rev64 v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ ret <4 x float> %shuffle.i
+}
+
+define <4 x i16> @test_vpaddl_s8(<8 x i8> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
+ %vpaddl.i = tail call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %a) #4
+ ret <4 x i16> %vpaddl.i
+}
+
+define <2 x i32> @test_vpaddl_s16(<4 x i16> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
+ %vpaddl1.i = tail call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %a) #4
+ ret <2 x i32> %vpaddl1.i
+}
+
+define <1 x i64> @test_vpaddl_s32(<2 x i32> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
+ %vpaddl1.i = tail call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %a) #4
+ ret <1 x i64> %vpaddl1.i
+}
+
+define <4 x i16> @test_vpaddl_u8(<8 x i8> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
+ %vpaddl.i = tail call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %a) #4
+ ret <4 x i16> %vpaddl.i
+}
+
+define <2 x i32> @test_vpaddl_u16(<4 x i16> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
+ %vpaddl1.i = tail call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %a) #4
+ ret <2 x i32> %vpaddl1.i
+}
+
+define <1 x i64> @test_vpaddl_u32(<2 x i32> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
+ %vpaddl1.i = tail call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %a) #4
+ ret <1 x i64> %vpaddl1.i
+}
+
+define <8 x i16> @test_vpaddlq_s8(<16 x i8> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
+ %vpaddl.i = tail call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %a) #4
+ ret <8 x i16> %vpaddl.i
+}
+
+define <4 x i32> @test_vpaddlq_s16(<8 x i16> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
+ %vpaddl1.i = tail call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %a) #4
+ ret <4 x i32> %vpaddl1.i
+}
+
+define <2 x i64> @test_vpaddlq_s32(<4 x i32> %a) #0 {
+; CHECK: saddlp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
+ %vpaddl1.i = tail call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %a) #4
+ ret <2 x i64> %vpaddl1.i
+}
+
+define <8 x i16> @test_vpaddlq_u8(<16 x i8> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
+ %vpaddl.i = tail call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %a) #4
+ ret <8 x i16> %vpaddl.i
+}
+
+define <4 x i32> @test_vpaddlq_u16(<8 x i16> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
+ %vpaddl1.i = tail call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %a) #4
+ ret <4 x i32> %vpaddl1.i
+}
+
+define <2 x i64> @test_vpaddlq_u32(<4 x i32> %a) #0 {
+; CHECK: uaddlp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
+ %vpaddl1.i = tail call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %a) #4
+ ret <2 x i64> %vpaddl1.i
+}
+
+define <4 x i16> @test_vpadal_s8(<4 x i16> %a, <8 x i8> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
+ %vpadal1.i = tail call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %a, <8 x i8> %b) #4
+ ret <4 x i16> %vpadal1.i
+}
+
+define <2 x i32> @test_vpadal_s16(<2 x i32> %a, <4 x i16> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
+ %vpadal2.i = tail call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %a, <4 x i16> %b) #4
+ ret <2 x i32> %vpadal2.i
+}
+
+define <1 x i64> @test_vpadal_s32(<1 x i64> %a, <2 x i32> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
+ %vpadal2.i = tail call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %a, <2 x i32> %b) #4
+ ret <1 x i64> %vpadal2.i
+}
+
+define <4 x i16> @test_vpadal_u8(<4 x i16> %a, <8 x i8> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.4h, v{{[0-9]+}}.8b
+ %vpadal1.i = tail call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %a, <8 x i8> %b) #4
+ ret <4 x i16> %vpadal1.i
+}
+
+define <2 x i32> @test_vpadal_u16(<2 x i32> %a, <4 x i16> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.2s, v{{[0-9]+}}.4h
+ %vpadal2.i = tail call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %a, <4 x i16> %b) #4
+ ret <2 x i32> %vpadal2.i
+}
+
+define <1 x i64> @test_vpadal_u32(<1 x i64> %a, <2 x i32> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.1d, v{{[0-9]+}}.2s
+ %vpadal2.i = tail call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %a, <2 x i32> %b) #4
+ ret <1 x i64> %vpadal2.i
+}
+
+define <8 x i16> @test_vpadalq_s8(<8 x i16> %a, <16 x i8> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
+ %vpadal1.i = tail call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %a, <16 x i8> %b) #4
+ ret <8 x i16> %vpadal1.i
+}
+
+define <4 x i32> @test_vpadalq_s16(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
+ %vpadal2.i = tail call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %a, <8 x i16> %b) #4
+ ret <4 x i32> %vpadal2.i
+}
+
+define <2 x i64> @test_vpadalq_s32(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK: sadalp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
+ %vpadal2.i = tail call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %a, <4 x i32> %b) #4
+ ret <2 x i64> %vpadal2.i
+}
+
+define <8 x i16> @test_vpadalq_u8(<8 x i16> %a, <16 x i8> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.8h, v{{[0-9]+}}.16b
+ %vpadal1.i = tail call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %a, <16 x i8> %b) #4
+ ret <8 x i16> %vpadal1.i
+}
+
+define <4 x i32> @test_vpadalq_u16(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
+ %vpadal2.i = tail call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %a, <8 x i16> %b) #4
+ ret <4 x i32> %vpadal2.i
+}
+
+define <2 x i64> @test_vpadalq_u32(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK: uadalp v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
+ %vpadal2.i = tail call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %a, <4 x i32> %b) #4
+ ret <2 x i64> %vpadal2.i
+}
+
+define <8 x i8> @test_vqabs_s8(<8 x i8> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vqabs.i = tail call <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vqabs.i
+}
+
+define <16 x i8> @test_vqabsq_s8(<16 x i8> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vqabs.i = tail call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vqabs.i
+}
+
+define <4 x i16> @test_vqabs_s16(<4 x i16> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vqabs1.i = tail call <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16> %a) #4
+ ret <4 x i16> %vqabs1.i
+}
+
+define <8 x i16> @test_vqabsq_s16(<8 x i16> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vqabs1.i = tail call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %a) #4
+ ret <8 x i16> %vqabs1.i
+}
+
+define <2 x i32> @test_vqabs_s32(<2 x i32> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vqabs1.i = tail call <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32> %a) #4
+ ret <2 x i32> %vqabs1.i
+}
+
+define <4 x i32> @test_vqabsq_s32(<4 x i32> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vqabs1.i = tail call <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32> %a) #4
+ ret <4 x i32> %vqabs1.i
+}
+
+define <2 x i64> @test_vqabsq_s64(<2 x i64> %a) #0 {
+; CHECK: sqabs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vqabs1.i = tail call <2 x i64> @llvm.arm.neon.vqabs.v2i64(<2 x i64> %a) #4
+ ret <2 x i64> %vqabs1.i
+}
+
+define <8 x i8> @test_vqneg_s8(<8 x i8> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vqneg.i = tail call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vqneg.i
+}
+
+define <16 x i8> @test_vqnegq_s8(<16 x i8> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vqneg.i = tail call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vqneg.i
+}
+
+define <4 x i16> @test_vqneg_s16(<4 x i16> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vqneg1.i = tail call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %a) #4
+ ret <4 x i16> %vqneg1.i
+}
+
+define <8 x i16> @test_vqnegq_s16(<8 x i16> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vqneg1.i = tail call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %a) #4
+ ret <8 x i16> %vqneg1.i
+}
+
+define <2 x i32> @test_vqneg_s32(<2 x i32> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vqneg1.i = tail call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %a) #4
+ ret <2 x i32> %vqneg1.i
+}
+
+define <4 x i32> @test_vqnegq_s32(<4 x i32> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vqneg1.i = tail call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %a) #4
+ ret <4 x i32> %vqneg1.i
+}
+
+define <2 x i64> @test_vqnegq_s64(<2 x i64> %a) #0 {
+; CHECK: sqneg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vqneg1.i = tail call <2 x i64> @llvm.arm.neon.vqneg.v2i64(<2 x i64> %a) #4
+ ret <2 x i64> %vqneg1.i
+}
+
+define <8 x i8> @test_vneg_s8(<8 x i8> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %sub.i = sub <8 x i8> zeroinitializer, %a
+ ret <8 x i8> %sub.i
+}
+
+define <16 x i8> @test_vnegq_s8(<16 x i8> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %sub.i = sub <16 x i8> zeroinitializer, %a
+ ret <16 x i8> %sub.i
+}
+
+define <4 x i16> @test_vneg_s16(<4 x i16> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %sub.i = sub <4 x i16> zeroinitializer, %a
+ ret <4 x i16> %sub.i
+}
+
+define <8 x i16> @test_vnegq_s16(<8 x i16> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %sub.i = sub <8 x i16> zeroinitializer, %a
+ ret <8 x i16> %sub.i
+}
+
+define <2 x i32> @test_vneg_s32(<2 x i32> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %sub.i = sub <2 x i32> zeroinitializer, %a
+ ret <2 x i32> %sub.i
+}
+
+define <4 x i32> @test_vnegq_s32(<4 x i32> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %sub.i = sub <4 x i32> zeroinitializer, %a
+ ret <4 x i32> %sub.i
+}
+
+define <2 x i64> @test_vnegq_s64(<2 x i64> %a) #0 {
+; CHECK: neg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %sub.i = sub <2 x i64> zeroinitializer, %a
+ ret <2 x i64> %sub.i
+}
+
+define <2 x float> @test_vneg_f32(<2 x float> %a) #0 {
+; CHECK: fneg v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %sub.i = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %a
+ ret <2 x float> %sub.i
+}
+
+define <4 x float> @test_vnegq_f32(<4 x float> %a) #0 {
+; CHECK: fneg v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %sub.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
+ ret <4 x float> %sub.i
+}
+
+define <2 x double> @test_vnegq_f64(<2 x double> %a) #0 {
+; CHECK: fneg v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
+ ret <2 x double> %sub.i
+}
+
+define <8 x i8> @test_vabs_s8(<8 x i8> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vabs.i = tail call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vabs.i
+}
+
+define <16 x i8> @test_vabsq_s8(<16 x i8> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vabs.i = tail call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vabs.i
+}
+
+define <4 x i16> @test_vabs_s16(<4 x i16> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vabs1.i = tail call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %a) #4
+ ret <4 x i16> %vabs1.i
+}
+
+define <8 x i16> @test_vabsq_s16(<8 x i16> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vabs1.i = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %a) #4
+ ret <8 x i16> %vabs1.i
+}
+
+define <2 x i32> @test_vabs_s32(<2 x i32> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vabs1.i = tail call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %a) #4
+ ret <2 x i32> %vabs1.i
+}
+
+define <4 x i32> @test_vabsq_s32(<4 x i32> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vabs1.i = tail call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %a) #4
+ ret <4 x i32> %vabs1.i
+}
+
+define <2 x i64> @test_vabsq_s64(<2 x i64> %a) #0 {
+; CHECK: abs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vabs1.i = tail call <2 x i64> @llvm.arm.neon.vabs.v2i64(<2 x i64> %a) #4
+ ret <2 x i64> %vabs1.i
+}
+
+define <2 x float> @test_vabs_f32(<2 x float> %a) #1 {
+; CHECK: fabs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vabs1.i = tail call <2 x float> @llvm.fabs.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vabs1.i
+}
+
+define <4 x float> @test_vabsq_f32(<4 x float> %a) #1 {
+; CHECK: fabs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vabs1.i = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vabs1.i
+}
+
+define <2 x double> @test_vabsq_f64(<2 x double> %a) #1 {
+; CHECK: fabs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vabs1.i = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vabs1.i
+}
+
+define <8 x i8> @test_vuqadd_s8(<8 x i8> %a, <8 x i8> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vuqadd.i = tail call <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8> %a, <8 x i8> %b) #4
+ ret <8 x i8> %vuqadd.i
+}
+
+define <16 x i8> @test_vuqaddq_s8(<16 x i8> %a, <16 x i8> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vuqadd.i = tail call <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8> %a, <16 x i8> %b) #4
+ ret <16 x i8> %vuqadd.i
+}
+
+define <4 x i16> @test_vuqadd_s16(<4 x i16> %a, <4 x i16> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vuqadd2.i = tail call <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16> %a, <4 x i16> %b) #4
+ ret <4 x i16> %vuqadd2.i
+}
+
+define <8 x i16> @test_vuqaddq_s16(<8 x i16> %a, <8 x i16> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vuqadd2.i = tail call <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16> %a, <8 x i16> %b) #4
+ ret <8 x i16> %vuqadd2.i
+}
+
+define <2 x i32> @test_vuqadd_s32(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vuqadd2.i = tail call <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32> %a, <2 x i32> %b) #4
+ ret <2 x i32> %vuqadd2.i
+}
+
+define <4 x i32> @test_vuqaddq_s32(<4 x i32> %a, <4 x i32> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vuqadd2.i = tail call <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32> %a, <4 x i32> %b) #4
+ ret <4 x i32> %vuqadd2.i
+}
+
+define <2 x i64> @test_vuqaddq_s64(<2 x i64> %a, <2 x i64> %b) #0 {
+; CHECK: suqadd v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vuqadd2.i = tail call <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64> %a, <2 x i64> %b) #4
+ ret <2 x i64> %vuqadd2.i
+}
+
+define <8 x i8> @test_vcls_s8(<8 x i8> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vcls.i = tail call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vcls.i
+}
+
+define <16 x i8> @test_vclsq_s8(<16 x i8> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vcls.i = tail call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vcls.i
+}
+
+define <4 x i16> @test_vcls_s16(<4 x i16> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vcls1.i = tail call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %a) #4
+ ret <4 x i16> %vcls1.i
+}
+
+define <8 x i16> @test_vclsq_s16(<8 x i16> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vcls1.i = tail call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %a) #4
+ ret <8 x i16> %vcls1.i
+}
+
+define <2 x i32> @test_vcls_s32(<2 x i32> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcls1.i = tail call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %a) #4
+ ret <2 x i32> %vcls1.i
+}
+
+define <4 x i32> @test_vclsq_s32(<4 x i32> %a) #0 {
+; CHECK: cls v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcls1.i = tail call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %a) #4
+ ret <4 x i32> %vcls1.i
+}
+
+define <8 x i8> @test_vclz_s8(<8 x i8> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vclz.i = tail call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %a, i1 false) #4
+ ret <8 x i8> %vclz.i
+}
+
+define <16 x i8> @test_vclzq_s8(<16 x i8> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vclz.i = tail call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) #4
+ ret <16 x i8> %vclz.i
+}
+
+define <4 x i16> @test_vclz_s16(<4 x i16> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.4h, v{{[0-9]+}}.4h
+ %vclz1.i = tail call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %a, i1 false) #4
+ ret <4 x i16> %vclz1.i
+}
+
+define <8 x i16> @test_vclzq_s16(<8 x i16> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
+ %vclz1.i = tail call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) #4
+ ret <8 x i16> %vclz1.i
+}
+
+define <2 x i32> @test_vclz_s32(<2 x i32> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vclz1.i = tail call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %a, i1 false) #4
+ ret <2 x i32> %vclz1.i
+}
+
+define <4 x i32> @test_vclzq_s32(<4 x i32> %a) #0 {
+; CHECK: clz v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vclz1.i = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) #4
+ ret <4 x i32> %vclz1.i
+}
+
+define <8 x i8> @test_vcnt_s8(<8 x i8> %a) #0 {
+; CHECK: cnt v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vctpop.i = tail call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vctpop.i
+}
+
+define <16 x i8> @test_vcntq_s8(<16 x i8> %a) #0 {
+; CHECK: cnt v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vctpop.i = tail call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vctpop.i
+}
+
+define <8 x i8> @test_vmvn_s8(<8 x i8> %a) #0 {
+; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %neg.i = xor <8 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <8 x i8> %neg.i
+}
+
+define <16 x i8> @test_vmvnq_s8(<16 x i8> %a) #0 {
+; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %neg.i = xor <16 x i8> %a, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ret <16 x i8> %neg.i
+}
+
+define <4 x i16> @test_vmvn_s16(<4 x i16> %a) #0 {
+; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %neg.i = xor <4 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <4 x i16> %neg.i
+}
+
+define <8 x i16> @test_vmvnq_s16(<8 x i16> %a) #0 {
+; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %neg.i = xor <8 x i16> %a, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ret <8 x i16> %neg.i
+}
+
+define <2 x i32> @test_vmvn_s32(<2 x i32> %a) #0 {
+; CHECK: not v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %neg.i = xor <2 x i32> %a, <i32 -1, i32 -1>
+ ret <2 x i32> %neg.i
+}
+
+define <4 x i32> @test_vmvnq_s32(<4 x i32> %a) #0 {
+; CHECK: not v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %neg.i = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %neg.i
+}
+
+define <8 x i8> @test_vrbit_s8(<8 x i8> %a) #0 {
+; CHECK: rbit v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
+ %vrbit.i = tail call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %a) #4
+ ret <8 x i8> %vrbit.i
+}
+
+define <16 x i8> @test_vrbitq_s8(<16 x i8> %a) #0 {
+; CHECK: rbit v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
+ %vrbit.i = tail call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %a) #4
+ ret <16 x i8> %vrbit.i
+}
+
+define <8 x i8> @test_vmovn_s16(<8 x i16> %a) #0 {
+; CHECK: xtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
+ %vmovn.i = trunc <8 x i16> %a to <8 x i8>
+ ret <8 x i8> %vmovn.i
+}
+
+define <4 x i16> @test_vmovn_s32(<4 x i32> %a) #0 {
+; CHECK: xtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+ %vmovn.i = trunc <4 x i32> %a to <4 x i16>
+ ret <4 x i16> %vmovn.i
+}
+
+define <2 x i32> @test_vmovn_s64(<2 x i64> %a) #0 {
+; CHECK: xtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vmovn.i = trunc <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %vmovn.i
+}
+
+define <16 x i8> @test_vmovn_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
+; CHECK: xtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
+ %vmovn.i.i = trunc <8 x i16> %b to <8 x i8>
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vmovn.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vmovn_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
+; CHECK: xtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
+ %vmovn.i.i = trunc <4 x i32> %b to <4 x i16>
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vmovn.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_vmovn_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
+; CHECK: xtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vmovn.i.i = trunc <2 x i64> %b to <2 x i32>
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vmovn.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_vqmovun_s16(<8 x i16> %a) #0 {
+; CHECK: sqxtun v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
+ %vqdmull1.i = tail call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %a) #4
+ ret <8 x i8> %vqdmull1.i
+}
+
+define <4 x i16> @test_vqmovun_s32(<4 x i32> %a) #0 {
+; CHECK: sqxtun v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+ %vqdmull1.i = tail call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %a) #4
+ ret <4 x i16> %vqdmull1.i
+}
+
+define <2 x i32> @test_vqmovun_s64(<2 x i64> %a) #0 {
+; CHECK: sqxtun v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vqdmull1.i = tail call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %a) #4
+ ret <2 x i32> %vqdmull1.i
+}
+
+define <16 x i8> @test_vqmovun_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
+; CHECK: sqxtun2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
+ %vqdmull1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %b) #4
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqdmull1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vqmovun_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
+; CHECK: sqxtun2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
+ %vqdmull1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %b) #4
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqdmull1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_vqmovun_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
+; CHECK: sqxtun2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vqdmull1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %b) #4
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqdmull1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_vqmovn_s16(<8 x i16> %a) #0 {
+; CHECK: sqxtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
+ %vqmovn1.i = tail call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %a) #4
+ ret <8 x i8> %vqmovn1.i
+}
+
+define <4 x i16> @test_vqmovn_s32(<4 x i32> %a) #0 {
+; CHECK: sqxtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+ %vqmovn1.i = tail call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %a) #4
+ ret <4 x i16> %vqmovn1.i
+}
+
+define <2 x i32> @test_vqmovn_s64(<2 x i64> %a) #0 {
+; CHECK: sqxtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vqmovn1.i = tail call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %a) #4
+ ret <2 x i32> %vqmovn1.i
+}
+
+define <16 x i8> @test_vqmovn_high_s16(<8 x i8> %a, <8 x i16> %b) #0 {
+; CHECK: sqxtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
+ %vqmovn1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %b) #4
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqmovn1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vqmovn_high_s32(<4 x i16> %a, <4 x i32> %b) #0 {
+; CHECK: sqxtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
+ %vqmovn1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %b) #4
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqmovn1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_vqmovn_high_s64(<2 x i32> %a, <2 x i64> %b) #0 {
+; CHECK: sqxtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vqmovn1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %b) #4
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqmovn1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i8> @test_vqmovn_u16(<8 x i16> %a) #0 {
+; CHECK: uqxtn v{{[0-9]+}}.8b, v{{[0-9]+}}.8h
+ %vqmovn1.i = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %a) #4
+ ret <8 x i8> %vqmovn1.i
+}
+
+define <4 x i16> @test_vqmovn_u32(<4 x i32> %a) #0 {
+; CHECK: uqxtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+ %vqmovn1.i = tail call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %a) #4
+ ret <4 x i16> %vqmovn1.i
+}
+
+define <2 x i32> @test_vqmovn_u64(<2 x i64> %a) #0 {
+; CHECK: uqxtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vqmovn1.i = tail call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %a) #4
+ ret <2 x i32> %vqmovn1.i
+}
+
+define <16 x i8> @test_vqmovn_high_u16(<8 x i8> %a, <8 x i16> %b) #0 {
+; CHECK: uqxtn2 v{{[0-9]+}}.16b, v{{[0-9]+}}.8h
+ %vqmovn1.i.i = tail call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %b) #4
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %vqmovn1.i.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %shuffle.i
+}
+
+define <8 x i16> @test_vqmovn_high_u32(<4 x i16> %a, <4 x i32> %b) #0 {
+; CHECK: uqxtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
+ %vqmovn1.i.i = tail call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %b) #4
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vqmovn1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x i32> @test_vqmovn_high_u64(<2 x i32> %a, <2 x i64> %b) #0 {
+; CHECK: uqxtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vqmovn1.i.i = tail call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %b) #4
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %vqmovn1.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i32> %shuffle.i
+}
+
+define <8 x i16> @test_vshll_n_s8(<8 x i8> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #8
+ %1 = sext <8 x i8> %a to <8 x i16>
+ %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %vshll_n
+}
+
+define <4 x i32> @test_vshll_n_s16(<4 x i16> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #16
+ %1 = sext <4 x i16> %a to <4 x i32>
+ %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %vshll_n
+}
+
+define <2 x i64> @test_vshll_n_s32(<2 x i32> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #32
+ %1 = sext <2 x i32> %a to <2 x i64>
+ %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
+ ret <2 x i64> %vshll_n
+}
+
+define <8 x i16> @test_vshll_n_u8(<8 x i8> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #8
+ %1 = zext <8 x i8> %a to <8 x i16>
+ %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %vshll_n
+}
+
+define <4 x i32> @test_vshll_n_u16(<4 x i16> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #16
+ %1 = zext <4 x i16> %a to <4 x i32>
+ %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %vshll_n
+}
+
+define <2 x i64> @test_vshll_n_u32(<2 x i32> %a) #0 {
+; CHECK: shll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #32
+ %1 = zext <2 x i32> %a to <2 x i64>
+ %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
+ ret <2 x i64> %vshll_n
+}
+
+define <8 x i16> @test_vshll_high_n_s8(<16 x i8> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #8
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = sext <8 x i8> %shuffle.i to <8 x i16>
+ %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %vshll_n
+}
+
+define <4 x i32> @test_vshll_high_n_s16(<8 x i16> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #16
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = sext <4 x i16> %shuffle.i to <4 x i32>
+ %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %vshll_n
+}
+
+define <2 x i64> @test_vshll_high_n_s32(<4 x i32> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #32
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = sext <2 x i32> %shuffle.i to <2 x i64>
+ %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
+ ret <2 x i64> %vshll_n
+}
+
+define <8 x i16> @test_vshll_high_n_u8(<16 x i8> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #8
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %1 = zext <8 x i8> %shuffle.i to <8 x i16>
+ %vshll_n = shl <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ ret <8 x i16> %vshll_n
+}
+
+define <4 x i32> @test_vshll_high_n_u16(<8 x i16> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #16
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %1 = zext <4 x i16> %shuffle.i to <4 x i32>
+ %vshll_n = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+ ret <4 x i32> %vshll_n
+}
+
+define <2 x i64> @test_vshll_high_n_u32(<4 x i32> %a) #0 {
+; CHECK: shll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #32
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %1 = zext <2 x i32> %shuffle.i to <2 x i64>
+ %vshll_n = shl <2 x i64> %1, <i64 32, i64 32>
+ ret <2 x i64> %vshll_n
+}
+
+define <4 x i16> @test_vcvt_f16_f32(<4 x float> %a) #0 {
+; CHECK: fcvtn v{{[0-9]+}}.4h, v{{[0-9]+}}.4s
+ %vcvt1.i = tail call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %a) #4
+ ret <4 x i16> %vcvt1.i
+}
+
+define <8 x i16> @test_vcvt_high_f16_f32(<4 x i16> %a, <4 x float> %b) #0 {
+; CHECK: fcvtn2 v{{[0-9]+}}.8h, v{{[0-9]+}}.4s
+ %vcvt1.i.i = tail call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %b) #4
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %vcvt1.i.i, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i16> %shuffle.i
+}
+
+define <4 x float> @test_vcvt_f32_f16(<4 x i16> %a) #0 {
+; CHECK: fcvtl v{{[0-9]+}}.4s, v{{[0-9]+}}.4h
+ %vcvt1.i = tail call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %a) #4
+ ret <4 x float> %vcvt1.i
+}
+
+define <4 x float> @test_vcvt_high_f32_f16(<8 x i16> %a) #0 {
+; CHECK: fcvtl2 v{{[0-9]+}}.4s, v{{[0-9]+}}.8h
+ %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vcvt1.i.i = tail call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %shuffle.i.i) #4
+ ret <4 x float> %vcvt1.i.i
+}
+
+define <2 x float> @test_vcvt_f32_f64(<2 x double> %a) #0 {
+; CHECK: fcvtn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vcvt.i = fptrunc <2 x double> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+}
+
+define <4 x float> @test_vcvt_high_f32_f64(<2 x float> %a, <2 x double> %b) #0 {
+; CHECK: fcvtn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vcvt.i.i = fptrunc <2 x double> %b to <2 x float>
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %vcvt.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x float> @test_vcvtx_f32_f64(<2 x double> %a) #0 {
+; CHECK: fcvtxn v{{[0-9]+}}.2s, v{{[0-9]+}}.2d
+ %vcvtx_f32_f641.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %a) #4
+ ret <2 x float> %vcvtx_f32_f641.i
+}
+
+define <4 x float> @test_vcvtx_high_f32_f64(<2 x float> %a, <2 x double> %b) #0 {
+; CHECK: fcvtxn2 v{{[0-9]+}}.4s, v{{[0-9]+}}.2d
+ %vcvtx_f32_f641.i.i = tail call <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double> %b) #4
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %vcvtx_f32_f641.i.i, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vcvt_f64_f32(<2 x float> %a) #0 {
+; CHECK: fcvtl v{{[0-9]+}}.2d, v{{[0-9]+}}.2s
+ %vcvt.i = fpext <2 x float> %a to <2 x double>
+ ret <2 x double> %vcvt.i
+}
+
+define <2 x double> @test_vcvt_high_f64_f32(<4 x float> %a) #0 {
+; CHECK: fcvtl2 v{{[0-9]+}}.2d, v{{[0-9]+}}.4s
+ %shuffle.i.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ %vcvt.i.i = fpext <2 x float> %shuffle.i.i to <2 x double>
+ ret <2 x double> %vcvt.i.i
+}
+
+define <2 x float> @test_vrndn_f32(<2 x float> %a) #0 {
+; CHECK: frintn v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrndn1.i = tail call <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrndn1.i
+}
+
+define <4 x float> @test_vrndnq_f32(<4 x float> %a) #0 {
+; CHECK: frintn v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrndn1.i = tail call <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrndn1.i
+}
+
+define <2 x double> @test_vrndnq_f64(<2 x double> %a) #0 {
+; CHECK: frintn v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrndn1.i = tail call <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrndn1.i
+}
+
+define <2 x float> @test_vrnda_f32(<2 x float> %a) #0 {
+; CHECK: frinta v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrnda1.i = tail call <2 x float> @llvm.round.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrnda1.i
+}
+
+define <4 x float> @test_vrndaq_f32(<4 x float> %a) #0 {
+; CHECK: frinta v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrnda1.i = tail call <4 x float> @llvm.round.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrnda1.i
+}
+
+define <2 x double> @test_vrndaq_f64(<2 x double> %a) #0 {
+; CHECK: frinta v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrnda1.i = tail call <2 x double> @llvm.round.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrnda1.i
+}
+
+define <2 x float> @test_vrndp_f32(<2 x float> %a) #0 {
+; CHECK: frintp v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrndp1.i = tail call <2 x float> @llvm.ceil.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrndp1.i
+}
+
+define <4 x float> @test_vrndpq_f32(<4 x float> %a) #0 {
+; CHECK: frintp v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrndp1.i = tail call <4 x float> @llvm.ceil.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrndp1.i
+}
+
+define <2 x double> @test_vrndpq_f64(<2 x double> %a) #0 {
+; CHECK: frintp v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrndp1.i = tail call <2 x double> @llvm.ceil.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrndp1.i
+}
+
+define <2 x float> @test_vrndm_f32(<2 x float> %a) #0 {
+; CHECK: frintm v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrndm1.i = tail call <2 x float> @llvm.floor.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrndm1.i
+}
+
+define <4 x float> @test_vrndmq_f32(<4 x float> %a) #0 {
+; CHECK: frintm v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrndm1.i = tail call <4 x float> @llvm.floor.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrndm1.i
+}
+
+define <2 x double> @test_vrndmq_f64(<2 x double> %a) #0 {
+; CHECK: frintm v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrndm1.i = tail call <2 x double> @llvm.floor.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrndm1.i
+}
+
+define <2 x float> @test_vrndx_f32(<2 x float> %a) #0 {
+; CHECK: frintx v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrndx1.i = tail call <2 x float> @llvm.rint.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrndx1.i
+}
+
+define <4 x float> @test_vrndxq_f32(<4 x float> %a) #0 {
+; CHECK: frintx v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrndx1.i = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrndx1.i
+}
+
+define <2 x double> @test_vrndxq_f64(<2 x double> %a) #0 {
+; CHECK: frintx v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrndx1.i = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrndx1.i
+}
+
+define <2 x float> @test_vrnd_f32(<2 x float> %a) #0 {
+; CHECK: frintz v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrnd1.i = tail call <2 x float> @llvm.trunc.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrnd1.i
+}
+
+define <4 x float> @test_vrndq_f32(<4 x float> %a) #0 {
+; CHECK: frintz v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrnd1.i = tail call <4 x float> @llvm.trunc.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrnd1.i
+}
+
+define <2 x double> @test_vrndq_f64(<2 x double> %a) #0 {
+; CHECK: frintz v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrnd1.i = tail call <2 x double> @llvm.trunc.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrnd1.i
+}
+
+define <2 x float> @test_vrndi_f32(<2 x float> %a) #0 {
+; CHECK: frinti v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrndi1.i = tail call <2 x float> @llvm.nearbyint.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrndi1.i
+}
+
+define <4 x float> @test_vrndiq_f32(<4 x float> %a) #0 {
+; CHECK: frinti v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrndi1.i = tail call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrndi1.i
+}
+
+define <2 x double> @test_vrndiq_f64(<2 x double> %a) #0 {
+; CHECK: frinti v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrndi1.i = tail call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrndi1.i
+}
+
+define <2 x i32> @test_vcvt_s32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtzs v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvt.i = fptosi <2 x float> %a to <2 x i32>
+ ret <2 x i32> %vcvt.i
+}
+
+define <4 x i32> @test_vcvtq_s32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtzs v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvt.i = fptosi <4 x float> %a to <4 x i32>
+ ret <4 x i32> %vcvt.i
+}
+
+define <2 x i64> @test_vcvtq_s64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtzs v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvt.i = fptosi <2 x double> %a to <2 x i64>
+ ret <2 x i64> %vcvt.i
+}
+
+define <2 x i32> @test_vcvt_u32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtzu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvt.i = fptoui <2 x float> %a to <2 x i32>
+ ret <2 x i32> %vcvt.i
+}
+
+define <4 x i32> @test_vcvtq_u32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtzu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvt.i = fptoui <4 x float> %a to <4 x i32>
+ ret <4 x i32> %vcvt.i
+}
+
+define <2 x i64> @test_vcvtq_u64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtzu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvt.i = fptoui <2 x double> %a to <2 x i64>
+ ret <2 x i64> %vcvt.i
+}
+
+define <2 x i32> @test_vcvtn_s32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtns v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtns_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtns_f321.i
+}
+
+define <4 x i32> @test_vcvtnq_s32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtns v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtns_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtns_f321.i
+}
+
+define <2 x i64> @test_vcvtnq_s64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtns v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtns_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtns_f641.i
+}
+
+define <2 x i32> @test_vcvtn_u32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtnu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtnu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtnu_f321.i
+}
+
+define <4 x i32> @test_vcvtnq_u32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtnu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtnu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtnu_f321.i
+}
+
+define <2 x i64> @test_vcvtnq_u64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtnu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtnu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtnu_f641.i
+}
+
+define <2 x i32> @test_vcvtp_s32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtps v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtps_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtps_f321.i
+}
+
+define <4 x i32> @test_vcvtpq_s32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtps v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtps_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtps_f321.i
+}
+
+define <2 x i64> @test_vcvtpq_s64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtps v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtps_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtps_f641.i
+}
+
+define <2 x i32> @test_vcvtp_u32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtpu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtpu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtpu_f321.i
+}
+
+define <4 x i32> @test_vcvtpq_u32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtpu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtpu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtpu_f321.i
+}
+
+define <2 x i64> @test_vcvtpq_u64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtpu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtpu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtpu_f641.i
+}
+
+define <2 x i32> @test_vcvtm_s32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtms v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtms_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtms_f321.i
+}
+
+define <4 x i32> @test_vcvtmq_s32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtms v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtms_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtms_f321.i
+}
+
+define <2 x i64> @test_vcvtmq_s64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtms v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtms_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtms_f641.i
+}
+
+define <2 x i32> @test_vcvtm_u32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtmu v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtmu_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtmu_f321.i
+}
+
+define <4 x i32> @test_vcvtmq_u32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtmu v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtmu_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtmu_f321.i
+}
+
+define <2 x i64> @test_vcvtmq_u64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtmu v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtmu_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtmu_f641.i
+}
+
+define <2 x i32> @test_vcvta_s32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtas v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtas_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtas_f321.i
+}
+
+define <4 x i32> @test_vcvtaq_s32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtas v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtas_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtas_f321.i
+}
+
+define <2 x i64> @test_vcvtaq_s64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtas v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtas_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtas_f641.i
+}
+
+define <2 x i32> @test_vcvta_u32_f32(<2 x float> %a) #0 {
+; CHECK: fcvtau v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvtau_f321.i = tail call <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float> %a) #4
+ ret <2 x i32> %vcvtau_f321.i
+}
+
+define <4 x i32> @test_vcvtaq_u32_f32(<4 x float> %a) #0 {
+; CHECK: fcvtau v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvtau_f321.i = tail call <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float> %a) #4
+ ret <4 x i32> %vcvtau_f321.i
+}
+
+define <2 x i64> @test_vcvtaq_u64_f64(<2 x double> %a) #0 {
+; CHECK: fcvtau v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvtau_f641.i = tail call <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double> %a) #4
+ ret <2 x i64> %vcvtau_f641.i
+}
+
+define <2 x float> @test_vrsqrte_f32(<2 x float> %a) #0 {
+; CHECK: frsqrte v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrsqrte1.i = tail call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrsqrte1.i
+}
+
+define <4 x float> @test_vrsqrteq_f32(<4 x float> %a) #0 {
+; CHECK: frsqrte v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrsqrte1.i = tail call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrsqrte1.i
+}
+
+define <2 x double> @test_vrsqrteq_f64(<2 x double> %a) #0 {
+; CHECK: frsqrte v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrsqrte1.i = tail call <2 x double> @llvm.arm.neon.vrsqrte.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrsqrte1.i
+}
+
+define <2 x float> @test_vrecpe_f32(<2 x float> %a) #0 {
+; CHECK: frecpe v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrecpe1.i = tail call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vrecpe1.i
+}
+
+define <4 x float> @test_vrecpeq_f32(<4 x float> %a) #0 {
+; CHECK: frecpe v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrecpe1.i = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vrecpe1.i
+}
+
+define <2 x double> @test_vrecpeq_f64(<2 x double> %a) #0 {
+; CHECK: frecpe v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vrecpe1.i = tail call <2 x double> @llvm.arm.neon.vrecpe.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vrecpe1.i
+}
+
+define <2 x i32> @test_vrecpe_u32(<2 x i32> %a) #0 {
+; CHECK: urecpe v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vrecpe1.i = tail call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %a) #4
+ ret <2 x i32> %vrecpe1.i
+}
+
+define <4 x i32> @test_vrecpeq_u32(<4 x i32> %a) #0 {
+; CHECK: urecpe v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vrecpe1.i = tail call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %a) #4
+ ret <4 x i32> %vrecpe1.i
+}
+
+define <2 x float> @test_vsqrt_f32(<2 x float> %a) #0 {
+; CHECK: fsqrt v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vsqrt1.i = tail call <2 x float> @llvm.sqrt.v2f32(<2 x float> %a) #4
+ ret <2 x float> %vsqrt1.i
+}
+
+define <4 x float> @test_vsqrtq_f32(<4 x float> %a) #0 {
+; CHECK: fsqrt v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vsqrt1.i = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a) #4
+ ret <4 x float> %vsqrt1.i
+}
+
+define <2 x double> @test_vsqrtq_f64(<2 x double> %a) #0 {
+; CHECK: fsqrt v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vsqrt1.i = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %a) #4
+ ret <2 x double> %vsqrt1.i
+}
+
+define <2 x float> @test_vcvt_f32_s32(<2 x i32> %a) #0 {
+; CHECK: scvtf v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvt.i = sitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+}
+
+define <2 x float> @test_vcvt_f32_u32(<2 x i32> %a) #0 {
+; CHECK: ucvtf v{{[0-9]+}}.2s, v{{[0-9]+}}.2s
+ %vcvt.i = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %vcvt.i
+}
+
+define <4 x float> @test_vcvtq_f32_s32(<4 x i32> %a) #0 {
+; CHECK: scvtf v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvt.i = sitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
+
+define <4 x float> @test_vcvtq_f32_u32(<4 x i32> %a) #0 {
+; CHECK: ucvtf v{{[0-9]+}}.4s, v{{[0-9]+}}.4s
+ %vcvt.i = uitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %vcvt.i
+}
+
+define <2 x double> @test_vcvtq_f64_s64(<2 x i64> %a) #0 {
+; CHECK: scvtf v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvt.i = sitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %vcvt.i
+}
+
+define <2 x double> @test_vcvtq_f64_u64(<2 x i64> %a) #0 {
+; CHECK: ucvtf v{{[0-9]+}}.2d, v{{[0-9]+}}.2d
+ %vcvt.i = uitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %vcvt.i
+}
+
+declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) #2
+
+declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) #2
+
+declare <2 x float> @llvm.sqrt.v2f32(<2 x float>) #2
+
+declare <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32>) #2
+
+declare <2 x double> @llvm.arm.neon.vrecpe.v2f64(<2 x double>) #2
+
+declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) #2
+
+declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) #2
+
+declare <2 x double> @llvm.arm.neon.vrsqrte.v2f64(<2 x double>) #2
+
+declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) #2
+
+declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtau.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtau.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtau.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtas.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtas.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtas.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtmu.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtmu.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtmu.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtms.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtms.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtms.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtpu.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtpu.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtpu.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtps.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtps.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtps.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtnu.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtnu.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtnu.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.fcvtns.v2i64.v2f64(<2 x double>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.fcvtns.v4i32.v4f32(<4 x float>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.fcvtns.v2i32.v2f32(<2 x float>) #2
+
+declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.trunc.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.trunc.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.trunc.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.rint.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.rint.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.rint.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.floor.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.floor.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.ceil.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.ceil.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.ceil.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.round.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.round.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.round.v2f32(<2 x float>) #3
+
+declare <2 x double> @llvm.aarch64.neon.frintn.v2f64(<2 x double>) #2
+
+declare <4 x float> @llvm.aarch64.neon.frintn.v4f32(<4 x float>) #2
+
+declare <2 x float> @llvm.aarch64.neon.frintn.v2f32(<2 x float>) #2
+
+declare <2 x float> @llvm.aarch64.neon.fcvtxn.v2f32.v2f64(<2 x double>) #2
+
+declare <2 x float> @llvm.aarch64.neon.fcvtn.v2f32.v2f64(<2 x double>) #2
+
+declare <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64>) #2
+
+declare <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32>) #2
+
+declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) #2
+
+declare <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64>) #2
+
+declare <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32>) #2
+
+declare <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16>) #2
+
+declare <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64>) #2
+
+declare <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32>) #2
+
+declare <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16>) #2
+
+declare <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8>) #2
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) #2
+
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) #2
+
+declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) #2
+
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) #2
+
+declare <4 x i16> @llvm.ctlz.v4i16(<4 x i16>, i1) #2
+
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) #2
+
+declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1) #2
+
+declare <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32>) #2
+
+declare <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16>) #2
+
+declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8>) #2
+
+declare <2 x i64> @llvm.aarch64.neon.suqadd.v2i64(<2 x i64>, <2 x i64>) #2
+
+declare <4 x i32> @llvm.aarch64.neon.suqadd.v4i32(<4 x i32>, <4 x i32>) #2
+
+declare <2 x i32> @llvm.aarch64.neon.suqadd.v2i32(<2 x i32>, <2 x i32>) #2
+
+declare <8 x i16> @llvm.aarch64.neon.suqadd.v8i16(<8 x i16>, <8 x i16>) #2
+
+declare <4 x i16> @llvm.aarch64.neon.suqadd.v4i16(<4 x i16>, <4 x i16>) #2
+
+declare <16 x i8> @llvm.aarch64.neon.suqadd.v16i8(<16 x i8>, <16 x i8>) #2
+
+declare <8 x i8> @llvm.aarch64.neon.suqadd.v8i8(<8 x i8>, <8 x i8>) #2
+
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #3
+
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>) #3
+
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>) #3
+
+declare <2 x i64> @llvm.arm.neon.vabs.v2i64(<2 x i64>) #2
+
+declare <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32>) #2
+
+declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16>) #2
+
+declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vqneg.v2i64(<2 x i64>) #2
+
+declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) #2
+
+declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) #2
+
+declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vqabs.v2i64(<2 x i64>) #2
+
+declare <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32>) #2
+
+declare <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16>) #2
+
+declare <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8>) #2
+
+declare <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) #2
+
+declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) #2
+
+declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) #2
+
+declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) #2
+
+declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) #2
+
+declare <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64>, <2 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) #2
+
+declare <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64>, <2 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) #2
+
+declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) #2
+
+declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) #2
+
+declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) #2
+
+declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) #2
+
+declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) #2
+
+declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) #2
+
+declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) #2
+
+declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) #2
+
+declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) #2
+
+declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) #2
+
+
+define <1 x i64> @test_vcvt_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_s64_f64
+; CHECK: fcvtzs d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fptosi <1 x double> %a to <1 x i64>
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvt_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_u64_f64
+; CHECK: fcvtzu d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = fptoui <1 x double> %a to <1 x i64>
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtn_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtn_s64_f64
+; CHECK: fcvtns d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtn_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtn_u64_f64
+; CHECK: fcvtnu d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtp_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtp_s64_f64
+; CHECK: fcvtps d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtp_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtp_u64_f64
+; CHECK: fcvtpu d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtm_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtm_s64_f64
+; CHECK: fcvtms d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvtm_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvtm_u64_f64
+; CHECK: fcvtmu d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvta_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvta_s64_f64
+; CHECK: fcvtas d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvta_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvta_u64_f64
+; CHECK: fcvtau d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %a)
+ ret <1 x i64> %1
+}
+
+define <1 x double> @test_vcvt_f64_s64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_f64_s64
+; CHECK: scvtf d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = sitofp <1 x i64> %a to <1 x double>
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vcvt_f64_u64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_f64_u64
+; CHECK: ucvtf d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = uitofp <1 x i64> %a to <1 x double>
+ ret <1 x double> %1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double>)
+
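+; The vrnd* tests map the generic rounding intrinsics onto the AArch64
+; FRINT family: frintn (ties to even), frinta/llvm.round (ties away),
+; frintp/llvm.ceil, frintm/llvm.floor, frintx/llvm.rint (current mode,
+; signalling inexact), frintz/llvm.trunc and frinti/llvm.nearbyint.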
+define <1 x double> @test_vrndn_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrndn_f64
+; CHECK: frintn d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.aarch64.neon.frintn.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrnda_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrnda_f64
+; CHECK: frinta d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.round.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrndp_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrndp_f64
+; CHECK: frintp d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.ceil.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrndm_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrndm_f64
+; CHECK: frintm d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.floor.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrndx_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrndx_f64
+; CHECK: frintx d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.rint.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrnd_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrnd_f64
+; CHECK: frintz d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrndi_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrndi_f64
+; CHECK: frinti d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+declare <1 x double> @llvm.nearbyint.v1f64(<1 x double>)
+declare <1 x double> @llvm.trunc.v1f64(<1 x double>)
+declare <1 x double> @llvm.rint.v1f64(<1 x double>)
+declare <1 x double> @llvm.floor.v1f64(<1 x double>)
+declare <1 x double> @llvm.ceil.v1f64(<1 x double>)
+declare <1 x double> @llvm.round.v1f64(<1 x double>)
+declare <1 x double> @llvm.aarch64.neon.frintn.v1f64(<1 x double>)
+
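+; frecpe/frsqrte produce low-precision reciprocal (square-root) estimates;
+; frecps/frsqrts below compute the matching Newton-Raphson step terms used
+; to refine those estimates.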
+define <1 x double> @test_vrsqrte_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrsqrte_f64
+; CHECK: frsqrte d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrecpe_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vrecpe_f64
+; CHECK: frecpe d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vsqrt_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vsqrt_f64
+; CHECK: fsqrt d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.sqrt.v1f64(<1 x double> %a)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrecps_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vrecps_f64
+; CHECK: frecps d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vrsqrts_f64(<1 x double> %a, <1 x double> %b) {
+; CHECK-LABEL: test_vrsqrts_f64
+; CHECK: frsqrts d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %1 = tail call <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double> %a, <1 x double> %b)
+ ret <1 x double> %1
+}
+
+declare <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double>, <1 x double>)
+declare <1 x double> @llvm.sqrt.v1f64(<1 x double>)
+declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
+declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-mla-mls.ll b/test/CodeGen/AArch64/neon-mla-mls.ll
new file mode 100644
index 000000000000..23e9223a8b7b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-mla-mls.ll
@@ -0,0 +1,88 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+
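+; Each test multiplies two vectors and then adds the product to (or
+; subtracts it from) an accumulator, which instruction selection is
+; expected to fuse into a single mla/mls. For mls the multiply must be
+; the subtrahend; subtracting the accumulator from the product would
+; not match the pattern.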
+define <8 x i8> @mla8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
+;CHECK: mla {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = mul <8 x i8> %A, %B;
+ %tmp2 = add <8 x i8> %C, %tmp1;
+ ret <8 x i8> %tmp2
+}
+
+define <16 x i8> @mla16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
+;CHECK: mla {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = mul <16 x i8> %A, %B;
+ %tmp2 = add <16 x i8> %C, %tmp1;
+ ret <16 x i8> %tmp2
+}
+
+define <4 x i16> @mla4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
+;CHECK: mla {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+ %tmp1 = mul <4 x i16> %A, %B;
+ %tmp2 = add <4 x i16> %C, %tmp1;
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @mla8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
+;CHECK: mla {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+ %tmp1 = mul <8 x i16> %A, %B;
+ %tmp2 = add <8 x i16> %C, %tmp1;
+ ret <8 x i16> %tmp2
+}
+
+define <2 x i32> @mla2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
+;CHECK: mla {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp1 = mul <2 x i32> %A, %B;
+ %tmp2 = add <2 x i32> %C, %tmp1;
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @mla4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
+;CHECK: mla {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp1 = mul <4 x i32> %A, %B;
+ %tmp2 = add <4 x i32> %C, %tmp1;
+ ret <4 x i32> %tmp2
+}
+
+define <8 x i8> @mls8xi8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C) {
+;CHECK: mls {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp1 = mul <8 x i8> %A, %B;
+ %tmp2 = sub <8 x i8> %C, %tmp1;
+ ret <8 x i8> %tmp2
+}
+
+define <16 x i8> @mls16xi8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C) {
+;CHECK: mls {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp1 = mul <16 x i8> %A, %B;
+ %tmp2 = sub <16 x i8> %C, %tmp1;
+ ret <16 x i8> %tmp2
+}
+
+define <4 x i16> @mls4xi16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C) {
+;CHECK: mls {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+ %tmp1 = mul <4 x i16> %A, %B;
+ %tmp2 = sub <4 x i16> %C, %tmp1;
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @mls8xi16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C) {
+;CHECK: mls {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+ %tmp1 = mul <8 x i16> %A, %B;
+ %tmp2 = sub <8 x i16> %C, %tmp1;
+ ret <8 x i16> %tmp2
+}
+
+define <2 x i32> @mls2xi32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C) {
+;CHECK: mls {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp1 = mul <2 x i32> %A, %B;
+ %tmp2 = sub <2 x i32> %C, %tmp1;
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @mls4xi32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C) {
+;CHECK: mls {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp1 = mul <4 x i32> %A, %B;
+ %tmp2 = sub <4 x i32> %C, %tmp1;
+ ret <4 x i32> %tmp2
+}
+
+
diff --git a/test/CodeGen/AArch64/neon-mov.ll b/test/CodeGen/AArch64/neon-mov.ll
new file mode 100644
index 000000000000..60b13b8b9a0e
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-mov.ll
@@ -0,0 +1,217 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
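+; movi materialises a replicated 8-bit immediate, optionally shifted left
+; by 8, 16 or 24 bits (lsl); mvni materialises the bitwise inverse of the
+; same pattern. The ret constants below are the expanded per-lane values.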
+define <8 x i8> @movi8b() {
+;CHECK: movi {{v[0-31]+}}.8b, #0x8
+ ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+}
+
+define <16 x i8> @movi16b() {
+;CHECK: movi {{v[0-31]+}}.16b, #0x8
+ ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+}
+
+define <2 x i32> @movi2s_lsl0() {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff
+ ret <2 x i32> < i32 255, i32 255 >
+}
+
+define <2 x i32> @movi2s_lsl8() {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #8
+ ret <2 x i32> < i32 65280, i32 65280 >
+}
+
+define <2 x i32> @movi2s_lsl16() {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #16
+ ret <2 x i32> < i32 16711680, i32 16711680 >
+
+}
+
+define <2 x i32> @movi2s_lsl24() {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff, lsl #24
+ ret <2 x i32> < i32 4278190080, i32 4278190080 >
+}
+
+define <4 x i32> @movi4s_lsl0() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff
+ ret <4 x i32> < i32 255, i32 255, i32 255, i32 255 >
+}
+
+define <4 x i32> @movi4s_lsl8() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #8
+ ret <4 x i32> < i32 65280, i32 65280, i32 65280, i32 65280 >
+}
+
+define <4 x i32> @movi4s_lsl16() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #16
+ ret <4 x i32> < i32 16711680, i32 16711680, i32 16711680, i32 16711680 >
+
+}
+
+define <4 x i32> @movi4s_lsl24() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff, lsl #24
+ ret <4 x i32> < i32 4278190080, i32 4278190080, i32 4278190080, i32 4278190080 >
+}
+
+define <4 x i16> @movi4h_lsl0() {
+;CHECK: movi {{v[0-31]+}}.4h, #0xff
+ ret <4 x i16> < i16 255, i16 255, i16 255, i16 255 >
+}
+
+define <4 x i16> @movi4h_lsl8() {
+;CHECK: movi {{v[0-31]+}}.4h, #0xff, lsl #8
+ ret <4 x i16> < i16 65280, i16 65280, i16 65280, i16 65280 >
+}
+
+define <8 x i16> @movi8h_lsl0() {
+;CHECK: movi {{v[0-31]+}}.8h, #0xff
+ ret <8 x i16> < i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255 >
+}
+
+define <8 x i16> @movi8h_lsl8() {
+;CHECK: movi {{v[0-31]+}}.8h, #0xff, lsl #8
+ ret <8 x i16> < i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280, i16 65280 >
+}
+
+
+define <2 x i32> @mvni2s_lsl0() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10
+ ret <2 x i32> < i32 4294967279, i32 4294967279 >
+}
+
+define <2 x i32> @mvni2s_lsl8() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #8
+ ret <2 x i32> < i32 4294963199, i32 4294963199 >
+}
+
+define <2 x i32> @mvni2s_lsl16() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #16
+ ret <2 x i32> < i32 4293918719, i32 4293918719 >
+}
+
+define <2 x i32> @mvni2s_lsl24() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10, lsl #24
+ ret <2 x i32> < i32 4026531839, i32 4026531839 >
+}
+
+define <4 x i32> @mvni4s_lsl0() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10
+ ret <4 x i32> < i32 4294967279, i32 4294967279, i32 4294967279, i32 4294967279 >
+}
+
+define <4 x i32> @mvni4s_lsl8() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #8
+ ret <4 x i32> < i32 4294963199, i32 4294963199, i32 4294963199, i32 4294963199 >
+}
+
+define <4 x i32> @mvni4s_lsl16() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #16
+ ret <4 x i32> < i32 4293918719, i32 4293918719, i32 4293918719, i32 4293918719 >
+
+}
+
+define <4 x i32> @mvni4s_lsl24() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10, lsl #24
+ ret <4 x i32> < i32 4026531839, i32 4026531839, i32 4026531839, i32 4026531839 >
+}
+
+
+define <4 x i16> @mvni4h_lsl0() {
+;CHECK: mvni {{v[0-31]+}}.4h, #0x10
+ ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
+}
+
+define <4 x i16> @mvni4h_lsl8() {
+;CHECK: mvni {{v[0-31]+}}.4h, #0x10, lsl #8
+ ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
+}
+
+define <8 x i16> @mvni8h_lsl0() {
+;CHECK: mvni {{v[0-31]+}}.8h, #0x10
+ ret <8 x i16> < i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519, i16 65519 >
+}
+
+define <8 x i16> @mvni8h_lsl8() {
+;CHECK: mvni {{v[0-31]+}}.8h, #0x10, lsl #8
+ ret <8 x i16> < i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439, i16 61439 >
+}
+
+
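+; msl ("shift ones") immediates shift the 8-bit value left and fill the
+; vacated bits with ones, so #0xff, msl #8 encodes 0x0000ffff (65535) per
+; 32-bit lane; the mvni forms invert that pattern.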
+define <2 x i32> @movi2s_msl8(<2 x i32> %a) {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff, msl #8
+ ret <2 x i32> < i32 65535, i32 65535 >
+}
+
+define <2 x i32> @movi2s_msl16() {
+;CHECK: movi {{v[0-31]+}}.2s, #0xff, msl #16
+ ret <2 x i32> < i32 16777215, i32 16777215 >
+}
+
+
+define <4 x i32> @movi4s_msl8() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff, msl #8
+ ret <4 x i32> < i32 65535, i32 65535, i32 65535, i32 65535 >
+}
+
+define <4 x i32> @movi4s_msl16() {
+;CHECK: movi {{v[0-31]+}}.4s, #0xff, msl #16
+ ret <4 x i32> < i32 16777215, i32 16777215, i32 16777215, i32 16777215 >
+}
+
+define <2 x i32> @mvni2s_msl8() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10, msl #8
+ ret <2 x i32> < i32 4294962944, i32 4294962944 >
+}
+
+define <2 x i32> @mvni2s_msl16() {
+;CHECK: mvni {{v[0-31]+}}.2s, #0x10, msl #16
+ ret <2 x i32> < i32 4293918720, i32 4293918720 >
+}
+
+define <4 x i32> @mvni4s_msl8() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10, msl #8
+ ret <4 x i32> < i32 4294962944, i32 4294962944, i32 4294962944, i32 4294962944 >
+}
+
+define <4 x i32> @mvni4s_msl16() {
+;CHECK: mvni {{v[0-31]+}}.4s, #0x10, msl #16
+ ret <4 x i32> < i32 4293918720, i32 4293918720, i32 4293918720, i32 4293918720 >
+}
+
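+; The 64-bit movi form expands each bit of the 8-bit immediate into a full
+; byte of 0x00 or 0xff, which is why 0xff0000ff0000ffff is encodable.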
+define <2 x i64> @movi2d() {
+;CHECK: movi {{v[0-31]+}}.2d, #0xff0000ff0000ffff
+ ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
+}
+
+define <1 x i64> @movid() {
+;CHECK: movi {{d[0-31]+}}, #0xff0000ff0000ffff
+ ret <1 x i64> < i64 18374687574888349695 >
+}
+
+define <2 x float> @fmov2s() {
+;CHECK: fmov {{v[0-31]+}}.2s, #-12.00000000
+ ret <2 x float> < float -1.2e1, float -1.2e1>
+}
+
+define <4 x float> @fmov4s() {
+;CHECK: fmov {{v[0-31]+}}.4s, #-12.00000000
+ ret <4 x float> < float -1.2e1, float -1.2e1, float -1.2e1, float -1.2e1>
+}
+
+define <2 x double> @fmov2d() {
+;CHECK: fmov {{v[0-31]+}}.2d, #-12.00000000
+ ret <2 x double> < double -1.2e1, double -1.2e1>
+}
+
+define <2 x i32> @movi1d_1() {
+; CHECK: movi d0, #0xffffffff0000
+ ret <2 x i32> < i32 -65536, i32 65535>
+}
+
+
+declare <2 x i32> @test_movi1d(<2 x i32>, <2 x i32>)
+define <2 x i32> @movi1d() {
+; CHECK: movi d1, #0xffffffff0000
+ %1 = tail call <2 x i32> @test_movi1d(<2 x i32> <i32 -2147483648, i32 2147450880>, <2 x i32> <i32 -65536, i32 65535>)
+ ret <2 x i32> %1
+}
+
diff --git a/test/CodeGen/AArch64/neon-mul-div.ll b/test/CodeGen/AArch64/neon-mul-div.ll
new file mode 100644
index 000000000000..e1be31326638
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-mul-div.ll
@@ -0,0 +1,181 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+
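+; Plain IR mul/fmul/fdiv on vector types should select the corresponding
+; NEON mul, fmul and fdiv instructions; the polynomial and saturating
+; doubling multiplies further down go through the vmulp/vqdmulh/vqrdmulh
+; intrinsics instead.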
+define <8 x i8> @mul8xi8(<8 x i8> %A, <8 x i8> %B) {
+;CHECK: mul {{v[0-31]+}}.8b, {{v[0-31]+}}.8b, {{v[0-31]+}}.8b
+ %tmp3 = mul <8 x i8> %A, %B;
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @mul16xi8(<16 x i8> %A, <16 x i8> %B) {
+;CHECK: mul {{v[0-31]+}}.16b, {{v[0-31]+}}.16b, {{v[0-31]+}}.16b
+ %tmp3 = mul <16 x i8> %A, %B;
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @mul4xi16(<4 x i16> %A, <4 x i16> %B) {
+;CHECK: mul {{v[0-31]+}}.4h, {{v[0-31]+}}.4h, {{v[0-31]+}}.4h
+ %tmp3 = mul <4 x i16> %A, %B;
+ ret <4 x i16> %tmp3
+}
+
+define <8 x i16> @mul8xi16(<8 x i16> %A, <8 x i16> %B) {
+;CHECK: mul {{v[0-31]+}}.8h, {{v[0-31]+}}.8h, {{v[0-31]+}}.8h
+ %tmp3 = mul <8 x i16> %A, %B;
+ ret <8 x i16> %tmp3
+}
+
+define <2 x i32> @mul2xi32(<2 x i32> %A, <2 x i32> %B) {
+;CHECK: mul {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = mul <2 x i32> %A, %B;
+ ret <2 x i32> %tmp3
+}
+
+define <4 x i32> @mul4x32(<4 x i32> %A, <4 x i32> %B) {
+;CHECK: mul {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = mul <4 x i32> %A, %B;
+ ret <4 x i32> %tmp3
+}
+
+define <2 x float> @mul2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fmul {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = fmul <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @mul4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fmul {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = fmul <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @mul2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fmul {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = fmul <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+
+define <2 x float> @div2xfloat(<2 x float> %A, <2 x float> %B) {
+;CHECK: fdiv {{v[0-31]+}}.2s, {{v[0-31]+}}.2s, {{v[0-31]+}}.2s
+ %tmp3 = fdiv <2 x float> %A, %B;
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @div4xfloat(<4 x float> %A, <4 x float> %B) {
+;CHECK: fdiv {{v[0-31]+}}.4s, {{v[0-31]+}}.4s, {{v[0-31]+}}.4s
+ %tmp3 = fdiv <4 x float> %A, %B;
+ ret <4 x float> %tmp3
+}
+define <2 x double> @div2xdouble(<2 x double> %A, <2 x double> %B) {
+;CHECK: fdiv {{v[0-31]+}}.2d, {{v[0-31]+}}.2d, {{v[0-31]+}}.2d
+ %tmp3 = fdiv <2 x double> %A, %B;
+ ret <2 x double> %tmp3
+}
+
+declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>)
+
+define <8 x i8> @poly_mulv8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: poly_mulv8i8:
+ %prod = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: pmul v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %prod
+}
+
+define <16 x i8> @poly_mulv16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: poly_mulv16i8:
+ %prod = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: pmul v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %prod
+}
+
+declare <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>)
+declare <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i16> @test_sqdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqdmulh_v4i16:
+ %prod = call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqdmulh v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %prod
+}
+
+define <8 x i16> @test_sqdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqdmulh_v8i16:
+ %prod = call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqdmulh v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %prod
+}
+
+define <2 x i32> @test_sqdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqdmulh_v2i32:
+ %prod = call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqdmulh v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %prod
+}
+
+define <4 x i32> @test_sqdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqdmulh_v4i32:
+ %prod = call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqdmulh v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %prod
+}
+
+declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>)
+declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i16> @test_sqrdmulh_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqrdmulh_v4i16:
+ %prod = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqrdmulh v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %prod
+}
+
+define <8 x i16> @test_sqrdmulh_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqrdmulh_v8i16:
+ %prod = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqrdmulh v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %prod
+}
+
+define <2 x i32> @test_sqrdmulh_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqrdmulh_v2i32:
+ %prod = call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqrdmulh v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %prod
+}
+
+define <4 x i32> @test_sqrdmulh_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqrdmulh_v4i32:
+ %prod = call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqrdmulh v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %prod
+}
+
+declare <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float>, <2 x float>)
+declare <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double>, <2 x double>)
+
+define <2 x float> @fmulx_v2f32(<2 x float> %lhs, <2 x float> %rhs) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.2s, v0.2s, v1.2s
+ %val = call <2 x float> @llvm.aarch64.neon.vmulx.v2f32(<2 x float> %lhs, <2 x float> %rhs)
+ ret <2 x float> %val
+}
+
+define <4 x float> @fmulx_v4f32(<4 x float> %lhs, <4 x float> %rhs) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.4s, v0.4s, v1.4s
+ %val = call <4 x float> @llvm.aarch64.neon.vmulx.v4f32(<4 x float> %lhs, <4 x float> %rhs)
+ ret <4 x float> %val
+}
+
+define <2 x double> @fmulx_v2f64(<2 x double> %lhs, <2 x double> %rhs) {
+; Using registers other than v0, v1 and v2 is possible, but would be odd.
+; CHECK: fmulx v0.2d, v0.2d, v1.2d
+ %val = call <2 x double> @llvm.aarch64.neon.vmulx.v2f64(<2 x double> %lhs, <2 x double> %rhs)
+ ret <2 x double> %val
+}
diff --git a/test/CodeGen/AArch64/neon-perm.ll b/test/CodeGen/AArch64/neon-perm.ll
new file mode 100644
index 000000000000..fa4d54dc745f
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-perm.ll
@@ -0,0 +1,1693 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+%struct.int8x8x2_t = type { [2 x <8 x i8>] }
+%struct.int16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int32x2x2_t = type { [2 x <2 x i32>] }
+%struct.uint8x8x2_t = type { [2 x <8 x i8>] }
+%struct.uint16x4x2_t = type { [2 x <4 x i16>] }
+%struct.uint32x2x2_t = type { [2 x <2 x i32>] }
+%struct.float32x2x2_t = type { [2 x <2 x float>] }
+%struct.poly8x8x2_t = type { [2 x <8 x i8>] }
+%struct.poly16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int8x16x2_t = type { [2 x <16 x i8>] }
+%struct.int16x8x2_t = type { [2 x <8 x i16>] }
+%struct.int32x4x2_t = type { [2 x <4 x i32>] }
+%struct.uint8x16x2_t = type { [2 x <16 x i8>] }
+%struct.uint16x8x2_t = type { [2 x <8 x i16>] }
+%struct.uint32x4x2_t = type { [2 x <4 x i32>] }
+%struct.float32x4x2_t = type { [2 x <4 x float>] }
+%struct.poly8x16x2_t = type { [2 x <16 x i8>] }
+%struct.poly16x8x2_t = type { [2 x <8 x i16>] }
+
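+; uzp1/uzp2 select the even/odd-indexed elements of the concatenated
+; inputs and zip1/zip2 interleave their low/high halves. Two-element
+; cases have no distinct permute, so the backend is expected to lower
+; them with ins (element insert) instead.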
+define <8 x i8> @test_vuzp1_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp1_s8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp1q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp1q_s8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp1_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp1_s16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp1q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp1q_s16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vuzp1_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp1_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vuzp1q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzp1q_s32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vuzp1q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vuzp1q_s64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vuzp1_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp1_u8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp1q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp1q_u8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp1_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp1_u16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp1q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp1q_u16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vuzp1_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp1_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vuzp1q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzp1q_u32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vuzp1q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vuzp1q_u64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vuzp1_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vuzp1_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vuzp1q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vuzp1q_f32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vuzp1q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vuzp1q_f64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vuzp1_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp1_p8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp1q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp1q_p8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp1_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp1_p16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp1q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp1q_p16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vuzp2_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp2_s8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp2q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp2q_s8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp2_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp2_s16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp2q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp2q_s16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vuzp2_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp2_s32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vuzp2q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzp2q_s32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vuzp2q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vuzp2q_s64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vuzp2_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp2_u8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp2q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp2q_u8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp2_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp2_u16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp2q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp2q_u16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vuzp2_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp2_u32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vuzp2q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzp2q_u32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vuzp2q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vuzp2q_u64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vuzp2_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vuzp2_f32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vuzp2q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vuzp2q_f32:
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vuzp2q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vuzp2q_f64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+; CHECK-NEXT: orr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vuzp2_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp2_p8:
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vuzp2q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzp2q_p8:
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vuzp2_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp2_p16:
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vuzp2q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzp2q_p16:
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vzip1_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip1_s8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip1q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip1q_s8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip1_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip1_s16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip1q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip1q_s16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vzip1_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip1_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vzip1q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzip1q_s32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vzip1q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vzip1q_s64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vzip1_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip1_u8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip1q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip1q_u8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip1_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip1_u16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip1q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip1q_u16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vzip1_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip1_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vzip1q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzip1q_u32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vzip1q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vzip1q_u64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vzip1_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vzip1_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vzip1q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vzip1q_f32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vzip1q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vzip1q_f64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vzip1_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip1_p8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip1q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip1q_p8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip1_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip1_p16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip1q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip1q_p16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vzip2_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip2_s8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip2q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip2q_s8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip2_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip2_s16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip2q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip2q_s16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vzip2_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip2_s32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vzip2q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzip2q_s32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vzip2q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vzip2q_s64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vzip2_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip2_u8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip2q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip2q_u8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip2_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip2_u16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip2q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip2q_u16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vzip2_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip2_u32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vzip2q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzip2q_u32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vzip2q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vzip2q_u64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vzip2_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vzip2_f32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vzip2q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vzip2q_f32:
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vzip2q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vzip2q_f64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vzip2_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip2_p8:
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vzip2q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzip2q_p8:
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vzip2_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip2_p16:
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vzip2q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzip2q_p16:
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn1_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn1_s8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn1q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn1q_s8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn1_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn1_s16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn1q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn1q_s16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vtrn1_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn1_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vtrn1q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrn1q_s32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vtrn1q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vtrn1q_s64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn1_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn1_u8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn1q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn1q_u8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn1_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn1_u16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn1q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn1q_u16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vtrn1_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn1_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vtrn1q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrn1q_u32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vtrn1q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vtrn1q_u64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vtrn1_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vtrn1_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vtrn1q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vtrn1q_f32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vtrn1q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vtrn1q_f64:
+; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn1_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn1_p8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn1q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn1q_p8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn1_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn1_p16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn1q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn1q_p16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ ret <8 x i16> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn2_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn2_s8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn2q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn2q_s8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn2_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn2_s16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn2q_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn2q_s16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vtrn2_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn2_s32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vtrn2q_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrn2q_s32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vtrn2q_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vtrn2q_s64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn2_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn2_u8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn2q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn2q_u8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn2_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn2_u16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn2q_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn2q_u16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vtrn2_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn2_u32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <4 x i32> @test_vtrn2q_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrn2q_u32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i32> %shuffle.i
+}
+
+define <2 x i64> @test_vtrn2q_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vtrn2q_u64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %shuffle.i
+}
+
+define <2 x float> @test_vtrn2_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vtrn2_f32:
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %shuffle.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <4 x float> @test_vtrn2q_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vtrn2q_f32:
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x float> %shuffle.i
+}
+
+define <2 x double> @test_vtrn2q_f64(<2 x double> %a, <2 x double> %b) {
+; CHECK: test_vtrn2q_f64:
+; CHECK: ins {{v[0-9]+}}.d[0], {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vtrn2_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn2_p8:
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %shuffle.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <16 x i8> @test_vtrn2q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrn2q_p8:
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <16 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vtrn2_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn2_p16:
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %shuffle.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <8 x i16> @test_vtrn2q_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrn2q_p16:
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <8 x i16> %shuffle.i
+}
+
+define %struct.int8x8x2_t @test_vuzp_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp_s8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vuzp.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vuzp_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp_s16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vuzp.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vuzp1.i, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vuzp_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vuzp.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vuzp1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vuzp1.i, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x8x2_t @test_vuzp_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp_u8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vuzp.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint8x8x2_t undef, <8 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
+ ret %struct.uint8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x4x2_t @test_vuzp_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp_u16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vuzp.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint16x4x2_t undef, <4 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x4x2_t %.fca.0.0.insert, <4 x i16> %vuzp1.i, 0, 1
+ ret %struct.uint16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x2x2_t @test_vuzp_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vuzp_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vuzp.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vuzp1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.uint32x2x2_t undef, <2 x i32> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x2x2_t %.fca.0.0.insert, <2 x i32> %vuzp1.i, 0, 1
+ ret %struct.uint32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vuzp_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vuzp_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vuzp.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ %vuzp1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vuzp1.i, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x8x2_t @test_vuzp_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vuzp_p8:
+; CHECK: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vuzp.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly8x8x2_t undef, <8 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
+ ret %struct.poly8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x4x2_t @test_vuzp_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vuzp_p16:
+; CHECK: uzp1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: uzp2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vuzp.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.poly16x4x2_t undef, <4 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x4x2_t %.fca.0.0.insert, <4 x i16> %vuzp1.i, 0, 1
+ ret %struct.poly16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x2_t @test_vuzpq_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzpq_s8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vuzp.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %vuzp1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vuzp1.i, 0, 1
+ ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vuzpq_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzpq_s16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vuzp.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vuzp1.i, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vuzpq_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzpq_s32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vuzp.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vuzp1.i, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x16x2_t @test_vuzpq_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzpq_u8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vuzp.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %vuzp1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.uint8x16x2_t undef, <16 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x16x2_t %.fca.0.0.insert, <16 x i8> %vuzp1.i, 0, 1
+ ret %struct.uint8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x8x2_t @test_vuzpq_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzpq_u16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vuzp.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint16x8x2_t undef, <8 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x8x2_t %.fca.0.0.insert, <8 x i16> %vuzp1.i, 0, 1
+ ret %struct.uint16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x4x2_t @test_vuzpq_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vuzpq_u32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vuzp.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint32x4x2_t undef, <4 x i32> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x4x2_t %.fca.0.0.insert, <4 x i32> %vuzp1.i, 0, 1
+ ret %struct.uint32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vuzpq_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vuzpq_f32:
+; CHECK: uzp1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: uzp2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vuzp.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vuzp1.i, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x16x2_t @test_vuzpq_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vuzpq_p8:
+; CHECK: uzp1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: uzp2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vuzp.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %vuzp1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.poly8x16x2_t undef, <16 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x16x2_t %.fca.0.0.insert, <16 x i8> %vuzp1.i, 0, 1
+ ret %struct.poly8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x8x2_t @test_vuzpq_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vuzpq_p16:
+; CHECK: uzp1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: uzp2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vuzp.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly16x8x2_t undef, <8 x i16> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x8x2_t %.fca.0.0.insert, <8 x i16> %vuzp1.i, 0, 1
+ ret %struct.poly16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x8x2_t @test_vzip_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip_s8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vzip.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vzip1.i, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vzip_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip_s16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vzip.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vzip1.i, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vzip_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vzip.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vzip1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vzip1.i, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x8x2_t @test_vzip_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip_u8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vzip.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint8x8x2_t undef, <8 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vzip1.i, 0, 1
+ ret %struct.uint8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x4x2_t @test_vzip_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip_u16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vzip.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint16x4x2_t undef, <4 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x4x2_t %.fca.0.0.insert, <4 x i16> %vzip1.i, 0, 1
+ ret %struct.uint16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x2x2_t @test_vzip_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vzip_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vzip.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vzip1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.uint32x2x2_t undef, <2 x i32> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x2x2_t %.fca.0.0.insert, <2 x i32> %vzip1.i, 0, 1
+ ret %struct.uint32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vzip_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vzip_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vzip.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ %vzip1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vzip1.i, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x8x2_t @test_vzip_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vzip_p8:
+; CHECK: zip1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: zip2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vzip.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly8x8x2_t undef, <8 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x8x2_t %.fca.0.0.insert, <8 x i8> %vzip1.i, 0, 1
+ ret %struct.poly8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x4x2_t @test_vzip_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vzip_p16:
+; CHECK: zip1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: zip2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vzip.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.poly16x4x2_t undef, <4 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x4x2_t %.fca.0.0.insert, <4 x i16> %vzip1.i, 0, 1
+ ret %struct.poly16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x2_t @test_vzipq_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzipq_s8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vzip.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %vzip1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vzip1.i, 0, 1
+ ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vzipq_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzipq_s16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vzip.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vzip1.i, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vzipq_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzipq_s32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vzip.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vzip1.i, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x16x2_t @test_vzipq_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzipq_u8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vzip.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %vzip1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.uint8x16x2_t undef, <16 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x16x2_t %.fca.0.0.insert, <16 x i8> %vzip1.i, 0, 1
+ ret %struct.uint8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x8x2_t @test_vzipq_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzipq_u16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vzip.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint16x8x2_t undef, <8 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x8x2_t %.fca.0.0.insert, <8 x i16> %vzip1.i, 0, 1
+ ret %struct.uint16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x4x2_t @test_vzipq_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vzipq_u32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vzip.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint32x4x2_t undef, <4 x i32> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x4x2_t %.fca.0.0.insert, <4 x i32> %vzip1.i, 0, 1
+ ret %struct.uint32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vzipq_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vzipq_f32:
+; CHECK: zip1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: zip2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vzip.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vzip1.i, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x16x2_t @test_vzipq_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vzipq_p8:
+; CHECK: zip1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: zip2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vzip.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %vzip1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.poly8x16x2_t undef, <16 x i8> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x16x2_t %.fca.0.0.insert, <16 x i8> %vzip1.i, 0, 1
+ ret %struct.poly8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x8x2_t @test_vzipq_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vzipq_p16:
+; CHECK: zip1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: zip2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vzip.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly16x8x2_t undef, <8 x i16> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x8x2_t %.fca.0.0.insert, <8 x i16> %vzip1.i, 0, 1
+ ret %struct.poly16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x8x2_t @test_vtrn_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn_s8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vtrn.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vtrn1.i, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vtrn_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn_s16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vtrn.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vtrn1.i, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vtrn_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn_s32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vtrn.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vtrn1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vtrn1.i, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x8x2_t @test_vtrn_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn_u8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vtrn.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint8x8x2_t undef, <8 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vtrn1.i, 0, 1
+ ret %struct.uint8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x4x2_t @test_vtrn_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn_u16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vtrn.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint16x4x2_t undef, <4 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x4x2_t %.fca.0.0.insert, <4 x i16> %vtrn1.i, 0, 1
+ ret %struct.uint16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x2x2_t @test_vtrn_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vtrn_u32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vtrn.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
+ %vtrn1.i = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.uint32x2x2_t undef, <2 x i32> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x2x2_t %.fca.0.0.insert, <2 x i32> %vtrn1.i, 0, 1
+ ret %struct.uint32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vtrn_f32(<2 x float> %a, <2 x float> %b) {
+; CHECK: test_vtrn_f32:
+; CHECK: ins {{v[0-9]+}}.s[1], {{v[0-9]+}}.s[0]
+; CHECK: ins {{v[0-9]+}}.s[0], {{v[0-9]+}}.s[1]
+entry:
+ %vtrn.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 0, i32 2>
+ %vtrn1.i = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> <i32 1, i32 3>
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vtrn1.i, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x8x2_t @test_vtrn_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtrn_p8:
+; CHECK: trn1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK: trn2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+entry:
+ %vtrn.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly8x8x2_t undef, <8 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x8x2_t %.fca.0.0.insert, <8 x i8> %vtrn1.i, 0, 1
+ ret %struct.poly8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x4x2_t @test_vtrn_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vtrn_p16:
+; CHECK: trn1 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; CHECK: trn2 {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+entry:
+ %vtrn.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.poly16x4x2_t undef, <4 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x4x2_t %.fca.0.0.insert, <4 x i16> %vtrn1.i, 0, 1
+ ret %struct.poly16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x2_t @test_vtrnq_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrnq_s8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vtrn.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %vtrn1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vtrn1.i, 0, 1
+ ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vtrnq_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrnq_s16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vtrn.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vtrn1.i, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vtrnq_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrnq_s32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vtrn.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vtrn1.i, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x16x2_t @test_vtrnq_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrnq_u8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vtrn.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %vtrn1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.uint8x16x2_t undef, <16 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x16x2_t %.fca.0.0.insert, <16 x i8> %vtrn1.i, 0, 1
+ ret %struct.uint8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.uint16x8x2_t @test_vtrnq_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrnq_u16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vtrn.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint16x8x2_t undef, <8 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint16x8x2_t %.fca.0.0.insert, <8 x i16> %vtrn1.i, 0, 1
+ ret %struct.uint16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint32x4x2_t @test_vtrnq_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vtrnq_u32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vtrn.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.uint32x4x2_t undef, <4 x i32> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint32x4x2_t %.fca.0.0.insert, <4 x i32> %vtrn1.i, 0, 1
+ ret %struct.uint32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vtrnq_f32(<4 x float> %a, <4 x float> %b) {
+; CHECK: test_vtrnq_f32:
+; CHECK: trn1 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: trn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+ %vtrn.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vtrn1.i, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x16x2_t @test_vtrnq_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vtrnq_p8:
+; CHECK: trn1 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+; CHECK: trn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
+entry:
+ %vtrn.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %vtrn1.i = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %.fca.0.0.insert = insertvalue %struct.poly8x16x2_t undef, <16 x i8> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly8x16x2_t %.fca.0.0.insert, <16 x i8> %vtrn1.i, 0, 1
+ ret %struct.poly8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.poly16x8x2_t @test_vtrnq_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vtrnq_p16:
+; CHECK: trn1 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; CHECK: trn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+entry:
+ %vtrn.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.poly16x8x2_t undef, <8 x i16> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.poly16x8x2_t %.fca.0.0.insert, <8 x i16> %vtrn1.i, 0, 1
+ ret %struct.poly16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x8x2_t @test_uzp(<16 x i8> %y) {
+; CHECK: test_uzp:
+
+ %vuzp.i = shufflevector <16 x i8> %y, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <16 x i8> %y, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.uint8x8x2_t undef, <8 x i8> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.uint8x8x2_t %.fca.0.0.insert, <8 x i8> %vuzp1.i, 0, 1
+ ret %struct.uint8x8x2_t %.fca.0.1.insert
+
+; CHECK: dup {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-NEXT: uzp1 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+; CHECK-NEXT: uzp2 {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
+}
diff --git a/test/CodeGen/AArch64/neon-rounding-halving-add.ll b/test/CodeGen/AArch64/neon-rounding-halving-add.ll
new file mode 100644
index 000000000000..009da3b51a83
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-rounding-halving-add.ll
@@ -0,0 +1,105 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_urhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_urhadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: urhadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_srhadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_srhadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: srhadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_urhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_urhadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: urhadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_srhadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_srhadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: srhadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_urhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_urhadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: urhadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_srhadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_srhadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: srhadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_urhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_urhadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: urhadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_srhadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_srhadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: srhadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_urhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_urhadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: urhadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_srhadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_srhadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: srhadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_urhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_urhadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: urhadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_srhadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_srhadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: srhadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+
diff --git a/test/CodeGen/AArch64/neon-rounding-shift.ll b/test/CodeGen/AArch64/neon-rounding-shift.ll
new file mode 100644
index 000000000000..5b4ec2862c79
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-rounding-shift.ll
@@ -0,0 +1,121 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_urshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_urshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: urshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_srshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_srshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: srshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_urshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_urshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: urshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_srshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_srshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: srshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_urshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_urshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: urshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_srshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_srshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: srshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_urshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_urshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: urshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_srshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_srshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: srshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_urshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_urshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: urshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_srshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_srshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: srshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_urshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_urshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: urshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_srshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_srshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: srshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_urshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_urshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: urshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_srshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_srshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: srshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
diff --git a/test/CodeGen/AArch64/neon-saturating-add-sub.ll b/test/CodeGen/AArch64/neon-saturating-add-sub.ll
new file mode 100644
index 000000000000..fc60d900e4db
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-saturating-add-sub.ll
@@ -0,0 +1,241 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uqadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uqadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_sqadd_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sqadd_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sqadd v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uqadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uqadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_sqadd_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sqadd_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sqadd v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uqadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uqadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_sqadd_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqadd_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqadd v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uqadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uqadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_sqadd_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqadd_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqadd v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uqadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uqadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_sqadd_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqadd_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqadd v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uqadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uqadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_sqadd_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqadd_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqadd v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+
+
+declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_uqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_uqadd_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: uqadd v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_sqadd_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_sqadd_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: sqadd v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+declare <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uqsub_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uqsub v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_sqsub_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sqsub_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sqsub v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uqsub_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uqsub v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_sqsub_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sqsub_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sqsub v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uqsub_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uqsub v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_sqsub_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqsub_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqsub v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uqsub_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uqsub v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_sqsub_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqsub_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqsub v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uqsub_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uqsub v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_sqsub_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqsub_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqsub v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uqsub_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uqsub v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_sqsub_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqsub_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqsub v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_uqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_uqsub_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: uqsub v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_sqsub_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_sqsub_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: sqsub v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
diff --git a/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll b/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll
new file mode 100644
index 000000000000..d89262c2abaa
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-saturating-rounding-shift.ll
@@ -0,0 +1,121 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uqrshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uqrshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uqrshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_sqrshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sqrshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sqrshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uqrshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uqrshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uqrshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_sqrshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sqrshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sqrshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uqrshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uqrshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uqrshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_sqrshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqrshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqrshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uqrshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uqrshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uqrshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_sqrshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqrshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqrshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uqrshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uqrshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uqrshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_sqrshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqrshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqrshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uqrshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uqrshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uqrshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_sqrshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqrshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqrshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_uqrshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_uqrshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: uqrshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_sqrshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_sqrshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: sqrshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
diff --git a/test/CodeGen/AArch64/neon-saturating-shift.ll b/test/CodeGen/AArch64/neon-saturating-shift.ll
new file mode 100644
index 000000000000..11009fba7511
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-saturating-shift.ll
@@ -0,0 +1,121 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_uqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_uqshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: uqshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_sqshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sqshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sqshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_uqshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_uqshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: uqshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_sqshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sqshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sqshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_uqshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_uqshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: uqshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_sqshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sqshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sqshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_uqshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_uqshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: uqshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_sqshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sqshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sqshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_uqshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_uqshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: uqshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_sqshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sqshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sqshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_uqshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_uqshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: uqshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_sqshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sqshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sqshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_uqshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_uqshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: uqshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_sqshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_sqshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: sqshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-abs.ll b/test/CodeGen/AArch64/neon-scalar-abs.ll
new file mode 100644
index 000000000000..03a89e043e50
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-abs.ll
@@ -0,0 +1,61 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define i64 @test_vabsd_s64(i64 %a) {
+; CHECK: test_vabsd_s64
+; CHECK: abs {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vabs.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vabs1.i = tail call <1 x i64> @llvm.aarch64.neon.vabs(<1 x i64> %vabs.i)
+ %0 = extractelement <1 x i64> %vabs1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vabs(<1 x i64>)
+
+define i8 @test_vqabsb_s8(i8 %a) {
+; CHECK: test_vqabsb_s8
+; CHECK: sqabs {{b[0-9]+}}, {{b[0-9]+}}
+entry:
+ %vqabs.i = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vqabs1.i = call <1 x i8> @llvm.arm.neon.vqabs.v1i8(<1 x i8> %vqabs.i)
+ %0 = extractelement <1 x i8> %vqabs1.i, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.arm.neon.vqabs.v1i8(<1 x i8>)
+
+define i16 @test_vqabsh_s16(i16 %a) {
+; CHECK: test_vqabsh_s16
+; CHECK: sqabs {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqabs.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqabs1.i = call <1 x i16> @llvm.arm.neon.vqabs.v1i16(<1 x i16> %vqabs.i)
+ %0 = extractelement <1 x i16> %vqabs1.i, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.arm.neon.vqabs.v1i16(<1 x i16>)
+
+define i32 @test_vqabss_s32(i32 %a) {
+; CHECK: test_vqabss_s32
+; CHECK: sqabs {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqabs.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqabs1.i = call <1 x i32> @llvm.arm.neon.vqabs.v1i32(<1 x i32> %vqabs.i)
+ %0 = extractelement <1 x i32> %vqabs1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.arm.neon.vqabs.v1i32(<1 x i32>)
+
+define i64 @test_vqabsd_s64(i64 %a) {
+; CHECK: test_vqabsd_s64
+; CHECK: sqabs {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vqabs.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqabs1.i = call <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64> %vqabs.i)
+ %0 = extractelement <1 x i64> %vqabs1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.arm.neon.vqabs.v1i64(<1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-add-sub.ll b/test/CodeGen/AArch64/neon-scalar-add-sub.ll
new file mode 100644
index 000000000000..09ca880c8053
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-add-sub.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <1 x i64> @add1xi64(<1 x i64> %A, <1 x i64> %B) {
+;CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ %tmp3 = add <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+define <1 x i64> @sub1xi64(<1 x i64> %A, <1 x i64> %B) {
+;CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ %tmp3 = sub <1 x i64> %A, %B;
+ ret <1 x i64> %tmp3
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vaddds(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vadddu(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_add_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_add_v1i64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vaddds(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_uadd_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uadd_v1i64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vadddu(<1 x i64> %lhs, <1 x i64> %rhs)
+;CHECK: add {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsubds(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vsubdu(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_sub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sub_v1i64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vsubds(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_usub_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_usub_v1i64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vsubdu(<1 x i64> %lhs, <1 x i64> %rhs)
+;CHECK: sub {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+
+
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
new file mode 100644
index 000000000000..8ce42def409a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+declare float @llvm.fma.f32(float, float, float)
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_fmla_ss4S(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmla_ss4S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+ ret float %tmp2
+}
+
+define float @test_fmla_ss4S_swap(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmla_ss4S_swap
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
+ ret float %tmp2
+}
+
+define float @test_fmla_ss2S(float %a, float %b, <2 x float> %v) {
+ ; CHECK: test_fmla_ss2S
+ ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
+ ret float %tmp2
+}
+
+define double @test_fmla_ddD(double %a, double %b, <1 x double> %v) {
+ ; CHECK: test_fmla_ddD
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+ ret double %tmp2
+}
+
+define double @test_fmla_dd2D(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmla_dd2D
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
+ ret double %tmp2
+}
+
+define double @test_fmla_dd2D_swap(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmla_dd2D_swap
+ ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
+ ret double %tmp2
+}
+
+define float @test_fmls_ss4S(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmls_ss4S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+ ret float %tmp3
+}
+
+define float @test_fmls_ss4S_swap(float %a, float %b, <4 x float> %v) {
+ ; CHECK: test_fmls_ss4S_swap
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
+ ret float %tmp3
+}
+
+
+define float @test_fmls_ss2S(float %a, float %b, <2 x float> %v) {
+ ; CHECK: test_fmls_ss2S
+ ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fsub float -0.0, %tmp1
+ %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
+ ret float %tmp3
+}
+
+define double @test_fmls_ddD(double %a, double %b, <1 x double> %v) {
+ ; CHECK: test_fmls_ddD
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+ ret double %tmp3
+}
+
+define double @test_fmls_dd2D(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmls_dd2D
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
+ ret double %tmp3
+}
+
+define double @test_fmls_dd2D_swap(double %a, double %b, <2 x double> %v) {
+ ; CHECK: test_fmls_dd2D_swap
+ ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fsub double -0.0, %tmp1
+ %tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
+ ret double %tmp3
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
new file mode 100644
index 000000000000..968ad3e8cf71
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-by-elem-mul.ll
@@ -0,0 +1,124 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
+
+define float @test_fmul_lane_ss2S(float %a, <2 x float> %v) {
+ ; CHECK: test_fmul_lane_ss2S
+ ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %a, %tmp1;
+ ret float %tmp2;
+}
+
+define float @test_fmul_lane_ss2S_swap(float %a, <2 x float> %v) {
+ ; CHECK: test_fmul_lane_ss2S_swap
+ ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = fmul float %tmp1, %a;
+ ret float %tmp2;
+}
+
+
+define float @test_fmul_lane_ss4S(float %a, <4 x float> %v) {
+ ; CHECK: test_fmul_lane_ss4S
+ ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %a, %tmp1;
+ ret float %tmp2;
+}
+
+define float @test_fmul_lane_ss4S_swap(float %a, <4 x float> %v) {
+ ; CHECK: test_fmul_lane_ss4S_swap
+ ; CHECK: fmul {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = fmul float %tmp1, %a;
+ ret float %tmp2;
+}
+
+
+define double @test_fmul_lane_ddD(double %a, <1 x double> %v) {
+ ; CHECK: test_fmul_lane_ddD
+ ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = fmul double %a, %tmp1;
+ ret double %tmp2;
+}
+
+
+
+define double @test_fmul_lane_dd2D(double %a, <2 x double> %v) {
+ ; CHECK: test_fmul_lane_dd2D
+ ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %a, %tmp1;
+ ret double %tmp2;
+}
+
+
+define double @test_fmul_lane_dd2D_swap(double %a, <2 x double> %v) {
+ ; CHECK: test_fmul_lane_dd2D_swap
+ ; CHECK: fmul {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = fmul double %tmp1, %a;
+ ret double %tmp2;
+}
+
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+
+define float @test_fmulx_lane_f32(float %a, <2 x float> %v) {
+ ; CHECK: test_fmulx_lane_f32
+ ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+ ret float %tmp2;
+}
+
+define float @test_fmulx_laneq_f32(float %a, <4 x float> %v) {
+ ; CHECK: test_fmulx_laneq_f32
+ ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %tmp1)
+ ret float %tmp2;
+}
+
+define float @test_fmulx_laneq_f32_swap(float %a, <4 x float> %v) {
+ ; CHECK: test_fmulx_laneq_f32_swap
+ ; CHECK: fmulx {{s[0-31]+}}, {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ %tmp1 = extractelement <4 x float> %v, i32 3
+ %tmp2 = call float @llvm.aarch64.neon.vmulx.f32(float %tmp1, float %a)
+ ret float %tmp2;
+}
+
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
+
+define double @test_fmulx_lane_f64(double %a, <1 x double> %v) {
+ ; CHECK: test_fmulx_lane_f64
+ ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+define double @test_fmulx_laneq_f64_0(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_0
+ ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[0]
+ %tmp1 = extractelement <2 x double> %v, i32 0
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+
+define double @test_fmulx_laneq_f64_1(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_1
+ ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %tmp1)
+ ret double %tmp2;
+}
+
+define double @test_fmulx_laneq_f64_1_swap(double %a, <2 x double> %v) {
+ ; CHECK: test_fmulx_laneq_f64_1_swap
+ ; CHECK: fmulx {{d[0-31]+}}, {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ %tmp2 = call double @llvm.aarch64.neon.vmulx.f64(double %tmp1, double %a)
+ ret double %tmp2;
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-compare.ll b/test/CodeGen/AArch64/neon-scalar-compare.ll
new file mode 100644
index 000000000000..5f10cbbab2a6
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-compare.ll
@@ -0,0 +1,343 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+;; Scalar Integer Compare
+
+define i64 @test_vceqd(i64 %a, i64 %b) {
+; CHECK: test_vceqd
+; CHECK: cmeq {{d[0-9]+}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vceq.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vceq1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceq.i, <1 x i64> %vceq1.i)
+ %0 = extractelement <1 x i64> %vceq2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vceqzd(i64 %a) {
+; CHECK: test_vceqzd
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
+entry:
+ %vceqz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vceqz1.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64> %vceqz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vceqz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcged(i64 %a, i64 %b) {
+; CHECK: test_vcged
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcge1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgezd(i64 %a) {
+; CHECK: test_vcgezd
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, #0x0
+entry:
+ %vcgez.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgez1.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcgez.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcgez1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgtd(i64 %a, i64 %b) {
+; CHECK: test_vcgtd
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgt1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgtzd(i64 %a) {
+; CHECK: test_vcgtzd
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, #0x0
+entry:
+ %vcgtz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgtz1.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgtz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcgtz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcled(i64 %a, i64 %b) {
+; CHECK: test_vcled
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcgt1.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vclezd(i64 %a) {
+; CHECK: test_vclezd
+; CHECK: cmle {{d[0-9]}}, {{d[0-9]}}, #0x0
+entry:
+ %vclez.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vclez1.i = call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64> %vclez.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vclez1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcltd(i64 %a, i64 %b) {
+; CHECK: test_vcltd
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcge1.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcltzd(i64 %a) {
+; CHECK: test_vcltzd
+; CHECK: cmlt {{d[0-9]}}, {{d[0-9]}}, #0x0
+entry:
+ %vcltz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcltz1.i = call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64> %vcltz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcltz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vtstd(i64 %a, i64 %b) {
+; CHECK: test_vtstd
+; CHECK: cmtst {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vtst.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vtst1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vtst2.i = call <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64> %vtst.i, <1 x i64> %vtst1.i)
+ %0 = extractelement <1 x i64> %vtst2.i, i32 0
+ ret i64 %0
+}
+
+
+define <1 x i64> @test_vcage_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcage_f64
+; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2
+ ret <1 x i64> %vcage2.i
+}
+
+define <1 x i64> @test_vcagt_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcagt_f64
+; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2
+ ret <1 x i64> %vcagt2.i
+}
+
+define <1 x i64> @test_vcale_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcale_f64
+; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2
+ ret <1 x i64> %vcage2.i
+}
+
+define <1 x i64> @test_vcalt_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcalt_f64
+; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2
+ ret <1 x i64> %vcagt2.i
+}
+
+define <1 x i64> @test_vceq_s64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vceq_s64
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp eq <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vceq_u64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vceq_u64
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp eq <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vceq_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vceq_f64
+; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = fcmp oeq <1 x double> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcge_s64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcge_s64
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp sge <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcge_u64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcge_u64
+; CHECK: cmhs {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp uge <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcge_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcge_f64
+; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = fcmp oge <1 x double> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcle_s64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcle_s64
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp sle <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcle_u64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcle_u64
+; CHECK: cmhs {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp ule <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcle_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcle_f64
+; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = fcmp ole <1 x double> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcgt_s64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcgt_s64
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp sgt <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcgt_u64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vcgt_u64
+; CHECK: cmhi {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp ugt <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vcgt_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vcgt_f64
+; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = fcmp ogt <1 x double> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vclt_s64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vclt_s64
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp slt <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vclt_u64(<1 x i64> %a, <1 x i64> %b) #0 {
+; CHECK: test_vclt_u64
+; CHECK: cmhi {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = icmp ult <1 x i64> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vclt_f64(<1 x double> %a, <1 x double> %b) #0 {
+; CHECK: test_vclt_f64
+; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+ %cmp.i = fcmp olt <1 x double> %a, %b
+ %sext.i = sext <1 x i1> %cmp.i to <1 x i64>
+ ret <1 x i64> %sext.i
+}
+
+define <1 x i64> @test_vceqz_s64(<1 x i64> %a) #0 {
+; CHECK: test_vceqz_s64
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp eq <1 x i64> %a, zeroinitializer
+ %vceqz.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vceqz.i
+}
+
+define <1 x i64> @test_vceqz_u64(<1 x i64> %a) #0 {
+; CHECK: test_vceqz_u64
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp eq <1 x i64> %a, zeroinitializer
+ %vceqz.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vceqz.i
+}
+
+define <1 x i64> @test_vceqz_p64(<1 x i64> %a) #0 {
+; CHECK: test_vceqz_p64
+; CHECK: cmeq {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp eq <1 x i64> %a, zeroinitializer
+ %vceqz.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vceqz.i
+}
+
+define <2 x i64> @test_vceqzq_p64(<2 x i64> %a) #0 {
+; CHECK: test_vceqzq_p64
+; CHECK: cmeq {{v[0-9]}}.2d, {{v[0-9]}}.2d, #0
+ %1 = icmp eq <2 x i64> %a, zeroinitializer
+ %vceqz.i = zext <2 x i1> %1 to <2 x i64>
+ ret <2 x i64> %vceqz.i
+}
+
+define <1 x i64> @test_vcgez_s64(<1 x i64> %a) #0 {
+; CHECK: test_vcgez_s64
+; CHECK: cmge {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp sge <1 x i64> %a, zeroinitializer
+ %vcgez.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vcgez.i
+}
+
+define <1 x i64> @test_vclez_s64(<1 x i64> %a) #0 {
+; CHECK: test_vclez_s64
+; CHECK: cmle {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp sle <1 x i64> %a, zeroinitializer
+ %vclez.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vclez.i
+}
+
+define <1 x i64> @test_vcgtz_s64(<1 x i64> %a) #0 {
+; CHECK: test_vcgtz_s64
+; CHECK: cmgt {{d[0-9]}}, {{d[0-9]}}, #0x0
+ %1 = icmp sgt <1 x i64> %a, zeroinitializer
+ %vcgtz.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vcgtz.i
+}
+
+define <1 x i64> @test_vcltz_s64(<1 x i64> %a) #0 {
+; CHECK: test_vcltz_s64
+; CHECK: cmlt {{d[0-9]}}, {{d[0-9]}}, #0
+ %1 = icmp slt <1 x i64> %a, zeroinitializer
+ %vcltz.i = zext <1 x i1> %1 to <1 x i64>
+ ret <1 x i64> %vcltz.i
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vchs.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vchi.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-copy.ll b/test/CodeGen/AArch64/neon-scalar-copy.ll
new file mode 100644
index 000000000000..d433ff595d1c
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -0,0 +1,88 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define float @test_dup_sv2S(<2 x float> %v) {
+ ;CHECK: test_dup_sv2S
+ ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ %tmp1 = extractelement <2 x float> %v, i32 1
+ ret float %tmp1
+}
+
+define float @test_dup_sv4S(<4 x float> %v) {
+ ;CHECK: test_dup_sv4S
+ ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[0]
+ %tmp1 = extractelement <4 x float> %v, i32 0
+ ret float %tmp1
+}
+
+define double @test_dup_dvD(<1 x double> %v) {
+ ;CHECK: test_dup_dvD
+ ;CHECK-NOT: dup {{d[0-31]+}}, {{v[0-31]+}}.d[0]
+ ;CHECK: ret
+ %tmp1 = extractelement <1 x double> %v, i32 0
+ ret double %tmp1
+}
+
+define double @test_dup_dv2D(<2 x double> %v) {
+ ;CHECK: test_dup_dv2D
+ ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %tmp1 = extractelement <2 x double> %v, i32 1
+ ret double %tmp1
+}
+
+define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) {
+ ;CHECK: test_vector_dup_bv16B
+ ;CHECK: dup {{b[0-31]+}}, {{v[0-31]+}}.b[14]
+ %shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
+ ret <1 x i8> %shuffle.i
+}
+
+define <1 x i8> @test_vector_dup_bv8B(<8 x i8> %v1) {
+ ;CHECK: test_vector_dup_bv8B
+ ;CHECK: dup {{b[0-31]+}}, {{v[0-31]+}}.b[7]
+ %shuffle.i = shufflevector <8 x i8> %v1, <8 x i8> undef, <1 x i32> <i32 7>
+ ret <1 x i8> %shuffle.i
+}
+
+define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) {
+ ;CHECK: test_vector_dup_hv8H
+ ;CHECK: dup {{h[0-31]+}}, {{v[0-31]+}}.h[7]
+ %shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
+ ret <1 x i16> %shuffle.i
+}
+
+define <1 x i16> @test_vector_dup_hv4H(<4 x i16> %v1) {
+ ;CHECK: test_vector_dup_hv4H
+ ;CHECK: dup {{h[0-31]+}}, {{v[0-31]+}}.h[3]
+ %shuffle.i = shufflevector <4 x i16> %v1, <4 x i16> undef, <1 x i32> <i32 3>
+ ret <1 x i16> %shuffle.i
+}
+
+define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) {
+ ;CHECK: test_vector_dup_sv4S
+ ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[3]
+ %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
+ ret <1 x i32> %shuffle
+}
+
+define <1 x i32> @test_vector_dup_sv2S(<2 x i32> %v1) {
+ ;CHECK: test_vector_dup_sv2S
+ ;CHECK: dup {{s[0-31]+}}, {{v[0-31]+}}.s[1]
+ %shuffle = shufflevector <2 x i32> %v1, <2 x i32> undef, <1 x i32> <i32 1>
+ ret <1 x i32> %shuffle
+}
+
+define <1 x i64> @test_vector_dup_dv2D(<2 x i64> %v1) {
+ ;CHECK: test_vector_dup_dv2D
+ ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %shuffle.i = shufflevector <2 x i64> %v1, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) {
+ ;CHECK: test_vector_copy_dup_dv2D
+ ;CHECK: dup {{d[0-31]+}}, {{v[0-31]+}}.d[1]
+ %vget_lane = extractelement <2 x i64> %c, i32 1
+ %vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0
+ ret <1 x i64> %vset_lane
+}
+
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
new file mode 100644
index 000000000000..a06d5d60a85b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -0,0 +1,137 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define float @test_vcvts_f32_s32(i32 %a) {
+; CHECK: test_vcvts_f32_s32
+; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
+ ret float %0
+}
+
+declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
+
+define double @test_vcvtd_f64_s64(i64 %a) {
+; CHECK: test_vcvtd_f64_s64
+; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
+ ret double %0
+}
+
+declare double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
+
+define float @test_vcvts_f32_u32(i32 %a) {
+; CHECK: test_vcvts_f32_u32
+; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
+ ret float %0
+}
+
+declare float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
+
+define double @test_vcvtd_f64_u64(i64 %a) {
+; CHECK: test_vcvtd_f64_u64
+; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
+ ret double %0
+}
+
+declare double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
+
+define float @test_vcvts_n_f32_s32(i32 %a) {
+; CHECK: test_vcvts_n_f32_s32
+; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
+ ret float %0
+}
+
+declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+
+define double @test_vcvtd_n_f64_s64(i64 %a) {
+; CHECK: test_vcvtd_n_f64_s64
+; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
+ ret double %0
+}
+
+declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+
+define float @test_vcvts_n_f32_u32(i32 %a) {
+; CHECK: test_vcvts_n_f32_u32
+; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
+ ret float %0
+}
+
+declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+
+define double @test_vcvtd_n_f64_u64(i64 %a) {
+; CHECK: test_vcvtd_n_f64_u64
+; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
+ ret double %0
+}
+
+declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+
+define i32 @test_vcvts_n_s32_f32(float %a) {
+; CHECK: test_vcvts_n_s32_f32
+; CHECK: fcvtzs {{s[0-9]+}}, {{s[0-9]+}}, #1
+entry:
+ %fcvtzs = insertelement <1 x float> undef, float %a, i32 0
+ %fcvtzs1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float> %fcvtzs, i32 1)
+ %0 = extractelement <1 x i32> %fcvtzs1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vcvts.n.s32.f32(<1 x float>, i32)
+
+define i64 @test_vcvtd_n_s64_f64(double %a) {
+; CHECK: test_vcvtd_n_s64_f64
+; CHECK: fcvtzs {{d[0-9]+}}, {{d[0-9]+}}, #1
+entry:
+ %fcvtzs = insertelement <1 x double> undef, double %a, i32 0
+ %fcvtzs1 = call <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double> %fcvtzs, i32 1)
+ %0 = extractelement <1 x i64> %fcvtzs1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.s64.f64(<1 x double>, i32)
+
+define i32 @test_vcvts_n_u32_f32(float %a) {
+; CHECK: test_vcvts_n_u32_f32
+; CHECK: fcvtzu {{s[0-9]+}}, {{s[0-9]+}}, #32
+entry:
+ %fcvtzu = insertelement <1 x float> undef, float %a, i32 0
+ %fcvtzu1 = call <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float> %fcvtzu, i32 32)
+ %0 = extractelement <1 x i32> %fcvtzu1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vcvts.n.u32.f32(<1 x float>, i32)
+
+define i64 @test_vcvtd_n_u64_f64(double %a) {
+; CHECK: test_vcvtd_n_u64_f64
+; CHECK: fcvtzu {{d[0-9]+}}, {{d[0-9]+}}, #64
+entry:
+ %fcvtzu = insertelement <1 x double> undef, double %a, i32 0
+ %fcvtzu1 = tail call <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double> %fcvtzu, i32 64)
+ %0 = extractelement <1 x i64> %fcvtzu1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vcvtd.n.u64.f64(<1 x double>, i32)
diff --git a/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll b/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll
new file mode 100644
index 000000000000..faf521bc889a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-extract-narrow.ll
@@ -0,0 +1,104 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define i8 @test_vqmovunh_s16(i16 %a) {
+; CHECK: test_vqmovunh_s16
+; CHECK: sqxtun {{b[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqmovun.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqmovun1.i = call <1 x i8> @llvm.arm.neon.vqmovnsu.v1i8(<1 x i16> %vqmovun.i)
+ %0 = extractelement <1 x i8> %vqmovun1.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vqmovuns_s32(i32 %a) {
+; CHECK: test_vqmovuns_s32
+; CHECK: sqxtun {{h[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqmovun.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqmovun1.i = call <1 x i16> @llvm.arm.neon.vqmovnsu.v1i16(<1 x i32> %vqmovun.i)
+ %0 = extractelement <1 x i16> %vqmovun1.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vqmovund_s64(i64 %a) {
+; CHECK: test_vqmovund_s64
+; CHECK: sqxtun {{s[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vqmovun.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqmovun1.i = call <1 x i32> @llvm.arm.neon.vqmovnsu.v1i32(<1 x i64> %vqmovun.i)
+ %0 = extractelement <1 x i32> %vqmovun1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i8> @llvm.arm.neon.vqmovnsu.v1i8(<1 x i16>)
+declare <1 x i16> @llvm.arm.neon.vqmovnsu.v1i16(<1 x i32>)
+declare <1 x i32> @llvm.arm.neon.vqmovnsu.v1i32(<1 x i64>)
+
+define i8 @test_vqmovnh_s16(i16 %a) {
+; CHECK: test_vqmovnh_s16
+; CHECK: sqxtn {{b[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqmovn1.i = call <1 x i8> @llvm.arm.neon.vqmovns.v1i8(<1 x i16> %vqmovn.i)
+ %0 = extractelement <1 x i8> %vqmovn1.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vqmovns_s32(i32 %a) {
+; CHECK: test_vqmovns_s32
+; CHECK: sqxtn {{h[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqmovn1.i = call <1 x i16> @llvm.arm.neon.vqmovns.v1i16(<1 x i32> %vqmovn.i)
+ %0 = extractelement <1 x i16> %vqmovn1.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vqmovnd_s64(i64 %a) {
+; CHECK: test_vqmovnd_s64
+; CHECK: sqxtn {{s[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqmovn1.i = call <1 x i32> @llvm.arm.neon.vqmovns.v1i32(<1 x i64> %vqmovn.i)
+ %0 = extractelement <1 x i32> %vqmovn1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i8> @llvm.arm.neon.vqmovns.v1i8(<1 x i16>)
+declare <1 x i16> @llvm.arm.neon.vqmovns.v1i16(<1 x i32>)
+declare <1 x i32> @llvm.arm.neon.vqmovns.v1i32(<1 x i64>)
+
+define i8 @test_vqmovnh_u16(i16 %a) {
+; CHECK: test_vqmovnh_u16
+; CHECK: uqxtn {{b[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqmovn1.i = call <1 x i8> @llvm.arm.neon.vqmovnu.v1i8(<1 x i16> %vqmovn.i)
+ %0 = extractelement <1 x i8> %vqmovn1.i, i32 0
+ ret i8 %0
+}
+
+
+define i16 @test_vqmovns_u32(i32 %a) {
+; CHECK: test_vqmovns_u32
+; CHECK: uqxtn {{h[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqmovn1.i = call <1 x i16> @llvm.arm.neon.vqmovnu.v1i16(<1 x i32> %vqmovn.i)
+ %0 = extractelement <1 x i16> %vqmovn1.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vqmovnd_u64(i64 %a) {
+; CHECK: test_vqmovnd_u64
+; CHECK: uqxtn {{s[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vqmovn.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqmovn1.i = call <1 x i32> @llvm.arm.neon.vqmovnu.v1i32(<1 x i64> %vqmovn.i)
+ %0 = extractelement <1 x i32> %vqmovn1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i8> @llvm.arm.neon.vqmovnu.v1i8(<1 x i16>)
+declare <1 x i16> @llvm.arm.neon.vqmovnu.v1i16(<1 x i32>)
+declare <1 x i32> @llvm.arm.neon.vqmovnu.v1i32(<1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fabd.ll b/test/CodeGen/AArch64/neon-scalar-fabd.ll
new file mode 100644
index 000000000000..75686d32064b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-fabd.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define float @test_vabds_f32(float %a, float %b) {
+; CHECK-LABEL: test_vabds_f32
+; CHECK: fabd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vabd.i = insertelement <1 x float> undef, float %a, i32 0
+ %vabd1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vabd2.i = call <1 x float> @llvm.aarch64.neon.vabd.v1f32(<1 x float> %vabd.i, <1 x float> %vabd1.i)
+ %0 = extractelement <1 x float> %vabd2.i, i32 0
+ ret float %0
+}
+
+define double @test_vabdd_f64(double %a, double %b) {
+; CHECK-LABEL: test_vabdd_f64
+; CHECK: fabd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vabd.i = insertelement <1 x double> undef, double %a, i32 0
+ %vabd1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vabd2.i = call <1 x double> @llvm.aarch64.neon.vabd.v1f64(<1 x double> %vabd.i, <1 x double> %vabd1.i)
+ %0 = extractelement <1 x double> %vabd2.i, i32 0
+ ret double %0
+}
+
+declare <1 x double> @llvm.aarch64.neon.vabd.v1f64(<1 x double>, <1 x double>)
+declare <1 x float> @llvm.aarch64.neon.vabd.v1f32(<1 x float>, <1 x float>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fcvt.ll b/test/CodeGen/AArch64/neon-scalar-fcvt.ll
new file mode 100644
index 000000000000..d7b84fae7375
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-fcvt.ll
@@ -0,0 +1,255 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+;; Scalar Floating-point Convert
+
+define float @test_vcvtxn(double %a) {
+; CHECK: test_vcvtxn
+; CHECK: fcvtxn {{s[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtf.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtf1.i = tail call <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double> %vcvtf.i)
+ %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+ ret float %0
+}
+
+declare <1 x float> @llvm.aarch64.neon.fcvtxn.v1f32.v1f64(<1 x double>)
+
+define i32 @test_vcvtass(float %a) {
+; CHECK: test_vcvtass
+; CHECK: fcvtas {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtas.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtas1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float> %vcvtas.i)
+ %0 = extractelement <1 x i32> %vcvtas1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtas.v1i32.v1f32(<1 x float>)
+
+define i64 @test_test_vcvtasd(double %a) {
+; CHECK: test_test_vcvtasd
+; CHECK: fcvtas {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtas.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtas1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double> %vcvtas.i)
+ %0 = extractelement <1 x i64> %vcvtas1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtas.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtaus(float %a) {
+; CHECK: test_vcvtaus
+; CHECK: fcvtau {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtau.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtau1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float> %vcvtau.i)
+ %0 = extractelement <1 x i32> %vcvtau1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtau.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtaud(double %a) {
+; CHECK: test_vcvtaud
+; CHECK: fcvtau {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtau.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtau1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double> %vcvtau.i)
+ %0 = extractelement <1 x i64> %vcvtau1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtau.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtmss(float %a) {
+; CHECK: test_vcvtmss
+; CHECK: fcvtms {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtms.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtms1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float> %vcvtms.i)
+ %0 = extractelement <1 x i32> %vcvtms1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtms.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtmd_s64_f64(double %a) {
+; CHECK: test_vcvtmd_s64_f64
+; CHECK: fcvtms {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtms.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtms1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double> %vcvtms.i)
+ %0 = extractelement <1 x i64> %vcvtms1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtms.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtmus(float %a) {
+; CHECK: test_vcvtmus
+; CHECK: fcvtmu {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtmu.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtmu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float> %vcvtmu.i)
+ %0 = extractelement <1 x i32> %vcvtmu1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtmu.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtmud(double %a) {
+; CHECK: test_vcvtmud
+; CHECK: fcvtmu {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtmu.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtmu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double> %vcvtmu.i)
+ %0 = extractelement <1 x i64> %vcvtmu1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtmu.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtnss(float %a) {
+; CHECK: test_vcvtnss
+; CHECK: fcvtns {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtns.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtns1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float> %vcvtns.i)
+ %0 = extractelement <1 x i32> %vcvtns1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtns.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtnd_s64_f64(double %a) {
+; CHECK: test_vcvtnd_s64_f64
+; CHECK: fcvtns {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtns.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtns1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double> %vcvtns.i)
+ %0 = extractelement <1 x i64> %vcvtns1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtns.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtnus(float %a) {
+; CHECK: test_vcvtnus
+; CHECK: fcvtnu {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtnu.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtnu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float> %vcvtnu.i)
+ %0 = extractelement <1 x i32> %vcvtnu1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtnu.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtnud(double %a) {
+; CHECK: test_vcvtnud
+; CHECK: fcvtnu {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtnu.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtnu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double> %vcvtnu.i)
+ %0 = extractelement <1 x i64> %vcvtnu1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtnu.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtpss(float %a) {
+; CHECK: test_vcvtpss
+; CHECK: fcvtps {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtps.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtps1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float> %vcvtps.i)
+ %0 = extractelement <1 x i32> %vcvtps1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtps.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtpd_s64_f64(double %a) {
+; CHECK: test_vcvtpd_s64_f64
+; CHECK: fcvtps {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtps.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtps1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double> %vcvtps.i)
+ %0 = extractelement <1 x i64> %vcvtps1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtps.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtpus(float %a) {
+; CHECK: test_vcvtpus
+; CHECK: fcvtpu {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtpu.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtpu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float> %vcvtpu.i)
+ %0 = extractelement <1 x i32> %vcvtpu1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtpu.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtpud(double %a) {
+; CHECK: test_vcvtpud
+; CHECK: fcvtpu {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtpu.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtpu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double> %vcvtpu.i)
+ %0 = extractelement <1 x i64> %vcvtpu1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtpu.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtss(float %a) {
+; CHECK: test_vcvtss
+; CHECK: fcvtzs {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtzs.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtzs1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float> %vcvtzs.i)
+ %0 = extractelement <1 x i32> %vcvtzs1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtzs.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtd_s64_f64(double %a) {
+; CHECK: test_vcvtd_s64_f64
+; CHECK: fcvtzs {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvzs.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvzs1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %vcvzs.i)
+ %0 = extractelement <1 x i64> %vcvzs1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double>)
+
+define i32 @test_vcvtus(float %a) {
+; CHECK: test_vcvtus
+; CHECK: fcvtzu {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcvtzu.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcvtzu1.i = tail call <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float> %vcvtzu.i)
+ %0 = extractelement <1 x i32> %vcvtzu1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.fcvtzu.v1i32.v1f32(<1 x float>)
+
+define i64 @test_vcvtud(double %a) {
+; CHECK: test_vcvtud
+; CHECK: fcvtzu {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcvtzu.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcvtzu1.i = tail call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %vcvtzu.i)
+ %0 = extractelement <1 x i64> %vcvtzu1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
new file mode 100644
index 000000000000..a6e58599acdb
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
@@ -0,0 +1,328 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+;; Scalar Floating-point Compare
+
+define i32 @test_vceqs_f32(float %a, float %b) {
+; CHECK: test_vceqs_f32
+; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vceq.i = insertelement <1 x float> undef, float %a, i32 0
+ %vceq1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i)
+ %0 = extractelement <1 x i32> %vceq2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vceqd_f64(double %a, double %b) {
+; CHECK: test_vceqd_f64
+; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vceq.i = insertelement <1 x double> undef, double %a, i32 0
+ %vceq1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> %vceq1.i)
+ %0 = extractelement <1 x i64> %vceq2.i, i32 0
+ ret i64 %0
+}
+
+define <1 x i64> @test_vceqz_f64(<1 x double> %a) #0 {
+; CHECK: test_vceqz_f64
+; CHECK: fcmeq {{d[0-9]+}}, {{d[0-9]+}}, #0.0
+entry:
+ %0 = fcmp oeq <1 x double> %a, zeroinitializer
+ %vceqz.i = zext <1 x i1> %0 to <1 x i64>
+ ret <1 x i64> %vceqz.i
+}
+
+define i32 @test_vceqzs_f32(float %a) {
+; CHECK: test_vceqzs_f32
+; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0
+entry:
+ %vceq.i = insertelement <1 x float> undef, float %a, i32 0
+ %vceq1.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> zeroinitializer)
+ %0 = extractelement <1 x i32> %vceq1.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vceqzd_f64(double %a) {
+; CHECK: test_vceqzd_f64
+; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0
+entry:
+ %vceq.i = insertelement <1 x double> undef, double %a, i32 0
+ %vceq1.i = tail call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double> %vceq.i, <1 x float> zeroinitializer) #5
+ %0 = extractelement <1 x i64> %vceq1.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcges_f32(float %a, float %b) {
+; CHECK: test_vcges_f32
+; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
+ %0 = extractelement <1 x i32> %vcge2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcged_f64(double %a, double %b) {
+; CHECK: test_vcged_f64
+; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcgezs_f32(float %a) {
+; CHECK: test_vcgezs_f32
+; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, #0.0
+entry:
+ %vcge.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcge1.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> zeroinitializer)
+ %0 = extractelement <1 x i32> %vcge1.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcgezd_f64(double %a) {
+; CHECK: test_vcgezd_f64
+; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0
+entry:
+ %vcge.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcge1.i = tail call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double> %vcge.i, <1 x float> zeroinitializer) #5
+ %0 = extractelement <1 x i64> %vcge1.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcgts_f32(float %a, float %b) {
+; CHECK: test_vcgts_f32
+; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcgt1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
+ %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcgtd_f64(double %a, double %b) {
+; CHECK: test_vcgtd_f64
+; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcgt1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcgtzs_f32(float %a) {
+; CHECK: test_vcgtzs_f32
+; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, #0.0
+entry:
+ %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcgt1.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> zeroinitializer)
+ %0 = extractelement <1 x i32> %vcgt1.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcgtzd_f64(double %a) {
+; CHECK: test_vcgtzd_f64
+; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0
+entry:
+ %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcgt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double> %vcgt.i, <1 x float> zeroinitializer) #5
+ %0 = extractelement <1 x i64> %vcgt1.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcles_f32(float %a, float %b) {
+; CHECK: test_vcles_f32
+; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
+ %0 = extractelement <1 x i32> %vcge2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcled_f64(double %a, double %b) {
+; CHECK: test_vcled_f64
+; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcge.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vclezs_f32(float %a) {
+; CHECK: test_vclezs_f32
+; CHECK: fcmle {{s[0-9]}}, {{s[0-9]}}, #0.0
+entry:
+ %vcle.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcle1.i = call <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float> %vcle.i, <1 x float> zeroinitializer)
+ %0 = extractelement <1 x i32> %vcle1.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vclezd_f64(double %a) {
+; CHECK: test_vclezd_f64
+; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0
+entry:
+ %vcle.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcle1.i = tail call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double> %vcle.i, <1 x float> zeroinitializer) #5
+ %0 = extractelement <1 x i64> %vcle1.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vclts_f32(float %a, float %b) {
+; CHECK: test_vclts_f32
+; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcgt1.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
+ %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcltd_f64(double %a, double %b) {
+; CHECK: test_vcltd_f64
+; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcgt.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcgt1.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcltzs_f32(float %a) {
+; CHECK: test_vcltzs_f32
+; CHECK: fcmlt {{s[0-9]}}, {{s[0-9]}}, #0.0
+entry:
+ %vclt.i = insertelement <1 x float> undef, float %a, i32 0
+ %vclt1.i = call <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float> %vclt.i, <1 x float> zeroinitializer)
+ %0 = extractelement <1 x i32> %vclt1.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcltzd_f64(double %a) {
+; CHECK: test_vcltzd_f64
+; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0
+entry:
+ %vclt.i = insertelement <1 x double> undef, double %a, i32 0
+ %vclt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double> %vclt.i, <1 x float> zeroinitializer) #5
+ %0 = extractelement <1 x i64> %vclt1.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcages_f32(float %a, float %b) {
+; CHECK: test_vcages_f32
+; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcage.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcage1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
+ %0 = extractelement <1 x i32> %vcage2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcaged_f64(double %a, double %b) {
+; CHECK: test_vcaged_f64
+; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcage.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcage1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
+ %0 = extractelement <1 x i64> %vcage2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcagts_f32(float %a, float %b) {
+; CHECK: test_vcagts_f32
+; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcagt.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcagt1.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcagt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcagt.i, <1 x float> %vcagt1.i)
+ %0 = extractelement <1 x i32> %vcagt2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcagtd_f64(double %a, double %b) {
+; CHECK: test_vcagtd_f64
+; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcagt.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcagt1.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcagt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcagt.i, <1 x double> %vcagt1.i)
+ %0 = extractelement <1 x i64> %vcagt2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcales_f32(float %a, float %b) {
+; CHECK: test_vcales_f32
+; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcage.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcage1.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
+ %0 = extractelement <1 x i32> %vcage2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcaled_f64(double %a, double %b) {
+; CHECK: test_vcaled_f64
+; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcage.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcage1.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
+ %0 = extractelement <1 x i64> %vcage2.i, i32 0
+ ret i64 %0
+}
+
+define i32 @test_vcalts_f32(float %a, float %b) {
+; CHECK: test_vcalts_f32
+; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
+entry:
+ %vcalt.i = insertelement <1 x float> undef, float %b, i32 0
+ %vcalt1.i = insertelement <1 x float> undef, float %a, i32 0
+ %vcalt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcalt.i, <1 x float> %vcalt1.i)
+ %0 = extractelement <1 x i32> %vcalt2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vcaltd_f64(double %a, double %b) {
+; CHECK: test_vcaltd_f64
+; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
+entry:
+ %vcalt.i = insertelement <1 x double> undef, double %b, i32 0
+ %vcalt1.i = insertelement <1 x double> undef, double %a, i32 0
+ %vcalt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcalt.i, <1 x double> %vcalt1.i)
+ %0 = extractelement <1 x i64> %vcalt2.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
+declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
+declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
+declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-mul.ll b/test/CodeGen/AArch64/neon-scalar-mul.ll
new file mode 100644
index 000000000000..991037f6cb88
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-mul.ll
@@ -0,0 +1,143 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define i16 @test_vqdmulhh_s16(i16 %a, i16 %b) {
+; CHECK: test_vqdmulhh_s16
+; CHECK: sqdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ %1 = insertelement <1 x i16> undef, i16 %a, i32 0
+ %2 = insertelement <1 x i16> undef, i16 %b, i32 0
+ %3 = call <1 x i16> @llvm.arm.neon.vqdmulh.v1i16(<1 x i16> %1, <1 x i16> %2)
+ %4 = extractelement <1 x i16> %3, i32 0
+ ret i16 %4
+}
+
+define i32 @test_vqdmulhs_s32(i32 %a, i32 %b) {
+; CHECK: test_vqdmulhs_s32
+; CHECK: sqdmulh {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %1 = insertelement <1 x i32> undef, i32 %a, i32 0
+ %2 = insertelement <1 x i32> undef, i32 %b, i32 0
+ %3 = call <1 x i32> @llvm.arm.neon.vqdmulh.v1i32(<1 x i32> %1, <1 x i32> %2)
+ %4 = extractelement <1 x i32> %3, i32 0
+ ret i32 %4
+}
+
+declare <1 x i16> @llvm.arm.neon.vqdmulh.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i32> @llvm.arm.neon.vqdmulh.v1i32(<1 x i32>, <1 x i32>)
+
+define i16 @test_vqrdmulhh_s16(i16 %a, i16 %b) {
+; CHECK: test_vqrdmulhh_s16
+; CHECK: sqrdmulh {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ %1 = insertelement <1 x i16> undef, i16 %a, i32 0
+ %2 = insertelement <1 x i16> undef, i16 %b, i32 0
+ %3 = call <1 x i16> @llvm.arm.neon.vqrdmulh.v1i16(<1 x i16> %1, <1 x i16> %2)
+ %4 = extractelement <1 x i16> %3, i32 0
+ ret i16 %4
+}
+
+define i32 @test_vqrdmulhs_s32(i32 %a, i32 %b) {
+; CHECK: test_vqrdmulhs_s32
+; CHECK: sqrdmulh {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %1 = insertelement <1 x i32> undef, i32 %a, i32 0
+ %2 = insertelement <1 x i32> undef, i32 %b, i32 0
+ %3 = call <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32> %1, <1 x i32> %2)
+ %4 = extractelement <1 x i32> %3, i32 0
+ ret i32 %4
+}
+
+declare <1 x i16> @llvm.arm.neon.vqrdmulh.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i32> @llvm.arm.neon.vqrdmulh.v1i32(<1 x i32>, <1 x i32>)
+
+define float @test_vmulxs_f32(float %a, float %b) {
+; CHECK: test_vmulxs_f32
+; CHECK: fmulx {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %1 = call float @llvm.aarch64.neon.vmulx.f32(float %a, float %b)
+ ret float %1
+}
+
+define double @test_vmulxd_f64(double %a, double %b) {
+; CHECK: test_vmulxd_f64
+; CHECK: fmulx {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ %1 = call double @llvm.aarch64.neon.vmulx.f64(double %a, double %b)
+ ret double %1
+}
+
+declare float @llvm.aarch64.neon.vmulx.f32(float, float)
+declare double @llvm.aarch64.neon.vmulx.f64(double, double)
+
+define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
+; CHECK: test_vqdmlalh_s16
+; CHECK: sqdmlal {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqdmlal.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqdmlal1.i = insertelement <1 x i16> undef, i16 %b, i32 0
+ %vqdmlal2.i = insertelement <1 x i16> undef, i16 %c, i32 0
+ %vqdmlal3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32> %vqdmlal.i, <1 x i16> %vqdmlal1.i, <1 x i16> %vqdmlal2.i)
+ %0 = extractelement <1 x i32> %vqdmlal3.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vqdmlals_s32(i64 %a, i32 %b, i32 %c) {
+; CHECK: test_vqdmlals_s32
+; CHECK: sqdmlal {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqdmlal.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqdmlal1.i = insertelement <1 x i32> undef, i32 %b, i32 0
+ %vqdmlal2.i = insertelement <1 x i32> undef, i32 %c, i32 0
+ %vqdmlal3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64> %vqdmlal.i, <1 x i32> %vqdmlal1.i, <1 x i32> %vqdmlal2.i)
+ %0 = extractelement <1 x i64> %vqdmlal3.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
+declare <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
+
+define i32 @test_vqdmlslh_s16(i32 %a, i16 %b, i16 %c) {
+; CHECK: test_vqdmlslh_s16
+; CHECK: sqdmlsl {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqdmlsl.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqdmlsl1.i = insertelement <1 x i16> undef, i16 %b, i32 0
+ %vqdmlsl2.i = insertelement <1 x i16> undef, i16 %c, i32 0
+ %vqdmlsl3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32> %vqdmlsl.i, <1 x i16> %vqdmlsl1.i, <1 x i16> %vqdmlsl2.i)
+ %0 = extractelement <1 x i32> %vqdmlsl3.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vqdmlsls_s32(i64 %a, i32 %b, i32 %c) {
+; CHECK: test_vqdmlsls_s32
+; CHECK: sqdmlsl {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqdmlsl.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqdmlsl1.i = insertelement <1 x i32> undef, i32 %b, i32 0
+ %vqdmlsl2.i = insertelement <1 x i32> undef, i32 %c, i32 0
+ %vqdmlsl3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64> %vqdmlsl.i, <1 x i32> %vqdmlsl1.i, <1 x i32> %vqdmlsl2.i)
+ %0 = extractelement <1 x i64> %vqdmlsl3.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
+declare <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
+
+define i32 @test_vqdmullh_s16(i16 %a, i16 %b) {
+; CHECK: test_vqdmullh_s16
+; CHECK: sqdmull {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqdmull.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqdmull1.i = insertelement <1 x i16> undef, i16 %b, i32 0
+ %vqdmull2.i = call <1 x i32> @llvm.arm.neon.vqdmull.v1i32(<1 x i16> %vqdmull.i, <1 x i16> %vqdmull1.i)
+ %0 = extractelement <1 x i32> %vqdmull2.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vqdmulls_s32(i32 %a, i32 %b) {
+; CHECK: test_vqdmulls_s32
+; CHECK: sqdmull {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqdmull.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqdmull1.i = insertelement <1 x i32> undef, i32 %b, i32 0
+ %vqdmull2.i = call <1 x i64> @llvm.arm.neon.vqdmull.v1i64(<1 x i32> %vqdmull.i, <1 x i32> %vqdmull1.i)
+ %0 = extractelement <1 x i64> %vqdmull2.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i32> @llvm.arm.neon.vqdmull.v1i32(<1 x i16>, <1 x i16>)
+declare <1 x i64> @llvm.arm.neon.vqdmull.v1i64(<1 x i32>, <1 x i32>)
diff --git a/test/CodeGen/AArch64/neon-scalar-neg.ll b/test/CodeGen/AArch64/neon-scalar-neg.ll
new file mode 100644
index 000000000000..4dc9d519783d
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-neg.ll
@@ -0,0 +1,61 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define i64 @test_vnegd_s64(i64 %a) {
+; CHECK: test_vnegd_s64
+; CHECK: neg {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vneg.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vneg1.i = tail call <1 x i64> @llvm.aarch64.neon.vneg(<1 x i64> %vneg.i)
+ %0 = extractelement <1 x i64> %vneg1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vneg(<1 x i64>)
+
+define i8 @test_vqnegb_s8(i8 %a) {
+; CHECK: test_vqnegb_s8
+; CHECK: sqneg {{b[0-9]+}}, {{b[0-9]+}}
+entry:
+ %vqneg.i = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vqneg1.i = call <1 x i8> @llvm.arm.neon.vqneg.v1i8(<1 x i8> %vqneg.i)
+ %0 = extractelement <1 x i8> %vqneg1.i, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.arm.neon.vqneg.v1i8(<1 x i8>)
+
+define i16 @test_vqnegh_s16(i16 %a) {
+; CHECK: test_vqnegh_s16
+; CHECK: sqneg {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vqneg.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqneg1.i = call <1 x i16> @llvm.arm.neon.vqneg.v1i16(<1 x i16> %vqneg.i)
+ %0 = extractelement <1 x i16> %vqneg1.i, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.arm.neon.vqneg.v1i16(<1 x i16>)
+
+define i32 @test_vqnegs_s32(i32 %a) {
+; CHECK: test_vqnegs_s32
+; CHECK: sqneg {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vqneg.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqneg1.i = call <1 x i32> @llvm.arm.neon.vqneg.v1i32(<1 x i32> %vqneg.i)
+ %0 = extractelement <1 x i32> %vqneg1.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.arm.neon.vqneg.v1i32(<1 x i32>)
+
+define i64 @test_vqnegd_s64(i64 %a) {
+; CHECK: test_vqnegd_s64
+; CHECK: sqneg {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vqneg.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vqneg1.i = call <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64> %vqneg.i)
+ %0 = extractelement <1 x i64> %vqneg1.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.arm.neon.vqneg.v1i64(<1 x i64>)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-scalar-recip.ll b/test/CodeGen/AArch64/neon-scalar-recip.ll
new file mode 100644
index 000000000000..f21c27bee435
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-recip.ll
@@ -0,0 +1,116 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+define float @test_vrecpss_f32(float %a, float %b) {
+; CHECK: test_vrecpss_f32
+; CHECK: frecps {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %1 = insertelement <1 x float> undef, float %a, i32 0
+ %2 = insertelement <1 x float> undef, float %b, i32 0
+ %3 = call <1 x float> @llvm.arm.neon.vrecps.v1f32(<1 x float> %1, <1 x float> %2)
+ %4 = extractelement <1 x float> %3, i32 0
+ ret float %4
+}
+
+define double @test_vrecpsd_f64(double %a, double %b) {
+; CHECK: test_vrecpsd_f64
+; CHECK: frecps {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ %1 = insertelement <1 x double> undef, double %a, i32 0
+ %2 = insertelement <1 x double> undef, double %b, i32 0
+ %3 = call <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double> %1, <1 x double> %2)
+ %4 = extractelement <1 x double> %3, i32 0
+ ret double %4
+}
+
+declare <1 x float> @llvm.arm.neon.vrecps.v1f32(<1 x float>, <1 x float>)
+declare <1 x double> @llvm.arm.neon.vrecps.v1f64(<1 x double>, <1 x double>)
+
+define float @test_vrsqrtss_f32(float %a, float %b) {
+; CHECK: test_vrsqrtss_f32
+; CHECK: frsqrts {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ %1 = insertelement <1 x float> undef, float %a, i32 0
+ %2 = insertelement <1 x float> undef, float %b, i32 0
+ %3 = call <1 x float> @llvm.arm.neon.vrsqrts.v1f32(<1 x float> %1, <1 x float> %2)
+ %4 = extractelement <1 x float> %3, i32 0
+ ret float %4
+}
+
+define double @test_vrsqrtsd_f64(double %a, double %b) {
+; CHECK: test_vrsqrtsd_f64
+; CHECK: frsqrts {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ %1 = insertelement <1 x double> undef, double %a, i32 0
+ %2 = insertelement <1 x double> undef, double %b, i32 0
+ %3 = call <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double> %1, <1 x double> %2)
+ %4 = extractelement <1 x double> %3, i32 0
+ ret double %4
+}
+
+declare <1 x float> @llvm.arm.neon.vrsqrts.v1f32(<1 x float>, <1 x float>)
+declare <1 x double> @llvm.arm.neon.vrsqrts.v1f64(<1 x double>, <1 x double>)
+
+define float @test_vrecpes_f32(float %a) {
+; CHECK: test_vrecpes_f32
+; CHECK: frecpe {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vrecpe.i = insertelement <1 x float> undef, float %a, i32 0
+ %vrecpe1.i = tail call <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float> %vrecpe.i)
+ %0 = extractelement <1 x float> %vrecpe1.i, i32 0
+ ret float %0
+}
+
+define double @test_vrecped_f64(double %a) {
+; CHECK: test_vrecped_f64
+; CHECK: frecpe {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vrecpe.i = insertelement <1 x double> undef, double %a, i32 0
+ %vrecpe1.i = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %vrecpe.i)
+ %0 = extractelement <1 x double> %vrecpe1.i, i32 0
+ ret double %0
+}
+
+declare <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float>)
+declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
+
+define float @test_vrecpxs_f32(float %a) {
+; CHECK: test_vrecpxs_f32
+; CHECK: frecpx {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vrecpx.i = insertelement <1 x float> undef, float %a, i32 0
+ %vrecpx1.i = tail call <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float> %vrecpx.i)
+ %0 = extractelement <1 x float> %vrecpx1.i, i32 0
+ ret float %0
+}
+
+define double @test_vrecpxd_f64(double %a) {
+; CHECK: test_vrecpxd_f64
+; CHECK: frecpx {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vrecpx.i = insertelement <1 x double> undef, double %a, i32 0
+ %vrecpx1.i = tail call <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double> %vrecpx.i)
+ %0 = extractelement <1 x double> %vrecpx1.i, i32 0
+ ret double %0
+}
+
+declare <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float>)
+declare <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double>)
+
+define float @test_vrsqrtes_f32(float %a) {
+; CHECK: test_vrsqrtes_f32
+; CHECK: frsqrte {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vrsqrte.i = insertelement <1 x float> undef, float %a, i32 0
+ %vrsqrte1.i = tail call <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float> %vrsqrte.i)
+ %0 = extractelement <1 x float> %vrsqrte1.i, i32 0
+ ret float %0
+}
+
+define double @test_vrsqrted_f64(double %a) {
+; CHECK: test_vrsqrted_f64
+; CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vrsqrte.i = insertelement <1 x double> undef, double %a, i32 0
+ %vrsqrte1.i = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %vrsqrte.i)
+ %0 = extractelement <1 x double> %vrsqrte1.i, i32 0
+ ret double %0
+}
+
+declare <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float>)
+declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
new file mode 100644
index 000000000000..80e8dc339d68
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -0,0 +1,247 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+declare <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64>)
+
+define <1 x i64> @test_addp_v1i64(<2 x i64> %a) {
+; CHECK: test_addp_v1i64:
+ %val = call <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64> %a)
+; CHECK: addp d0, v0.2d
+ ret <1 x i64> %val
+}
+
+declare <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float>)
+
+define <1 x float> @test_faddp_v1f32(<2 x float> %a) {
+; CHECK: test_faddp_v1f32:
+ %val = call <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float> %a)
+; CHECK: faddp s0, v0.2s
+ ret <1 x float> %val
+}
+
+declare <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double>)
+
+define <1 x double> @test_faddp_v1f64(<2 x double> %a) {
+; CHECK: test_faddp_v1f64:
+ %val = call <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double> %a)
+; CHECK: faddp d0, v0.2d
+ ret <1 x double> %val
+}
+
+
+declare <1 x float> @llvm.aarch64.neon.vpmax(<2 x float>)
+
+define <1 x float> @test_fmaxp_v1f32(<2 x float> %a) {
+; CHECK: test_fmaxp_v1f32:
+ %val = call <1 x float> @llvm.aarch64.neon.vpmax(<2 x float> %a)
+; CHECK: fmaxp s0, v0.2s
+ ret <1 x float> %val
+}
+
+declare <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double>)
+
+define <1 x double> @test_fmaxp_v1f64(<2 x double> %a) {
+; CHECK: test_fmaxp_v1f64:
+ %val = call <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double> %a)
+; CHECK: fmaxp d0, v0.2d
+ ret <1 x double> %val
+}
+
+
+declare <1 x float> @llvm.aarch64.neon.vpmin(<2 x float>)
+
+define <1 x float> @test_fminp_v1f32(<2 x float> %a) {
+; CHECK: test_fminp_v1f32:
+ %val = call <1 x float> @llvm.aarch64.neon.vpmin(<2 x float> %a)
+; CHECK: fminp s0, v0.2s
+ ret <1 x float> %val
+}
+
+declare <1 x double> @llvm.aarch64.neon.vpminq(<2 x double>)
+
+define <1 x double> @test_fminp_v1f64(<2 x double> %a) {
+; CHECK: test_fminp_v1f64:
+ %val = call <1 x double> @llvm.aarch64.neon.vpminq(<2 x double> %a)
+; CHECK: fminp d0, v0.2d
+ ret <1 x double> %val
+}
+
+declare <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float>)
+
+define <1 x float> @test_fmaxnmp_v1f32(<2 x float> %a) {
+; CHECK: test_fmaxnmp_v1f32:
+ %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float> %a)
+; CHECK: fmaxnmp s0, v0.2s
+ ret <1 x float> %val
+}
+
+declare <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double>)
+
+define <1 x double> @test_fmaxnmp_v1f64(<2 x double> %a) {
+; CHECK: test_fmaxnmp_v1f64:
+ %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double> %a)
+; CHECK: fmaxnmp d0, v0.2d
+ ret <1 x double> %val
+}
+
+declare <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float>)
+
+define <1 x float> @test_fminnmp_v1f32(<2 x float> %a) {
+; CHECK: test_fminnmp_v1f32:
+ %val = call <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float> %a)
+; CHECK: fminnmp s0, v0.2s
+ ret <1 x float> %val
+}
+
+declare <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double>)
+
+define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
+; CHECK: test_fminnmp_v1f64:
+ %val = call <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double> %a)
+; CHECK: fminnmp d0, v0.2d
+ ret <1 x double> %val
+}
+
+define float @test_vaddv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vaddv_f32
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define float @test_vaddvq_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vaddvq_f32
+; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define double @test_vaddvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vaddvq_f64
+; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+ %2 = extractelement <1 x double> %1, i32 0
+ ret double %2
+}
+
+define float @test_vmaxv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxv_f32
+; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define double @test_vmaxvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxvq_f64
+; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+ %2 = extractelement <1 x double> %1, i32 0
+ ret double %2
+}
+
+define float @test_vminv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminv_f32
+; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define double @test_vminvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminvq_f64
+; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+ %2 = extractelement <1 x double> %1, i32 0
+ ret double %2
+}
+
+define double @test_vmaxnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vmaxnmvq_f64
+; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+ %2 = extractelement <1 x double> %1, i32 0
+ ret double %2
+}
+
+define float @test_vmaxnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vmaxnmv_f32
+; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define double @test_vminnmvq_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vminnmvq_f64
+; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+ %2 = extractelement <1 x double> %1, i32 0
+ ret double %2
+}
+
+define float @test_vminnmv_f32(<2 x float> %a) {
+; CHECK-LABEL: test_vminnmv_f32
+; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+ %2 = extractelement <1 x float> %1, i32 0
+ ret float %2
+}
+
+define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_s64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %1
+}
+
+define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vpaddq_u64
+; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
+ %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+ ret <2 x i64> %1
+}
+
+define i64 @test_vaddvq_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_s64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+ %2 = extractelement <1 x i64> %1, i32 0
+ ret i64 %2
+}
+
+define i64 @test_vaddvq_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vaddvq_u64
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+ %2 = extractelement <1 x i64> %1, i32 0
+ ret i64 %2
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
+
+declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
+
+declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
+
+declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll b/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll
new file mode 100644
index 000000000000..83ceb4ebdad5
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-rounding-shift.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+
+declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_urshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_urshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+;CHECK: urshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_srshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_srshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+;CHECK: srshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrshldu(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vrshlds(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_urshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_urshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vrshldu(<1 x i64> %lhs, <1 x i64> %rhs)
+;CHECK: urshl {{d[0-31]+}}, {{d[0-31]+}}, {{d[0-31]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_srshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_srshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vrshlds(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: srshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+
+
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll b/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll
new file mode 100644
index 000000000000..bd66f80cebb6
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-saturating-add-sub.ll
@@ -0,0 +1,242 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
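+; Scalar saturating add/sub (uqadd/sqadd/uqsub/sqsub) on <1 x i8/i16/i32/i64>,
+; plus the suqadd/usqadd accumulating forms wrapped in insert/extractelement.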
+
+declare <1 x i8> @llvm.arm.neon.vqaddu.v1i8(<1 x i8>, <1 x i8>)
+declare <1 x i8> @llvm.arm.neon.vqadds.v1i8(<1 x i8>, <1 x i8>)
+
+define <1 x i8> @test_uqadd_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_uqadd_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.arm.neon.vqaddu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: uqadd {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+define <1 x i8> @test_sqadd_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_sqadd_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.arm.neon.vqadds.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: sqadd {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+declare <1 x i8> @llvm.arm.neon.vqsubu.v1i8(<1 x i8>, <1 x i8>)
+declare <1 x i8> @llvm.arm.neon.vqsubs.v1i8(<1 x i8>, <1 x i8>)
+
+define <1 x i8> @test_uqsub_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_uqsub_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.arm.neon.vqsubu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: uqsub {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+define <1 x i8> @test_sqsub_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_sqsub_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.arm.neon.vqsubs.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: sqsub {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+declare <1 x i16> @llvm.arm.neon.vqaddu.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i16> @llvm.arm.neon.vqadds.v1i16(<1 x i16>, <1 x i16>)
+
+define <1 x i16> @test_uqadd_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_uqadd_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.arm.neon.vqaddu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: uqadd {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+define <1 x i16> @test_sqadd_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_sqadd_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.arm.neon.vqadds.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: sqadd {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+declare <1 x i16> @llvm.arm.neon.vqsubu.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i16> @llvm.arm.neon.vqsubs.v1i16(<1 x i16>, <1 x i16>)
+
+define <1 x i16> @test_uqsub_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_uqsub_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.arm.neon.vqsubu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: uqsub {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+define <1 x i16> @test_sqsub_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_sqsub_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.arm.neon.vqsubs.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: sqsub {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+declare <1 x i32> @llvm.arm.neon.vqaddu.v1i32(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.arm.neon.vqadds.v1i32(<1 x i32>, <1 x i32>)
+
+define <1 x i32> @test_uqadd_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_uqadd_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.arm.neon.vqaddu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: uqadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+define <1 x i32> @test_sqadd_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_sqadd_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.arm.neon.vqadds.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: sqadd {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+declare <1 x i32> @llvm.arm.neon.vqsubu.v1i32(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.arm.neon.vqsubs.v1i32(<1 x i32>, <1 x i32>)
+
+define <1 x i32> @test_uqsub_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_uqsub_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.arm.neon.vqsubu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: uqsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+
+define <1 x i32> @test_sqsub_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_sqsub_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.arm.neon.vqsubs.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: sqsub {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqadd_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqadd_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqadd_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqadd_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqadd {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqsub_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqsub_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqsub_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqsub_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqsub {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define i8 @test_vuqaddb_s8(i8 %a, i8 %b) {
+; CHECK: test_vuqaddb_s8
+; CHECK: suqadd {{b[0-9]+}}, {{b[0-9]+}}
+entry:
+ %vuqadd.i = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vuqadd1.i = insertelement <1 x i8> undef, i8 %b, i32 0
+ %vuqadd2.i = call <1 x i8> @llvm.aarch64.neon.vuqadd.v1i8(<1 x i8> %vuqadd.i, <1 x i8> %vuqadd1.i)
+ %0 = extractelement <1 x i8> %vuqadd2.i, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqadd.v1i8(<1 x i8>, <1 x i8>)
+
+define i16 @test_vuqaddh_s16(i16 %a, i16 %b) {
+; CHECK: test_vuqaddh_s16
+; CHECK: suqadd {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vuqadd.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqadd1.i = insertelement <1 x i16> undef, i16 %b, i32 0
+ %vuqadd2.i = call <1 x i16> @llvm.aarch64.neon.vuqadd.v1i16(<1 x i16> %vuqadd.i, <1 x i16> %vuqadd1.i)
+ %0 = extractelement <1 x i16> %vuqadd2.i, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqadd.v1i16(<1 x i16>, <1 x i16>)
+
+define i32 @test_vuqadds_s32(i32 %a, i32 %b) {
+; CHECK: test_vuqadds_s32
+; CHECK: suqadd {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vuqadd.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqadd1.i = insertelement <1 x i32> undef, i32 %b, i32 0
+ %vuqadd2.i = call <1 x i32> @llvm.aarch64.neon.vuqadd.v1i32(<1 x i32> %vuqadd.i, <1 x i32> %vuqadd1.i)
+ %0 = extractelement <1 x i32> %vuqadd2.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqadd.v1i32(<1 x i32>, <1 x i32>)
+
+define i64 @test_vuqaddd_s64(i64 %a, i64 %b) {
+; CHECK: test_vuqaddd_s64
+; CHECK: suqadd {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vuqadd.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqadd1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vuqadd2.i = call <1 x i64> @llvm.aarch64.neon.vuqadd.v1i64(<1 x i64> %vuqadd.i, <1 x i64> %vuqadd1.i)
+ %0 = extractelement <1 x i64> %vuqadd2.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsqadd.v1i64(<1 x i64>, <1 x i64>)
+
+define i8 @test_vsqaddb_u8(i8 %a, i8 %b) {
+; CHECK: test_vsqaddb_u8
+; CHECK: usqadd {{b[0-9]+}}, {{b[0-9]+}}
+entry:
+ %vsqadd.i = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vsqadd1.i = insertelement <1 x i8> undef, i8 %b, i32 0
+ %vsqadd2.i = call <1 x i8> @llvm.aarch64.neon.vsqadd.v1i8(<1 x i8> %vsqadd.i, <1 x i8> %vsqadd1.i)
+ %0 = extractelement <1 x i8> %vsqadd2.i, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vuqadd.v1i8(<1 x i8>, <1 x i8>)
+
+define i16 @test_vsqaddh_u16(i16 %a, i16 %b) {
+; CHECK: test_vsqaddh_u16
+; CHECK: usqadd {{h[0-9]+}}, {{h[0-9]+}}
+entry:
+ %vsqadd.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqadd1.i = insertelement <1 x i16> undef, i16 %b, i32 0
+ %vsqadd2.i = call <1 x i16> @llvm.aarch64.neon.vsqadd.v1i16(<1 x i16> %vsqadd.i, <1 x i16> %vsqadd1.i)
+ %0 = extractelement <1 x i16> %vsqadd2.i, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vuqadd.v1i16(<1 x i16>, <1 x i16>)
+
+define i32 @test_vsqadds_u32(i32 %a, i32 %b) {
+; CHECK: test_vsqadds_u32
+; CHECK: usqadd {{s[0-9]+}}, {{s[0-9]+}}
+entry:
+ %vsqadd.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqadd1.i = insertelement <1 x i32> undef, i32 %b, i32 0
+ %vsqadd2.i = call <1 x i32> @llvm.aarch64.neon.vsqadd.v1i32(<1 x i32> %vsqadd.i, <1 x i32> %vsqadd1.i)
+ %0 = extractelement <1 x i32> %vsqadd2.i, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vuqadd.v1i32(<1 x i32>, <1 x i32>)
+
+define i64 @test_vsqaddd_u64(i64 %a, i64 %b) {
+; CHECK: test_vsqaddd_u64
+; CHECK: usqadd {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vsqadd.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqadd1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsqadd2.i = call <1 x i64> @llvm.aarch64.neon.vsqadd.v1i64(<1 x i64> %vsqadd.i, <1 x i64> %vsqadd1.i)
+ %0 = extractelement <1 x i64> %vsqadd2.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vuqadd.v1i64(<1 x i64>, <1 x i64>)
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll b/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll
new file mode 100644
index 000000000000..0fd67dfa901c
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-saturating-rounding-shift.ll
@@ -0,0 +1,94 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
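+; Scalar saturating rounding shifts: uqrshl/sqrshl on the b, h, s and d registers.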
+
+declare <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqrshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqrshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqrshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqrshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqrshlu.v1i8(<1 x i8>, <1 x i8>)
+declare <1 x i8> @llvm.aarch64.neon.vqrshls.v1i8(<1 x i8>, <1 x i8>)
+
+define <1 x i8> @test_uqrshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_uqrshl_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqrshlu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: uqrshl {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+
+ ret <1 x i8> %tmp1
+}
+
+define <1 x i8> @test_sqrshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_sqrshl_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqrshls.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: sqrshl {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqrshlu.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i16> @llvm.aarch64.neon.vqrshls.v1i16(<1 x i16>, <1 x i16>)
+
+define <1 x i16> @test_uqrshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_uqrshl_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqrshlu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: uqrshl {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+
+ ret <1 x i16> %tmp1
+}
+
+define <1 x i16> @test_sqrshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_sqrshl_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqrshls.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: sqrshl {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqrshlu.v1i32(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.aarch64.neon.vqrshls.v1i32(<1 x i32>, <1 x i32>)
+
+define <1 x i32> @test_uqrshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_uqrshl_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqrshlu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: uqrshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+
+ ret <1 x i32> %tmp1
+}
+
+define <1 x i32> @test_sqrshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_sqrshl_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqrshls.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: sqrshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqrshlu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vqrshls.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqrshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqrshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqrshlu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqrshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqrshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqrshls.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqrshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+
+
diff --git a/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll b/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll
new file mode 100644
index 000000000000..8fdea24a36d7
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-saturating-shift.ll
@@ -0,0 +1,88 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
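+; Scalar saturating shifts: uqshl/sqshl on the b, h, s and d registers.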
+
+declare <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshlu.v1i8(<1 x i8>, <1 x i8>)
+declare <1 x i8> @llvm.aarch64.neon.vqshls.v1i8(<1 x i8>, <1 x i8>)
+
+define <1 x i8> @test_uqshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_uqshl_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+define <1 x i8> @test_sqshl_v1i8_aarch64(<1 x i8> %lhs, <1 x i8> %rhs) {
+; CHECK: test_sqshl_v1i8_aarch64:
+ %tmp1 = call <1 x i8> @llvm.aarch64.neon.vqshls.v1i8(<1 x i8> %lhs, <1 x i8> %rhs)
+; CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, {{b[0-9]+}}
+ ret <1 x i8> %tmp1
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshlu.v1i16(<1 x i16>, <1 x i16>)
+declare <1 x i16> @llvm.aarch64.neon.vqshls.v1i16(<1 x i16>, <1 x i16>)
+
+define <1 x i16> @test_uqshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_uqshl_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: uqshl {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+define <1 x i16> @test_sqshl_v1i16_aarch64(<1 x i16> %lhs, <1 x i16> %rhs) {
+; CHECK: test_sqshl_v1i16_aarch64:
+ %tmp1 = call <1 x i16> @llvm.aarch64.neon.vqshls.v1i16(<1 x i16> %lhs, <1 x i16> %rhs)
+; CHECK: sqshl {{h[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
+ ret <1 x i16> %tmp1
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshlu.v1i32(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.aarch64.neon.vqshls.v1i32(<1 x i32>, <1 x i32>)
+
+define <1 x i32> @test_uqshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_uqshl_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+define <1 x i32> @test_sqshl_v1i32_aarch64(<1 x i32> %lhs, <1 x i32> %rhs) {
+; CHECK: test_sqshl_v1i32_aarch64:
+ %tmp1 = call <1 x i32> @llvm.aarch64.neon.vqshls.v1i32(<1 x i32> %lhs, <1 x i32> %rhs)
+; CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
+ ret <1 x i32> %tmp1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshlu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vqshls.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_uqshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_uqshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sqshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sqshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vqshls.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+
diff --git a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
new file mode 100644
index 000000000000..62243618171a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
@@ -0,0 +1,531 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
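+; Scalar shift-by-immediate forms: sshr/ushr, rounding and accumulating shifts,
+; shl, saturating shl, sri/sli and the saturating narrowing shifts, all built
+; from <1 x N> intrinsics with the result extracted back to a scalar.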
+
+define i64 @test_vshrd_n_s64(i64 %a) {
+; CHECK: test_vshrd_n_s64
+; CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsshr1 = call <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64> %vsshr, i32 63)
+ %0 = extractelement <1 x i64> %vsshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64>, i32)
+
+define i64 @test_vshrd_n_u64(i64 %a) {
+; CHECK: test_vshrd_n_u64
+; CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vushr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vushr1 = call <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64> %vushr, i32 63)
+ %0 = extractelement <1 x i64> %vushr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64>, i32)
+
+define i64 @test_vrshrd_n_s64(i64 %a) {
+; CHECK: test_vrshrd_n_s64
+; CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsrshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsrshr1 = call <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64> %vsrshr, i32 63)
+ %0 = extractelement <1 x i64> %vsrshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64>, i32)
+
+define i64 @test_vrshrd_n_u64(i64 %a) {
+; CHECK: test_vrshrd_n_u64
+; CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vurshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vurshr1 = call <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64> %vurshr, i32 63)
+ %0 = extractelement <1 x i64> %vurshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64>, i32)
+
+define i64 @test_vsrad_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vsrad_n_s64
+; CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vssra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vssra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vssra2 = call <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64> %vssra, <1 x i64> %vssra1, i32 63)
+ %0 = extractelement <1 x i64> %vssra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vsrad_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vsrad_n_u64
+; CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vusra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vusra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vusra2 = call <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64> %vusra, <1 x i64> %vusra1, i32 63)
+ %0 = extractelement <1 x i64> %vusra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vrsrad_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vrsrad_n_s64
+; CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsrsra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsrsra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsrsra2 = call <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64> %vsrsra, <1 x i64> %vsrsra1, i32 63)
+ %0 = extractelement <1 x i64> %vsrsra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vrsrad_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vrsrad_n_u64
+; CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vursra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vursra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vursra2 = call <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64> %vursra, <1 x i64> %vursra1, i32 63)
+ %0 = extractelement <1 x i64> %vursra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vshld_n_s64(i64 %a) {
+; CHECK: test_vshld_n_s64
+; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
+ %0 = extractelement <1 x i64> %vshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64>, i32)
+
+define i64 @test_vshld_n_u64(i64 %a) {
+; CHECK: test_vshld_n_u64
+; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
+ %0 = extractelement <1 x i64> %vshl1, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vqshlb_n_s8(i8 %a) {
+; CHECK: test_vqshlb_n_s8
+; CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vsqshl = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vsqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8> %vsqshl, i32 7)
+ %0 = extractelement <1 x i8> %vsqshl1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshlh_n_s16(i16 %a) {
+; CHECK: test_vqshlh_n_s16
+; CHECK: sqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshl = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16> %vsqshl, i32 15)
+ %0 = extractelement <1 x i16> %vsqshl1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshls_n_s32(i32 %a) {
+; CHECK: test_vqshls_n_s32
+; CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshl = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32> %vsqshl, i32 31)
+ %0 = extractelement <1 x i32> %vsqshl1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshld_n_s64(i64 %a) {
+; CHECK: test_vqshld_n_s64
+; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64> %vsqshl, i32 63)
+ %0 = extractelement <1 x i64> %vsqshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64>, i32)
+
+define i8 @test_vqshlb_n_u8(i8 %a) {
+; CHECK: test_vqshlb_n_u8
+; CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vuqshl = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vuqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8> %vuqshl, i32 7)
+ %0 = extractelement <1 x i8> %vuqshl1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshlh_n_u16(i16 %a) {
+; CHECK: test_vqshlh_n_u16
+; CHECK: uqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vuqshl = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16> %vuqshl, i32 15)
+ %0 = extractelement <1 x i16> %vuqshl1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshls_n_u32(i32 %a) {
+; CHECK: test_vqshls_n_u32
+; CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vuqshl = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32> %vuqshl, i32 31)
+ %0 = extractelement <1 x i32> %vuqshl1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshld_n_u64(i64 %a) {
+; CHECK: test_vqshld_n_u64
+; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vuqshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64> %vuqshl, i32 63)
+ %0 = extractelement <1 x i64> %vuqshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64>, i32)
+
+define i8 @test_vqshlub_n_s8(i8 %a) {
+; CHECK: test_vqshlub_n_s8
+; CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vsqshlu = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vsqshlu1 = call <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8> %vsqshlu, i32 7)
+ %0 = extractelement <1 x i8> %vsqshlu1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshluh_n_s16(i16 %a) {
+; CHECK: test_vqshluh_n_s16
+; CHECK: sqshlu {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshlu = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshlu1 = call <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16> %vsqshlu, i32 15)
+ %0 = extractelement <1 x i16> %vsqshlu1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshlus_n_s32(i32 %a) {
+; CHECK: test_vqshlus_n_s32
+; CHECK: sqshlu {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshlu = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshlu1 = call <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32> %vsqshlu, i32 31)
+ %0 = extractelement <1 x i32> %vsqshlu1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshlud_n_s64(i64 %a) {
+; CHECK: test_vqshlud_n_s64
+; CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshlu = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshlu1 = call <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64> %vsqshlu, i32 63)
+ %0 = extractelement <1 x i64> %vsqshlu1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64>, i32)
+
+define i64 @test_vsrid_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vsrid_n_s64
+; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+ %0 = extractelement <1 x i64> %vsri2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vsrid_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vsrid_n_u64
+; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
+ %0 = extractelement <1 x i64> %vsri2, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vslid_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vslid_n_s64
+; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+ %0 = extractelement <1 x i64> %vsli2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vslid_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vslid_n_u64
+; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
+ %0 = extractelement <1 x i64> %vsli2, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vqshrnh_n_s16(i16 %a) {
+; CHECK: test_vqshrnh_n_s16
+; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vsqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 8)
+ %0 = extractelement <1 x i8> %vsqshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshrns_n_s32(i32 %a) {
+; CHECK: test_vqshrns_n_s32
+; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vsqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 16)
+ %0 = extractelement <1 x i16> %vsqshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrnd_n_s64(i64 %a) {
+; CHECK: test_vqshrnd_n_s64
+; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vsqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 32)
+ %0 = extractelement <1 x i32> %vsqshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqshrnh_n_u16(i16 %a) {
+; CHECK: test_vqshrnh_n_u16
+; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vuqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 8)
+ %0 = extractelement <1 x i8> %vuqshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshrns_n_u32(i32 %a) {
+; CHECK: test_vqshrns_n_u32
+; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vuqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 16)
+ %0 = extractelement <1 x i16> %vuqshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrnd_n_u64(i64 %a) {
+; CHECK: test_vqshrnd_n_u64
+; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vuqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 32)
+ %0 = extractelement <1 x i32> %vuqshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrnh_n_s16(i16 %a) {
+; CHECK: test_vqrshrnh_n_s16
+; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vsqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 8)
+ %0 = extractelement <1 x i8> %vsqrshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshrns_n_s32(i32 %a) {
+; CHECK: test_vqrshrns_n_s32
+; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vsqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 16)
+ %0 = extractelement <1 x i16> %vsqrshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrnd_n_s64(i64 %a) {
+; CHECK: test_vqrshrnd_n_s64
+; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vsqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 32)
+ %0 = extractelement <1 x i32> %vsqrshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrnh_n_u16(i16 %a) {
+; CHECK: test_vqrshrnh_n_u16
+; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vuqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 8)
+ %0 = extractelement <1 x i8> %vuqrshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshrns_n_u32(i32 %a) {
+; CHECK: test_vqrshrns_n_u32
+; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vuqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 16)
+ %0 = extractelement <1 x i16> %vuqrshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrnd_n_u64(i64 %a) {
+; CHECK: test_vqrshrnd_n_u64
+; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vuqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 32)
+ %0 = extractelement <1 x i32> %vuqrshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqshrunh_n_s16(i16 %a) {
+; CHECK: test_vqshrunh_n_s16
+; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vsqshrun = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 8)
+ %0 = extractelement <1 x i8> %vsqshrun1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshruns_n_s32(i32 %a) {
+; CHECK: test_vqshruns_n_s32
+; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vsqshrun = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 16)
+ %0 = extractelement <1 x i16> %vsqshrun1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrund_n_s64(i64 %a) {
+; CHECK: test_vqshrund_n_s64
+; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vsqshrun = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 32)
+ %0 = extractelement <1 x i32> %vsqshrun1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrunh_n_s16(i16 %a) {
+; CHECK: test_vqrshrunh_n_s16
+; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
+entry:
+ %vsqrshrun = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 8)
+ %0 = extractelement <1 x i8> %vsqrshrun1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshruns_n_s32(i32 %a) {
+; CHECK: test_vqrshruns_n_s32
+; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
+entry:
+ %vsqrshrun = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 16)
+ %0 = extractelement <1 x i16> %vsqrshrun1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrund_n_s64(i64 %a) {
+; CHECK: test_vqrshrund_n_s64
+; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
+entry:
+ %vsqrshrun = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 32)
+ %0 = extractelement <1 x i32> %vsqrshrun1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64>, i32)
diff --git a/test/CodeGen/AArch64/neon-scalar-shift.ll b/test/CodeGen/AArch64/neon-scalar-shift.ll
new file mode 100644
index 000000000000..1222be50cf4b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-shift.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
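+; Scalar variable shifts on <1 x i64>: ushl/sshl via both the llvm.arm.neon and
+; llvm.aarch64.neon intrinsic spellings.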
+
+declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_ushl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_ushl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: ushl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sshl_v1i64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sshl_v1i64:
+ %tmp1 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshldu(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vshlds(<1 x i64>, <1 x i64>)
+
+define <1 x i64> @test_ushl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_ushl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vshldu(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: ushl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+define <1 x i64> @test_sshl_v1i64_aarch64(<1 x i64> %lhs, <1 x i64> %rhs) {
+; CHECK: test_sshl_v1i64_aarch64:
+ %tmp1 = call <1 x i64> @llvm.aarch64.neon.vshlds(<1 x i64> %lhs, <1 x i64> %rhs)
+; CHECK: sshl {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ ret <1 x i64> %tmp1
+}
+
+
diff --git a/test/CodeGen/AArch64/neon-shift-left-long.ll b/test/CodeGen/AArch64/neon-shift-left-long.ll
new file mode 100644
index 000000000000..d45c47685b0f
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-shift-left-long.ll
@@ -0,0 +1,193 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
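+; sshll/ushll and their "2" (high-half) variants, matched from sext/zext plus a
+; constant splat shl, including the #0 shift produced by a bare extension.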
+
+define <8 x i16> @test_sshll_v8i8(<8 x i8> %a) {
+; CHECK: test_sshll_v8i8:
+; CHECK: sshll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #3
+ %1 = sext <8 x i8> %a to <8 x i16>
+ %tmp = shl <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll_v4i16(<4 x i16> %a) {
+; CHECK: test_sshll_v4i16:
+; CHECK: sshll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #9
+ %1 = sext <4 x i16> %a to <4 x i32>
+ %tmp = shl <4 x i32> %1, <i32 9, i32 9, i32 9, i32 9>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll_v2i32(<2 x i32> %a) {
+; CHECK: test_sshll_v2i32:
+; CHECK: sshll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #19
+ %1 = sext <2 x i32> %a to <2 x i64>
+ %tmp = shl <2 x i64> %1, <i64 19, i64 19>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll_v8i8(<8 x i8> %a) {
+; CHECK: test_ushll_v8i8:
+; CHECK: ushll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #3
+ %1 = zext <8 x i8> %a to <8 x i16>
+ %tmp = shl <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll_v4i16(<4 x i16> %a) {
+; CHECK: test_ushll_v4i16:
+; CHECK: ushll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #9
+ %1 = zext <4 x i16> %a to <4 x i32>
+ %tmp = shl <4 x i32> %1, <i32 9, i32 9, i32 9, i32 9>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll_v2i32(<2 x i32> %a) {
+; CHECK: test_ushll_v2i32:
+; CHECK: ushll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #19
+ %1 = zext <2 x i32> %a to <2 x i64>
+ %tmp = shl <2 x i64> %1, <i64 19, i64 19>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll2_v16i8(<16 x i8> %a) {
+; CHECK: test_sshll2_v16i8:
+; CHECK: sshll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #3
+ %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = sext <8 x i8> %1 to <8 x i16>
+ %tmp = shl <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll2_v8i16(<8 x i16> %a) {
+; CHECK: test_sshll2_v8i16:
+; CHECK: sshll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #9
+ %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %2 = sext <4 x i16> %1 to <4 x i32>
+ %tmp = shl <4 x i32> %2, <i32 9, i32 9, i32 9, i32 9>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll2_v4i32(<4 x i32> %a) {
+; CHECK: test_sshll2_v4i32:
+; CHECK: sshll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #19
+ %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %2 = sext <2 x i32> %1 to <2 x i64>
+ %tmp = shl <2 x i64> %2, <i64 19, i64 19>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll2_v16i8(<16 x i8> %a) {
+; CHECK: test_ushll2_v16i8:
+; CHECK: ushll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #3
+ %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %2 = zext <8 x i8> %1 to <8 x i16>
+ %tmp = shl <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll2_v8i16(<8 x i16> %a) {
+; CHECK: test_ushll2_v8i16:
+; CHECK: ushll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #9
+ %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %2 = zext <4 x i16> %1 to <4 x i32>
+ %tmp = shl <4 x i32> %2, <i32 9, i32 9, i32 9, i32 9>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll2_v4i32(<4 x i32> %a) {
+; CHECK: test_ushll2_v4i32:
+; CHECK: ushll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #19
+ %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %2 = zext <2 x i32> %1 to <2 x i64>
+ %tmp = shl <2 x i64> %2, <i64 19, i64 19>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll_shl0_v8i8(<8 x i8> %a) {
+; CHECK: test_sshll_shl0_v8i8:
+; CHECK: sshll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #0
+ %tmp = sext <8 x i8> %a to <8 x i16>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll_shl0_v4i16(<4 x i16> %a) {
+; CHECK: test_sshll_shl0_v4i16:
+; CHECK: sshll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #0
+ %tmp = sext <4 x i16> %a to <4 x i32>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll_shl0_v2i32(<2 x i32> %a) {
+; CHECK: test_sshll_shl0_v2i32:
+; CHECK: sshll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #0
+ %tmp = sext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll_shl0_v8i8(<8 x i8> %a) {
+; CHECK: test_ushll_shl0_v8i8:
+; CHECK: ushll {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, #0
+ %tmp = zext <8 x i8> %a to <8 x i16>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll_shl0_v4i16(<4 x i16> %a) {
+; CHECK: test_ushll_shl0_v4i16:
+; CHECK: ushll {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, #0
+ %tmp = zext <4 x i16> %a to <4 x i32>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll_shl0_v2i32(<2 x i32> %a) {
+; CHECK: test_ushll_shl0_v2i32:
+; CHECK: ushll {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, #0
+ %tmp = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_sshll2_shl0_v16i8(<16 x i8> %a) {
+; CHECK: test_sshll2_shl0_v16i8:
+; CHECK: sshll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #0
+ %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp = sext <8 x i8> %1 to <8 x i16>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_sshll2_shl0_v8i16(<8 x i16> %a) {
+; CHECK: test_sshll2_shl0_v8i16:
+; CHECK: sshll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #0
+ %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp = sext <4 x i16> %1 to <4 x i32>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_sshll2_shl0_v4i32(<4 x i32> %a) {
+; CHECK: test_sshll2_shl0_v4i32:
+; CHECK: sshll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #0
+ %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp = sext <2 x i32> %1 to <2 x i64>
+ ret <2 x i64> %tmp
+}
+
+define <8 x i16> @test_ushll2_shl0_v16i8(<16 x i8> %a) {
+; CHECK: test_ushll2_shl0_v16i8:
+; CHECK: ushll2 {{v[0-9]+}}.8h, {{v[0-9]+}}.16b, #0
+ %1 = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %tmp = zext <8 x i8> %1 to <8 x i16>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_ushll2_shl0_v8i16(<8 x i16> %a) {
+; CHECK: test_ushll2_shl0_v8i16:
+; CHECK: ushll2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, #0
+ %1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %tmp = zext <4 x i16> %1 to <4 x i32>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_ushll2_shl0_v4i32(<4 x i32> %a) {
+; CHECK: test_ushll2_shl0_v4i32:
+; CHECK: ushll2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, #0
+ %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ %tmp = zext <2 x i32> %1 to <2 x i64>
+ ret <2 x i64> %tmp
+}
diff --git a/test/CodeGen/AArch64/neon-shift.ll b/test/CodeGen/AArch64/neon-shift.ll
new file mode 100644
index 000000000000..33b04ceb4895
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-shift.ll
@@ -0,0 +1,171 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
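+; Vector variable shifts: ushl/sshl for each element type via the vshiftu/vshifts
+; intrinsics, plus shl by a constant splat immediate.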
+
+declare <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8>, <8 x i8>)
+declare <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8>, <8 x i8>)
+
+define <8 x i8> @test_ushl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_ushl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: ushl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+define <8 x i8> @test_sshl_v8i8(<8 x i8> %lhs, <8 x i8> %rhs) {
+; CHECK: test_sshl_v8i8:
+ %tmp1 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %lhs, <8 x i8> %rhs)
+; CHECK: sshl v0.8b, v0.8b, v1.8b
+ ret <8 x i8> %tmp1
+}
+
+declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_ushl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_ushl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: ushl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+define <16 x i8> @test_sshl_v16i8(<16 x i8> %lhs, <16 x i8> %rhs) {
+; CHECK: test_sshl_v16i8:
+ %tmp1 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %lhs, <16 x i8> %rhs)
+; CHECK: sshl v0.16b, v0.16b, v1.16b
+ ret <16 x i8> %tmp1
+}
+
+declare <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16>, <4 x i16>)
+declare <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16>, <4 x i16>)
+
+define <4 x i16> @test_ushl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_ushl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: ushl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+define <4 x i16> @test_sshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; CHECK: test_sshl_v4i16:
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+; CHECK: sshl v0.4h, v0.4h, v1.4h
+ ret <4 x i16> %tmp1
+}
+
+declare <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_ushl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_ushl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: ushl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+define <8 x i16> @test_sshl_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; CHECK: test_sshl_v8i16:
+ %tmp1 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+; CHECK: sshl v0.8h, v0.8h, v1.8h
+ ret <8 x i16> %tmp1
+}
+
+declare <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32>, <2 x i32>)
+
+define <2 x i32> @test_ushl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_ushl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: ushl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+define <2 x i32> @test_sshl_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; CHECK: test_sshl_v2i32:
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+; CHECK: sshl v0.2s, v0.2s, v1.2s
+ ret <2 x i32> %tmp1
+}
+
+declare <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32>, <4 x i32>)
+
+define <4 x i32> @test_ushl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_ushl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: ushl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+define <4 x i32> @test_sshl_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; CHECK: test_sshl_v4i32:
+ %tmp1 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+; CHECK: sshl v0.4s, v0.4s, v1.4s
+ ret <4 x i32> %tmp1
+}
+
+declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>)
+
+define <2 x i64> @test_ushl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_ushl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: ushl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+define <2 x i64> @test_sshl_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; CHECK: test_sshl_v2i64:
+ %tmp1 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+; CHECK: sshl v0.2d, v0.2d, v1.2d
+ ret <2 x i64> %tmp1
+}
+
+
+define <8 x i8> @test_shl_v8i8(<8 x i8> %a) {
+; CHECK: test_shl_v8i8:
+; CHECK: shl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %tmp = shl <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <8 x i8> %tmp
+}
+
+define <4 x i16> @test_shl_v4i16(<4 x i16> %a) {
+; CHECK: test_shl_v4i16:
+; CHECK: shl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %tmp = shl <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %tmp
+}
+
+define <2 x i32> @test_shl_v2i32(<2 x i32> %a) {
+; CHECK: test_shl_v2i32:
+; CHECK: shl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %tmp = shl <2 x i32> %a, <i32 3, i32 3>
+ ret <2 x i32> %tmp
+}
+
+define <16 x i8> @test_shl_v16i8(<16 x i8> %a) {
+; CHECK: test_shl_v16i8:
+; CHECK: shl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %tmp = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %tmp
+}
+
+define <8 x i16> @test_shl_v8i16(<8 x i16> %a) {
+; CHECK: test_shl_v8i16:
+; CHECK: shl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %tmp = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %tmp
+}
+
+define <4 x i32> @test_shl_v4i32(<4 x i32> %a) {
+; CHECK: test_shl_v4i32:
+; CHECK: shl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %tmp = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %tmp
+}
+
+define <2 x i64> @test_shl_v2i64(<2 x i64> %a) {
+; CHECK: test_shl_v2i64:
+; CHECK: shl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #63
+ %tmp = shl <2 x i64> %a, <i64 63, i64 63>
+ ret <2 x i64> %tmp
+}
+
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
new file mode 100644
index 000000000000..d5557c0c8562
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
@@ -0,0 +1,2314 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
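+; Multi-element loads and stores: plain IR load/store lowering to ld1/st1, and
+; the vld1/vld2-style intrinsics returning struct-of-vector aggregates.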
+
+define void @test_ldst1_v16i8(<16 x i8>* %ptr, <16 x i8>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v16i8:
+; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+ %tmp = load <16 x i8>* %ptr
+ store <16 x i8> %tmp, <16 x i8>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v8i16(<8 x i16>* %ptr, <8 x i16>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v8i16:
+; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+ %tmp = load <8 x i16>* %ptr
+ store <8 x i16> %tmp, <8 x i16>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v4i32(<4 x i32>* %ptr, <4 x i32>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v4i32:
+; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %tmp = load <4 x i32>* %ptr
+ store <4 x i32> %tmp, <4 x i32>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v2i64(<2 x i64>* %ptr, <2 x i64>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v2i64:
+; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %tmp = load <2 x i64>* %ptr
+ store <2 x i64> %tmp, <2 x i64>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v8i8(<8 x i8>* %ptr, <8 x i8>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v8i8:
+; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %tmp = load <8 x i8>* %ptr
+ store <8 x i8> %tmp, <8 x i8>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v4i16(<4 x i16>* %ptr, <4 x i16>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v4i16:
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %tmp = load <4 x i16>* %ptr
+ store <4 x i16> %tmp, <4 x i16>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v2i32(<2 x i32>* %ptr, <2 x i32>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v2i32:
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %tmp = load <2 x i32>* %ptr
+ store <2 x i32> %tmp, <2 x i32>* %ptr2
+ ret void
+}
+
+define void @test_ldst1_v1i64(<1 x i64>* %ptr, <1 x i64>* %ptr2) {
+; CHECK-LABEL: test_ldst1_v1i64:
+; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+; CHECK: st1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %tmp = load <1 x i64>* %ptr
+ store <1 x i64> %tmp, <1 x i64>* %ptr2
+ ret void
+}
+
+%struct.int8x16x2_t = type { [2 x <16 x i8>] }
+%struct.int16x8x2_t = type { [2 x <8 x i16>] }
+%struct.int32x4x2_t = type { [2 x <4 x i32>] }
+%struct.int64x2x2_t = type { [2 x <2 x i64>] }
+%struct.float32x4x2_t = type { [2 x <4 x float>] }
+%struct.float64x2x2_t = type { [2 x <2 x double>] }
+%struct.int8x8x2_t = type { [2 x <8 x i8>] }
+%struct.int16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int32x2x2_t = type { [2 x <2 x i32>] }
+%struct.int64x1x2_t = type { [2 x <1 x i64>] }
+%struct.float32x2x2_t = type { [2 x <2 x float>] }
+%struct.float64x1x2_t = type { [2 x <1 x double>] }
+%struct.int8x16x3_t = type { [3 x <16 x i8>] }
+%struct.int16x8x3_t = type { [3 x <8 x i16>] }
+%struct.int32x4x3_t = type { [3 x <4 x i32>] }
+%struct.int64x2x3_t = type { [3 x <2 x i64>] }
+%struct.float32x4x3_t = type { [3 x <4 x float>] }
+%struct.float64x2x3_t = type { [3 x <2 x double>] }
+%struct.int8x8x3_t = type { [3 x <8 x i8>] }
+%struct.int16x4x3_t = type { [3 x <4 x i16>] }
+%struct.int32x2x3_t = type { [3 x <2 x i32>] }
+%struct.int64x1x3_t = type { [3 x <1 x i64>] }
+%struct.float32x2x3_t = type { [3 x <2 x float>] }
+%struct.float64x1x3_t = type { [3 x <1 x double>] }
+%struct.int8x16x4_t = type { [4 x <16 x i8>] }
+%struct.int16x8x4_t = type { [4 x <8 x i16>] }
+%struct.int32x4x4_t = type { [4 x <4 x i32>] }
+%struct.int64x2x4_t = type { [4 x <2 x i64>] }
+%struct.float32x4x4_t = type { [4 x <4 x float>] }
+%struct.float64x2x4_t = type { [4 x <2 x double>] }
+%struct.int8x8x4_t = type { [4 x <8 x i8>] }
+%struct.int16x4x4_t = type { [4 x <4 x i16>] }
+%struct.int32x2x4_t = type { [4 x <2 x i32>] }
+%struct.int64x1x4_t = type { [4 x <1 x i64>] }
+%struct.float32x2x4_t = type { [4 x <2 x float>] }
+%struct.float64x1x4_t = type { [4 x <1 x double>] }
+
+
+define <16 x i8> @test_vld1q_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld1q_s8
+; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+ %vld1 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %a, i32 1)
+ ret <16 x i8> %vld1
+}
+
+define <8 x i16> @test_vld1q_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld1q_s16
+; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld1 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %1, i32 2)
+ ret <8 x i16> %vld1
+}
+
+define <4 x i32> @test_vld1q_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld1q_s32
+; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld1 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %1, i32 4)
+ ret <4 x i32> %vld1
+}
+
+define <2 x i64> @test_vld1q_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld1q_s64
+; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld1 = tail call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %1, i32 8)
+ ret <2 x i64> %vld1
+}
+
+define <4 x float> @test_vld1q_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld1q_f32
+; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %1, i32 4)
+ ret <4 x float> %vld1
+}
+
+define <2 x double> @test_vld1q_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld1q_f64
+; CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld1 = tail call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %1, i32 8)
+ ret <2 x double> %vld1
+}
+
+define <8 x i8> @test_vld1_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld1_s8
+; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
+ ret <8 x i8> %vld1
+}
+
+define <4 x i16> @test_vld1_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld1_s16
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
+ ret <4 x i16> %vld1
+}
+
+define <2 x i32> @test_vld1_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld1_s32
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld1 = tail call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %1, i32 4)
+ ret <2 x i32> %vld1
+}
+
+define <1 x i64> @test_vld1_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld1_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld1 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %1, i32 8)
+ ret <1 x i64> %vld1
+}
+
+define <2 x float> @test_vld1_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld1_f32
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %1, i32 4)
+ ret <2 x float> %vld1
+}
+
+define <1 x double> @test_vld1_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld1_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld1 = tail call <1 x double> @llvm.arm.neon.vld1.v1f64(i8* %1, i32 8)
+ ret <1 x double> %vld1
+}
+
+define <8 x i8> @test_vld1_p8(i8* readonly %a) {
+; CHECK-LABEL: test_vld1_p8
+; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
+ ret <8 x i8> %vld1
+}
+
+define <4 x i16> @test_vld1_p16(i16* readonly %a) {
+; CHECK-LABEL: test_vld1_p16
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
+ ret <4 x i16> %vld1
+}
+
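+; vld2/vld3/vld4 return their registers as a literal struct of vectors; the
+; IR below just repacks that into the container types above, so the only
+; interesting output is the single ldN instruction checked for each test.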
+define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld2q_s8
+; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+ %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %a, i32 1)
+ %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2.fca.1.extract, 0, 1
+ ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld2q_s16
+; CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld2 = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8* %1, i32 2)
+ %vld2.fca.0.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vld2.fca.1.extract, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld2q_s32
+; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %1, i32 4)
+ %vld2.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vld2.fca.1.extract, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld2q_s64
+; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld2 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8* %1, i32 8)
+ %vld2.fca.0.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %vld2.fca.1.extract, 0, 1
+ ret %struct.int64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld2q_f32
+; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
+ %vld2.fca.0.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <4 x float>, <4 x float> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vld2.fca.1.extract, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld2q_f64
+; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld2 = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8* %1, i32 8)
+ %vld2.fca.0.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <2 x double>, <2 x double> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %vld2.fca.1.extract, 0, 1
+ ret %struct.float64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld2_s8
+; CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %a, i32 1)
+ %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vld2.fca.1.extract, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld2_s16
+; CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld2 = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8* %1, i32 2)
+ %vld2.fca.0.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vld2.fca.1.extract, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld2_s32
+; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld2 = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %1, i32 4)
+ %vld2.fca.0.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vld2.fca.1.extract, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
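+; ld2/ld3/ld4 have no .1d arrangement, so the <1 x i64> and <1 x double>
+; cases are expected to lower to a multi-register ld1 instead.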
+define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld2_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld2 = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %1, i32 8)
+ %vld2.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld2.fca.1.extract, 0, 1
+ ret %struct.int64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld2_f32
+; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld2 = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %1, i32 4)
+ %vld2.fca.0.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <2 x float>, <2 x float> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vld2.fca.1.extract, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld2_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld2 = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %1, i32 8)
+ %vld2.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld2, 1
+ %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld2.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld2.fca.1.extract, 0, 1
+ ret %struct.float64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld3q_s8
+; CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+ %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %a, i32 1)
+ %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %vld3.fca.2.extract, 0, 2
+ ret %struct.int8x16x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld3q_s16
+; CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8* %1, i32 2)
+ %vld3.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %vld3.fca.2.extract, 0, 2
+ ret %struct.int16x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld3q_s32
+; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %1, i32 4)
+ %vld3.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %vld3.fca.2.extract, 0, 2
+ ret %struct.int32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld3q_s64
+; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8* %1, i32 8)
+ %vld3.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %vld3.fca.2.extract, 0, 2
+ ret %struct.int64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld3q_f32
+; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %1, i32 4)
+ %vld3.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %vld3.fca.2.extract, 0, 2
+ ret %struct.float32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld3q_f64
+; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8* %1, i32 8)
+ %vld3.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %vld3.fca.2.extract, 0, 2
+ ret %struct.float64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld3_s8
+; CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %a, i32 1)
+ %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %vld3.fca.2.extract, 0, 2
+ ret %struct.int8x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld3_s16
+; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %1, i32 2)
+ %vld3.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %vld3.fca.2.extract, 0, 2
+ ret %struct.int16x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld3_s32
+; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8* %1, i32 4)
+ %vld3.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %vld3.fca.2.extract, 0, 2
+ ret %struct.int32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld3_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %1, i32 8)
+ %vld3.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld3.fca.2.extract, 0, 2
+ ret %struct.int64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld3_f32
+; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8* %1, i32 4)
+ %vld3.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %vld3.fca.2.extract, 0, 2
+ ret %struct.float32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld3_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %1, i32 8)
+ %vld3.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3, 2
+ %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld3.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld3.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld3.fca.2.extract, 0, 2
+ ret %struct.float64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld4q_s8
+; CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
+ %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %a, i32 1)
+ %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %vld4.fca.3.extract, 0, 3
+ ret %struct.int8x16x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld4q_s16
+; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %1, i32 2)
+ %vld4.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %vld4.fca.3.extract, 0, 3
+ ret %struct.int16x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld4q_s32
+; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8* %1, i32 4)
+ %vld4.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %vld4.fca.3.extract, 0, 3
+ ret %struct.int32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld4q_s64
+; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8* %1, i32 8)
+ %vld4.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %vld4.fca.3.extract, 0, 3
+ ret %struct.int64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld4q_f32
+; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4)
+ %vld4.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %vld4.fca.3.extract, 0, 3
+ ret %struct.float32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld4q_f64
+; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8* %1, i32 8)
+ %vld4.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %vld4.fca.3.extract, 0, 3
+ ret %struct.float64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) {
+; CHECK-LABEL: test_vld4_s8
+; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
+ %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %a, i32 1)
+ %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %vld4.fca.3.extract, 0, 3
+ ret %struct.int8x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) {
+; CHECK-LABEL: test_vld4_s16
+; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %vld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %1, i32 2)
+ %vld4.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %vld4.fca.3.extract, 0, 3
+ ret %struct.int16x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) {
+; CHECK-LABEL: test_vld4_s32
+; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %vld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8* %1, i32 4)
+ %vld4.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %vld4.fca.3.extract, 0, 3
+ ret %struct.int32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) {
+; CHECK-LABEL: test_vld4_s64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %vld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %1, i32 8)
+ %vld4.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld4.fca.3.extract, 0, 3
+ ret %struct.int64x1x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) {
+; CHECK-LABEL: test_vld4_f32
+; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %vld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8* %1, i32 4)
+ %vld4.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %vld4.fca.3.extract, 0, 3
+ ret %struct.float32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x1x4_t @test_vld4_f64(double* readonly %a) {
+; CHECK-LABEL: test_vld4_f64
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %vld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %1, i32 8)
+ %vld4.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4, 3
+ %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld4.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld4.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld4.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld4.fca.3.extract, 0, 3
+ ret %struct.float64x1x4_t %.fca.0.3.insert
+}
+
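+; These tests reuse the 32-bit ARM NEON vldN intrinsic declarations; the
+; AArch64 backend is expected to lower them to the A64 ldN forms above.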
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32)
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32)
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32)
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32)
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32)
+declare <2 x double> @llvm.arm.neon.vld1.v2f64(i8*, i32)
+declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32)
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32)
+declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32)
+declare <1 x double> @llvm.arm.neon.vld1.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
+
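+; Store counterparts: each vstN intrinsic should map to the matching stN
+; instruction, again with st1 standing in for the missing .1d arrangements.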
+define void @test_vst1q_s8(i8* %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vst1q_s8
+; CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ tail call void @llvm.arm.neon.vst1.v16i8(i8* %a, <16 x i8> %b, i32 1)
+ ret void
+}
+
+define void @test_vst1q_s16(i16* %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vst1q_s16
+; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v8i16(i8* %1, <8 x i16> %b, i32 2)
+ ret void
+}
+
+define void @test_vst1q_s32(i32* %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vst1q_s32
+; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v4i32(i8* %1, <4 x i32> %b, i32 4)
+ ret void
+}
+
+define void @test_vst1q_s64(i64* %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vst1q_s64
+; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v2i64(i8* %1, <2 x i64> %b, i32 8)
+ ret void
+}
+
+define void @test_vst1q_f32(float* %a, <4 x float> %b) {
+; CHECK-LABEL: test_vst1q_f32
+; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v4f32(i8* %1, <4 x float> %b, i32 4)
+ ret void
+}
+
+define void @test_vst1q_f64(double* %a, <2 x double> %b) {
+; CHECK-LABEL: test_vst1q_f64
+; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v2f64(i8* %1, <2 x double> %b, i32 8)
+ ret void
+}
+
+define void @test_vst1_s8(i8* %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vst1_s8
+; CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ tail call void @llvm.arm.neon.vst1.v8i8(i8* %a, <8 x i8> %b, i32 1)
+ ret void
+}
+
+define void @test_vst1_s16(i16* %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vst1_s16
+; CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v4i16(i8* %1, <4 x i16> %b, i32 2)
+ ret void
+}
+
+define void @test_vst1_s32(i32* %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vst1_s32
+; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v2i32(i8* %1, <2 x i32> %b, i32 4)
+ ret void
+}
+
+define void @test_vst1_s64(i64* %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vst1_s64
+; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v1i64(i8* %1, <1 x i64> %b, i32 8)
+ ret void
+}
+
+define void @test_vst1_f32(float* %a, <2 x float> %b) {
+; CHECK-LABEL: test_vst1_f32
+; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v2f32(i8* %1, <2 x float> %b, i32 4)
+ ret void
+}
+
+define void @test_vst1_f64(double* %a, <1 x double> %b) {
+; CHECK-LABEL: test_vst1_f64
+; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst1.v1f64(i8* %1, <1 x double> %b, i32 8)
+ ret void
+}
+
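+; For vst2/vst3/vst4 the front end passes the container struct as a flattened
+; [N x <...>] array; the fields are extracted and fed to the intrinsic, which
+; should still collapse to one stN (or multi-register st1 for .1d).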
+define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_s8
+; CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ tail call void @llvm.arm.neon.vst2.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, i32 1)
+ ret void
+}
+
+define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_s16
+; CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 2)
+ ret void
+}
+
+define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_s32
+; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 4)
+ ret void
+}
+
+define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_s64
+; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 8)
+ ret void
+}
+
+define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_f32
+; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 4)
+ ret void
+}
+
+define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_f64
+; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 8)
+ ret void
+}
+
+define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst2_s8
+; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ tail call void @llvm.arm.neon.vst2.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 1)
+ ret void
+}
+
+define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst2_s16
+; CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 2)
+ ret void
+}
+
+define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst2_s32
+; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 4)
+ ret void
+}
+
+define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst2_s64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 8)
+ ret void
+}
+
+define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst2_f32
+; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 4)
+ ret void
+}
+
+define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst2_f64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst2.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 8)
+ ret void
+}
+
+define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_s8
+; CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ tail call void @llvm.arm.neon.vst3.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, i32 1)
+ ret void
+}
+
+define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_s16
+; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 2)
+ ret void
+}
+
+define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_s32
+; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 4)
+ ret void
+}
+
+define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_s64
+; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 8)
+ ret void
+}
+
+define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_f32
+; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 4)
+ ret void
+}
+
+define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_f64
+; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 8)
+ ret void
+}
+
+define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst3_s8
+; CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ tail call void @llvm.arm.neon.vst3.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 1)
+ ret void
+}
+
+define void @test_vst3_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst3_s16
+; CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 2)
+ ret void
+}
+
+define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst3_s32
+; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 4)
+ ret void
+}
+
+define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst3_s64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 8)
+ ret void
+}
+
+define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst3_f32
+; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 4)
+ ret void
+}
+
+define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst3_f64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst3.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 8)
+ ret void
+}
+
+define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_s8
+; CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ tail call void @llvm.arm.neon.vst4.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 1)
+ ret void
+}
+
+define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_s16
+; CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v8i16(i8* %1, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 2)
+ ret void
+}
+
+define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_s32
+; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v4i32(i8* %1, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 4)
+ ret void
+}
+
+define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_s64
+; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v2i64(i8* %1, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 8)
+ ret void
+}
+
+define void @test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_f32
+; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v4f32(i8* %1, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 4)
+ ret void
+}
+
+define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_f64
+; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v2f64(i8* %1, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 8)
+ ret void
+}
+
+define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst4_s8
+; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ tail call void @llvm.arm.neon.vst4.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 1)
+ ret void
+}
+
+define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst4_s16
+; CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
+ %1 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v4i16(i8* %1, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 2)
+ ret void
+}
+
+define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst4_s32
+; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
+ %1 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v2i32(i8* %1, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 4)
+ ret void
+}
+
+define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst4_s64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
+ %1 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v1i64(i8* %1, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 8)
+ ret void
+}
+
+define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst4_f32
+; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
+ %1 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v2f32(i8* %1, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 4)
+ ret void
+}
+
+define void @test_vst4_f64(double* %a, [4 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst4_f64
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
+ %1 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst4.v1f64(i8* %1, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 8)
+ ret void
+}
+
+declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32)
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32)
+declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32)
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32)
+declare void @llvm.arm.neon.vst1.v2f64(i8*, <2 x double>, i32)
+declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32)
+declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32)
+declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32)
+declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>, i32)
+declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
+declare void @llvm.arm.neon.vst1.v1f64(i8*, <1 x double>, i32)
+declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.arm.neon.vst2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32)
+declare void @llvm.arm.neon.vst2.v2f64(i8*, <2 x double>, <2 x double>, i32)
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32)
+declare void @llvm.arm.neon.vst2.v1f64(i8*, <1 x double>, <1 x double>, i32)
+declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.arm.neon.vst3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.arm.neon.vst3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.arm.neon.vst3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
+declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.arm.neon.vst4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.arm.neon.vst4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
+
+define %struct.int8x16x2_t @test_vld1q_s8_x2(i8* %a) {
+; CHECK-LABEL: test_vld1q_s8_x2
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
+ %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
+ %3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
+ %4 = insertvalue %struct.int8x16x2_t undef, <16 x i8> %2, 0, 0
+ %5 = insertvalue %struct.int8x16x2_t %4, <16 x i8> %3, 0, 1
+ ret %struct.int8x16x2_t %5
+}
+
+define %struct.int16x8x2_t @test_vld1q_s16_x2(i16* %a) {
+; CHECK-LABEL: test_vld1q_s16_x2
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
+ %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
+ %4 = extractvalue { <8 x i16>, <8 x i16> } %2, 1
+ %5 = insertvalue %struct.int16x8x2_t undef, <8 x i16> %3, 0, 0
+ %6 = insertvalue %struct.int16x8x2_t %5, <8 x i16> %4, 0, 1
+ ret %struct.int16x8x2_t %6
+}
+
+define %struct.int32x4x2_t @test_vld1q_s32_x2(i32* %a) {
+; CHECK-LABEL: test_vld1q_s32_x2
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0
+ %4 = extractvalue { <4 x i32>, <4 x i32> } %2, 1
+ %5 = insertvalue %struct.int32x4x2_t undef, <4 x i32> %3, 0, 0
+ %6 = insertvalue %struct.int32x4x2_t %5, <4 x i32> %4, 0, 1
+ ret %struct.int32x4x2_t %6
+}
+
+define %struct.int64x2x2_t @test_vld1q_s64_x2(i64* %a) {
+; CHECK-LABEL: test_vld1q_s64_x2
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x i64>, <2 x i64> } %2, 0
+ %4 = extractvalue { <2 x i64>, <2 x i64> } %2, 1
+ %5 = insertvalue %struct.int64x2x2_t undef, <2 x i64> %3, 0, 0
+ %6 = insertvalue %struct.int64x2x2_t %5, <2 x i64> %4, 0, 1
+ ret %struct.int64x2x2_t %6
+}
+
+define %struct.float32x4x2_t @test_vld1q_f32_x2(float* %a) {
+; CHECK-LABEL: test_vld1q_f32_x2
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x float>, <4 x float> } %2, 0
+ %4 = extractvalue { <4 x float>, <4 x float> } %2, 1
+ %5 = insertvalue %struct.float32x4x2_t undef, <4 x float> %3, 0, 0
+ %6 = insertvalue %struct.float32x4x2_t %5, <4 x float> %4, 0, 1
+ ret %struct.float32x4x2_t %6
+}
+
+define %struct.float64x2x2_t @test_vld1q_f64_x2(double* %a) {
+; CHECK-LABEL: test_vld1q_f64_x2
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x double>, <2 x double> } %2, 0
+ %4 = extractvalue { <2 x double>, <2 x double> } %2, 1
+ %5 = insertvalue %struct.float64x2x2_t undef, <2 x double> %3, 0, 0
+ %6 = insertvalue %struct.float64x2x2_t %5, <2 x double> %4, 0, 1
+ ret %struct.float64x2x2_t %6
+}
+
+define %struct.int8x8x2_t @test_vld1_s8_x2(i8* %a) {
+; CHECK-LABEL: test_vld1_s8_x2
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %a, i32 1)
+ %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
+ %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
+ %4 = insertvalue %struct.int8x8x2_t undef, <8 x i8> %2, 0, 0
+ %5 = insertvalue %struct.int8x8x2_t %4, <8 x i8> %3, 0, 1
+ ret %struct.int8x8x2_t %5
+}
+
+define %struct.int16x4x2_t @test_vld1_s16_x2(i16* %a) {
+; CHECK-LABEL: test_vld1_s16_x2
+; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8* %1, i32 2)
+ %3 = extractvalue { <4 x i16>, <4 x i16> } %2, 0
+ %4 = extractvalue { <4 x i16>, <4 x i16> } %2, 1
+ %5 = insertvalue %struct.int16x4x2_t undef, <4 x i16> %3, 0, 0
+ %6 = insertvalue %struct.int16x4x2_t %5, <4 x i16> %4, 0, 1
+ ret %struct.int16x4x2_t %6
+}
+
+define %struct.int32x2x2_t @test_vld1_s32_x2(i32* %a) {
+; CHECK-LABEL: test_vld1_s32_x2
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x i32>, <2 x i32> } %2, 0
+ %4 = extractvalue { <2 x i32>, <2 x i32> } %2, 1
+ %5 = insertvalue %struct.int32x2x2_t undef, <2 x i32> %3, 0, 0
+ %6 = insertvalue %struct.int32x2x2_t %5, <2 x i32> %4, 0, 1
+ ret %struct.int32x2x2_t %6
+}
+
+define %struct.int64x1x2_t @test_vld1_s64_x2(i64* %a) {
+; CHECK-LABEL: test_vld1_s64_x2
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x i64>, <1 x i64> } %2, 0
+ %4 = extractvalue { <1 x i64>, <1 x i64> } %2, 1
+ %5 = insertvalue %struct.int64x1x2_t undef, <1 x i64> %3, 0, 0
+ %6 = insertvalue %struct.int64x1x2_t %5, <1 x i64> %4, 0, 1
+ ret %struct.int64x1x2_t %6
+}
+
+define %struct.float32x2x2_t @test_vld1_f32_x2(float* %a) {
+; CHECK-LABEL: test_vld1_f32_x2
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x float>, <2 x float> } %2, 0
+ %4 = extractvalue { <2 x float>, <2 x float> } %2, 1
+ %5 = insertvalue %struct.float32x2x2_t undef, <2 x float> %3, 0, 0
+ %6 = insertvalue %struct.float32x2x2_t %5, <2 x float> %4, 0, 1
+ ret %struct.float32x2x2_t %6
+}
+
+define %struct.float64x1x2_t @test_vld1_f64_x2(double* %a) {
+; CHECK-LABEL: test_vld1_f64_x2
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x double>, <1 x double> } %2, 0
+ %4 = extractvalue { <1 x double>, <1 x double> } %2, 1
+ %5 = insertvalue %struct.float64x1x2_t undef, <1 x double> %3, 0, 0
+ %6 = insertvalue %struct.float64x1x2_t %5, <1 x double> %4, 0, 1
+ ret %struct.float64x1x2_t %6
+}
+
+define %struct.int8x16x3_t @test_vld1q_s8_x3(i8* %a) {
+; CHECK-LABEL: test_vld1q_s8_x3
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8* %a, i32 1)
+ %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
+ %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
+ %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
+ %5 = insertvalue %struct.int8x16x3_t undef, <16 x i8> %2, 0, 0
+ %6 = insertvalue %struct.int8x16x3_t %5, <16 x i8> %3, 0, 1
+ %7 = insertvalue %struct.int8x16x3_t %6, <16 x i8> %4, 0, 2
+ ret %struct.int8x16x3_t %7
+}
+
+define %struct.int16x8x3_t @test_vld1q_s16_x3(i16* %a) {
+; CHECK-LABEL: test_vld1q_s16_x3
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
+ %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
+ %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
+ %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
+ %6 = insertvalue %struct.int16x8x3_t undef, <8 x i16> %3, 0, 0
+ %7 = insertvalue %struct.int16x8x3_t %6, <8 x i16> %4, 0, 1
+ %8 = insertvalue %struct.int16x8x3_t %7, <8 x i16> %5, 0, 2
+ ret %struct.int16x8x3_t %8
+}
+
+define %struct.int32x4x3_t @test_vld1q_s32_x3(i32* %a) {
+; CHECK-LABEL: test_vld1q_s32_x3
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
+ %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
+ %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
+ %6 = insertvalue %struct.int32x4x3_t undef, <4 x i32> %3, 0, 0
+ %7 = insertvalue %struct.int32x4x3_t %6, <4 x i32> %4, 0, 1
+ %8 = insertvalue %struct.int32x4x3_t %7, <4 x i32> %5, 0, 2
+ ret %struct.int32x4x3_t %8
+}
+
+define %struct.int64x2x3_t @test_vld1q_s64_x3(i64* %a) {
+; CHECK-LABEL: test_vld1q_s64_x3
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+ %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
+ %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
+ %6 = insertvalue %struct.int64x2x3_t undef, <2 x i64> %3, 0, 0
+ %7 = insertvalue %struct.int64x2x3_t %6, <2 x i64> %4, 0, 1
+ %8 = insertvalue %struct.int64x2x3_t %7, <2 x i64> %5, 0, 2
+ ret %struct.int64x2x3_t %8
+}
+
+define %struct.float32x4x3_t @test_vld1q_f32_x3(float* %a) {
+; CHECK-LABEL: test_vld1q_f32_x3
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 0
+ %4 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 1
+ %5 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 2
+ %6 = insertvalue %struct.float32x4x3_t undef, <4 x float> %3, 0, 0
+ %7 = insertvalue %struct.float32x4x3_t %6, <4 x float> %4, 0, 1
+ %8 = insertvalue %struct.float32x4x3_t %7, <4 x float> %5, 0, 2
+ ret %struct.float32x4x3_t %8
+}
+
+define %struct.float64x2x3_t @test_vld1q_f64_x3(double* %a) {
+; CHECK-LABEL: test_vld1q_f64_x3
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 0
+ %4 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 1
+ %5 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 2
+ %6 = insertvalue %struct.float64x2x3_t undef, <2 x double> %3, 0, 0
+ %7 = insertvalue %struct.float64x2x3_t %6, <2 x double> %4, 0, 1
+ %8 = insertvalue %struct.float64x2x3_t %7, <2 x double> %5, 0, 2
+ ret %struct.float64x2x3_t %8
+}
+
+define %struct.int8x8x3_t @test_vld1_s8_x3(i8* %a) {
+; CHECK-LABEL: test_vld1_s8_x3
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8* %a, i32 1)
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+ %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
+ %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
+ %5 = insertvalue %struct.int8x8x3_t undef, <8 x i8> %2, 0, 0
+ %6 = insertvalue %struct.int8x8x3_t %5, <8 x i8> %3, 0, 1
+ %7 = insertvalue %struct.int8x8x3_t %6, <8 x i8> %4, 0, 2
+ ret %struct.int8x8x3_t %7
+}
+
+define %struct.int16x4x3_t @test_vld1_s16_x3(i16* %a) {
+; CHECK-LABEL: test_vld1_s16_x3
+; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8* %1, i32 2)
+ %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
+ %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
+ %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
+ %6 = insertvalue %struct.int16x4x3_t undef, <4 x i16> %3, 0, 0
+ %7 = insertvalue %struct.int16x4x3_t %6, <4 x i16> %4, 0, 1
+ %8 = insertvalue %struct.int16x4x3_t %7, <4 x i16> %5, 0, 2
+ ret %struct.int16x4x3_t %8
+}
+
+define %struct.int32x2x3_t @test_vld1_s32_x3(i32* %a) {
+; CHECK-LABEL: test_vld1_s32_x3
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
+ %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
+ %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
+ %6 = insertvalue %struct.int32x2x3_t undef, <2 x i32> %3, 0, 0
+ %7 = insertvalue %struct.int32x2x3_t %6, <2 x i32> %4, 0, 1
+ %8 = insertvalue %struct.int32x2x3_t %7, <2 x i32> %5, 0, 2
+ ret %struct.int32x2x3_t %8
+}
+
+define %struct.int64x1x3_t @test_vld1_s64_x3(i64* %a) {
+; CHECK-LABEL: test_vld1_s64_x3
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
+ %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
+ %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
+ %6 = insertvalue %struct.int64x1x3_t undef, <1 x i64> %3, 0, 0
+ %7 = insertvalue %struct.int64x1x3_t %6, <1 x i64> %4, 0, 1
+ %8 = insertvalue %struct.int64x1x3_t %7, <1 x i64> %5, 0, 2
+ ret %struct.int64x1x3_t %8
+}
+
+define %struct.float32x2x3_t @test_vld1_f32_x3(float* %a) {
+; CHECK-LABEL: test_vld1_f32_x3
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 0
+ %4 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 1
+ %5 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 2
+ %6 = insertvalue %struct.float32x2x3_t undef, <2 x float> %3, 0, 0
+ %7 = insertvalue %struct.float32x2x3_t %6, <2 x float> %4, 0, 1
+ %8 = insertvalue %struct.float32x2x3_t %7, <2 x float> %5, 0, 2
+ ret %struct.float32x2x3_t %8
+}
+
+define %struct.float64x1x3_t @test_vld1_f64_x3(double* %a) {
+; CHECK-LABEL: test_vld1_f64_x3
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 0
+ %4 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 1
+ %5 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 2
+ %6 = insertvalue %struct.float64x1x3_t undef, <1 x double> %3, 0, 0
+ %7 = insertvalue %struct.float64x1x3_t %6, <1 x double> %4, 0, 1
+ %8 = insertvalue %struct.float64x1x3_t %7, <1 x double> %5, 0, 2
+ ret %struct.float64x1x3_t %8
+}
+
+define %struct.int8x16x4_t @test_vld1q_s8_x4(i8* %a) {
+; CHECK-LABEL: test_vld1q_s8_x4
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8* %a, i32 1)
+ %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
+ %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
+ %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
+ %5 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 3
+ %6 = insertvalue %struct.int8x16x4_t undef, <16 x i8> %2, 0, 0
+ %7 = insertvalue %struct.int8x16x4_t %6, <16 x i8> %3, 0, 1
+ %8 = insertvalue %struct.int8x16x4_t %7, <16 x i8> %4, 0, 2
+ %9 = insertvalue %struct.int8x16x4_t %8, <16 x i8> %5, 0, 3
+ ret %struct.int8x16x4_t %9
+}
+
+define %struct.int16x8x4_t @test_vld1q_s16_x4(i16* %a) {
+; CHECK-LABEL: test_vld1q_s16_x4
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8* %1, i32 2)
+ %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
+ %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
+ %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
+ %6 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 3
+ %7 = insertvalue %struct.int16x8x4_t undef, <8 x i16> %3, 0, 0
+ %8 = insertvalue %struct.int16x8x4_t %7, <8 x i16> %4, 0, 1
+ %9 = insertvalue %struct.int16x8x4_t %8, <8 x i16> %5, 0, 2
+ %10 = insertvalue %struct.int16x8x4_t %9, <8 x i16> %6, 0, 3
+ ret %struct.int16x8x4_t %10
+}
+
+define %struct.int32x4x4_t @test_vld1q_s32_x4(i32* %a) {
+; CHECK-LABEL: test_vld1q_s32_x4
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
+ %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
+ %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
+ %6 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 3
+ %7 = insertvalue %struct.int32x4x4_t undef, <4 x i32> %3, 0, 0
+ %8 = insertvalue %struct.int32x4x4_t %7, <4 x i32> %4, 0, 1
+ %9 = insertvalue %struct.int32x4x4_t %8, <4 x i32> %5, 0, 2
+ %10 = insertvalue %struct.int32x4x4_t %9, <4 x i32> %6, 0, 3
+ ret %struct.int32x4x4_t %10
+}
+
+define %struct.int64x2x4_t @test_vld1q_s64_x4(i64* %a) {
+; CHECK-LABEL: test_vld1q_s64_x4
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+ %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
+ %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
+ %6 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 3
+ %7 = insertvalue %struct.int64x2x4_t undef, <2 x i64> %3, 0, 0
+ %8 = insertvalue %struct.int64x2x4_t %7, <2 x i64> %4, 0, 1
+ %9 = insertvalue %struct.int64x2x4_t %8, <2 x i64> %5, 0, 2
+ %10 = insertvalue %struct.int64x2x4_t %9, <2 x i64> %6, 0, 3
+ ret %struct.int64x2x4_t %10
+}
+
+define %struct.float32x4x4_t @test_vld1q_f32_x4(float* %a) {
+; CHECK-LABEL: test_vld1q_f32_x4
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+ %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 1
+ %5 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 2
+ %6 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 3
+ %7 = insertvalue %struct.float32x4x4_t undef, <4 x float> %3, 0, 0
+ %8 = insertvalue %struct.float32x4x4_t %7, <4 x float> %4, 0, 1
+ %9 = insertvalue %struct.float32x4x4_t %8, <4 x float> %5, 0, 2
+ %10 = insertvalue %struct.float32x4x4_t %9, <4 x float> %6, 0, 3
+ ret %struct.float32x4x4_t %10
+}
+
+define %struct.float64x2x4_t @test_vld1q_f64_x4(double* %a) {
+; CHECK-LABEL: test_vld1q_f64_x4
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
+ %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
+ %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
+ %6 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
+ %7 = insertvalue %struct.float64x2x4_t undef, <2 x double> %3, 0, 0
+ %8 = insertvalue %struct.float64x2x4_t %7, <2 x double> %4, 0, 1
+ %9 = insertvalue %struct.float64x2x4_t %8, <2 x double> %5, 0, 2
+ %10 = insertvalue %struct.float64x2x4_t %9, <2 x double> %6, 0, 3
+ ret %struct.float64x2x4_t %10
+}
+
+define %struct.int8x8x4_t @test_vld1_s8_x4(i8* %a) {
+; CHECK-LABEL: test_vld1_s8_x4
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+ %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
+ %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
+ %5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
+ %6 = insertvalue %struct.int8x8x4_t undef, <8 x i8> %2, 0, 0
+ %7 = insertvalue %struct.int8x8x4_t %6, <8 x i8> %3, 0, 1
+ %8 = insertvalue %struct.int8x8x4_t %7, <8 x i8> %4, 0, 2
+ %9 = insertvalue %struct.int8x8x4_t %8, <8 x i8> %5, 0, 3
+ ret %struct.int8x8x4_t %9
+}
+
+define %struct.int16x4x4_t @test_vld1_s16_x4(i16* %a) {
+; CHECK-LABEL: test_vld1_s16_x4
+; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8* %1, i32 2)
+ %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
+ %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
+ %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
+ %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 3
+ %7 = insertvalue %struct.int16x4x4_t undef, <4 x i16> %3, 0, 0
+ %8 = insertvalue %struct.int16x4x4_t %7, <4 x i16> %4, 0, 1
+ %9 = insertvalue %struct.int16x4x4_t %8, <4 x i16> %5, 0, 2
+ %10 = insertvalue %struct.int16x4x4_t %9, <4 x i16> %6, 0, 3
+ ret %struct.int16x4x4_t %10
+}
+
+define %struct.int32x2x4_t @test_vld1_s32_x4(i32* %a) {
+; CHECK-LABEL: test_vld1_s32_x4
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
+ %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
+ %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
+ %6 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
+ %7 = insertvalue %struct.int32x2x4_t undef, <2 x i32> %3, 0, 0
+ %8 = insertvalue %struct.int32x2x4_t %7, <2 x i32> %4, 0, 1
+ %9 = insertvalue %struct.int32x2x4_t %8, <2 x i32> %5, 0, 2
+ %10 = insertvalue %struct.int32x2x4_t %9, <2 x i32> %6, 0, 3
+ ret %struct.int32x2x4_t %10
+}
+
+define %struct.int64x1x4_t @test_vld1_s64_x4(i64* %a) {
+; CHECK-LABEL: test_vld1_s64_x4
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
+ %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
+ %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
+ %6 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 3
+ %7 = insertvalue %struct.int64x1x4_t undef, <1 x i64> %3, 0, 0
+ %8 = insertvalue %struct.int64x1x4_t %7, <1 x i64> %4, 0, 1
+ %9 = insertvalue %struct.int64x1x4_t %8, <1 x i64> %5, 0, 2
+ %10 = insertvalue %struct.int64x1x4_t %9, <1 x i64> %6, 0, 3
+ ret %struct.int64x1x4_t %10
+}
+
+define %struct.float32x2x4_t @test_vld1_f32_x4(float* %a) {
+; CHECK-LABEL: test_vld1_f32_x4
+; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8* %1, i32 4)
+ %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 0
+ %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 1
+ %5 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 2
+ %6 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 3
+ %7 = insertvalue %struct.float32x2x4_t undef, <2 x float> %3, 0, 0
+ %8 = insertvalue %struct.float32x2x4_t %7, <2 x float> %4, 0, 1
+ %9 = insertvalue %struct.float32x2x4_t %8, <2 x float> %5, 0, 2
+ %10 = insertvalue %struct.float32x2x4_t %9, <2 x float> %6, 0, 3
+ ret %struct.float32x2x4_t %10
+}
+
+define %struct.float64x1x4_t @test_vld1_f64_x4(double* %a) {
+; CHECK-LABEL: test_vld1_f64_x4
+; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8* %1, i32 8)
+ %3 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 0
+ %4 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 1
+ %5 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 2
+ %6 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 3
+ %7 = insertvalue %struct.float64x1x4_t undef, <1 x double> %3, 0, 0
+ %8 = insertvalue %struct.float64x1x4_t %7, <1 x double> %4, 0, 1
+ %9 = insertvalue %struct.float64x1x4_t %8, <1 x double> %5, 0, 2
+ %10 = insertvalue %struct.float64x1x4_t %9, <1 x double> %6, 0, 3
+ ret %struct.float64x1x4_t %10
+}
+
+define void @test_vst1q_s8_x2(i8* %a, [2 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x2
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <16 x i8>] %b, 0
+ %2 = extractvalue [2 x <16 x i8>] %b, 1
+ tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
+ ret void
+}
+
+define void @test_vst1q_s16_x2(i16* %a, [2 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x2
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <8 x i16>] %b, 0
+ %2 = extractvalue [2 x <8 x i16>] %b, 1
+ %3 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
+ ret void
+}
+
+define void @test_vst1q_s32_x2(i32* %a, [2 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x2
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <4 x i32>] %b, 0
+ %2 = extractvalue [2 x <4 x i32>] %b, 1
+ %3 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v4i32(i8* %3, <4 x i32> %1, <4 x i32> %2, i32 4)
+ ret void
+}
+
+define void @test_vst1q_s64_x2(i64* %a, [2 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x2
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <2 x i64>] %b, 0
+ %2 = extractvalue [2 x <2 x i64>] %b, 1
+ %3 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v2i64(i8* %3, <2 x i64> %1, <2 x i64> %2, i32 8)
+ ret void
+}
+
+define void @test_vst1q_f32_x2(float* %a, [2 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x2
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <4 x float>] %b, 0
+ %2 = extractvalue [2 x <4 x float>] %b, 1
+ %3 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v4f32(i8* %3, <4 x float> %1, <4 x float> %2, i32 4)
+ ret void
+}
+
+define void @test_vst1q_f64_x2(double* %a, [2 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x2
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <2 x double>] %b, 0
+ %2 = extractvalue [2 x <2 x double>] %b, 1
+ %3 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v2f64(i8* %3, <2 x double> %1, <2 x double> %2, i32 8)
+ ret void
+}
+
+define void @test_vst1_s8_x2(i8* %a, [2 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x2
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <8 x i8>] %b, 0
+ %2 = extractvalue [2 x <8 x i8>] %b, 1
+ tail call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 1)
+ ret void
+}
+
+define void @test_vst1_s16_x2(i16* %a, [2 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x2
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <4 x i16>] %b, 0
+ %2 = extractvalue [2 x <4 x i16>] %b, 1
+ %3 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v4i16(i8* %3, <4 x i16> %1, <4 x i16> %2, i32 2)
+ ret void
+}
+
+define void @test_vst1_s32_x2(i32* %a, [2 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x2
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <2 x i32>] %b, 0
+ %2 = extractvalue [2 x <2 x i32>] %b, 1
+ %3 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 4)
+ ret void
+}
+
+define void @test_vst1_s64_x2(i64* %a, [2 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x2
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <1 x i64>] %b, 0
+ %2 = extractvalue [2 x <1 x i64>] %b, 1
+ %3 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v1i64(i8* %3, <1 x i64> %1, <1 x i64> %2, i32 8)
+ ret void
+}
+
+define void @test_vst1_f32_x2(float* %a, [2 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x2
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <2 x float>] %b, 0
+ %2 = extractvalue [2 x <2 x float>] %b, 1
+ %3 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v2f32(i8* %3, <2 x float> %1, <2 x float> %2, i32 4)
+ ret void
+}
+
+define void @test_vst1_f64_x2(double* %a, [2 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x2
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [2 x <1 x double>] %b, 0
+ %2 = extractvalue [2 x <1 x double>] %b, 1
+ %3 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v1f64(i8* %3, <1 x double> %1, <1 x double> %2, i32 8)
+ ret void
+}
+
+define void @test_vst1q_s8_x3(i8* %a, [3 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x3
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <16 x i8>] %b, 0
+ %2 = extractvalue [3 x <16 x i8>] %b, 1
+ %3 = extractvalue [3 x <16 x i8>] %b, 2
+ tail call void @llvm.aarch64.neon.vst1x3.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, i32 1)
+ ret void
+}
+
+define void @test_vst1q_s16_x3(i16* %a, [3 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x3
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <8 x i16>] %b, 0
+ %2 = extractvalue [3 x <8 x i16>] %b, 1
+ %3 = extractvalue [3 x <8 x i16>] %b, 2
+ %4 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v8i16(i8* %4, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, i32 2)
+ ret void
+}
+
+define void @test_vst1q_s32_x3(i32* %a, [3 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x3
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <4 x i32>] %b, 0
+ %2 = extractvalue [3 x <4 x i32>] %b, 1
+ %3 = extractvalue [3 x <4 x i32>] %b, 2
+ %4 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v4i32(i8* %4, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, i32 4)
+ ret void
+}
+
+define void @test_vst1q_s64_x3(i64* %a, [3 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x3
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <2 x i64>] %b, 0
+ %2 = extractvalue [3 x <2 x i64>] %b, 1
+ %3 = extractvalue [3 x <2 x i64>] %b, 2
+ %4 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v2i64(i8* %4, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, i32 8)
+ ret void
+}
+
+define void @test_vst1q_f32_x3(float* %a, [3 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x3
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <4 x float>] %b, 0
+ %2 = extractvalue [3 x <4 x float>] %b, 1
+ %3 = extractvalue [3 x <4 x float>] %b, 2
+ %4 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 4)
+ ret void
+}
+
+define void @test_vst1q_f64_x3(double* %a, [3 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x3
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <2 x double>] %b, 0
+ %2 = extractvalue [3 x <2 x double>] %b, 1
+ %3 = extractvalue [3 x <2 x double>] %b, 2
+ %4 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v2f64(i8* %4, <2 x double> %1, <2 x double> %2, <2 x double> %3, i32 8)
+ ret void
+}
+
+define void @test_vst1_s8_x3(i8* %a, [3 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x3
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <8 x i8>] %b, 0
+ %2 = extractvalue [3 x <8 x i8>] %b, 1
+ %3 = extractvalue [3 x <8 x i8>] %b, 2
+ tail call void @llvm.aarch64.neon.vst1x3.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, i32 1)
+ ret void
+}
+
+define void @test_vst1_s16_x3(i16* %a, [3 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x3
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <4 x i16>] %b, 0
+ %2 = extractvalue [3 x <4 x i16>] %b, 1
+ %3 = extractvalue [3 x <4 x i16>] %b, 2
+ %4 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 2)
+ ret void
+}
+
+define void @test_vst1_s32_x3(i32* %a, [3 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x3
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <2 x i32>] %b, 0
+ %2 = extractvalue [3 x <2 x i32>] %b, 1
+ %3 = extractvalue [3 x <2 x i32>] %b, 2
+ %4 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
+ ret void
+}
+
+define void @test_vst1_s64_x3(i64* %a, [3 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x3
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <1 x i64>] %b, 0
+ %2 = extractvalue [3 x <1 x i64>] %b, 1
+ %3 = extractvalue [3 x <1 x i64>] %b, 2
+ %4 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
+ ret void
+}
+
+define void @test_vst1_f32_x3(float* %a, [3 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x3
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <2 x float>] %b, 0
+ %2 = extractvalue [3 x <2 x float>] %b, 1
+ %3 = extractvalue [3 x <2 x float>] %b, 2
+ %4 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 4)
+ ret void
+}
+
+define void @test_vst1_f64_x3(double* %a, [3 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x3
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [3 x <1 x double>] %b, 0
+ %2 = extractvalue [3 x <1 x double>] %b, 1
+ %3 = extractvalue [3 x <1 x double>] %b, 2
+ %4 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v1f64(i8* %4, <1 x double> %1, <1 x double> %2, <1 x double> %3, i32 8)
+ ret void
+}
+
+define void @test_vst1q_s8_x4(i8* %a, [4 x <16 x i8>] %b) {
+; CHECK-LABEL: test_vst1q_s8_x4
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <16 x i8>] %b, 0
+ %2 = extractvalue [4 x <16 x i8>] %b, 1
+ %3 = extractvalue [4 x <16 x i8>] %b, 2
+ %4 = extractvalue [4 x <16 x i8>] %b, 3
+ tail call void @llvm.aarch64.neon.vst1x4.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, <16 x i8> %4, i32 1)
+ ret void
+}
+
+define void @test_vst1q_s16_x4(i16* %a, [4 x <8 x i16>] %b) {
+; CHECK-LABEL: test_vst1q_s16_x4
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <8 x i16>] %b, 0
+ %2 = extractvalue [4 x <8 x i16>] %b, 1
+ %3 = extractvalue [4 x <8 x i16>] %b, 2
+ %4 = extractvalue [4 x <8 x i16>] %b, 3
+ %5 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v8i16(i8* %5, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, <8 x i16> %4, i32 2)
+ ret void
+}
+
+define void @test_vst1q_s32_x4(i32* %a, [4 x <4 x i32>] %b) {
+; CHECK-LABEL: test_vst1q_s32_x4
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <4 x i32>] %b, 0
+ %2 = extractvalue [4 x <4 x i32>] %b, 1
+ %3 = extractvalue [4 x <4 x i32>] %b, 2
+ %4 = extractvalue [4 x <4 x i32>] %b, 3
+ %5 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v4i32(i8* %5, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, i32 4)
+ ret void
+}
+
+define void @test_vst1q_s64_x4(i64* %a, [4 x <2 x i64>] %b) {
+; CHECK-LABEL: test_vst1q_s64_x4
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <2 x i64>] %b, 0
+ %2 = extractvalue [4 x <2 x i64>] %b, 1
+ %3 = extractvalue [4 x <2 x i64>] %b, 2
+ %4 = extractvalue [4 x <2 x i64>] %b, 3
+ %5 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v2i64(i8* %5, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, <2 x i64> %4, i32 8)
+ ret void
+}
+
+define void @test_vst1q_f32_x4(float* %a, [4 x <4 x float>] %b) {
+; CHECK-LABEL: test_vst1q_f32_x4
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <4 x float>] %b, 0
+ %2 = extractvalue [4 x <4 x float>] %b, 1
+ %3 = extractvalue [4 x <4 x float>] %b, 2
+ %4 = extractvalue [4 x <4 x float>] %b, 3
+ %5 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
+ ret void
+}
+
+define void @test_vst1q_f64_x4(double* %a, [4 x <2 x double>] %b) {
+; CHECK-LABEL: test_vst1q_f64_x4
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <2 x double>] %b, 0
+ %2 = extractvalue [4 x <2 x double>] %b, 1
+ %3 = extractvalue [4 x <2 x double>] %b, 2
+ %4 = extractvalue [4 x <2 x double>] %b, 3
+ %5 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
+ ret void
+}
+
+define void @test_vst1_s8_x4(i8* %a, [4 x <8 x i8>] %b) {
+; CHECK-LABEL: test_vst1_s8_x4
+; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <8 x i8>] %b, 0
+ %2 = extractvalue [4 x <8 x i8>] %b, 1
+ %3 = extractvalue [4 x <8 x i8>] %b, 2
+ %4 = extractvalue [4 x <8 x i8>] %b, 3
+ tail call void @llvm.aarch64.neon.vst1x4.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, <8 x i8> %4, i32 1)
+ ret void
+}
+
+define void @test_vst1_s16_x4(i16* %a, [4 x <4 x i16>] %b) {
+; CHECK-LABEL: test_vst1_s16_x4
+; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <4 x i16>] %b, 0
+ %2 = extractvalue [4 x <4 x i16>] %b, 1
+ %3 = extractvalue [4 x <4 x i16>] %b, 2
+ %4 = extractvalue [4 x <4 x i16>] %b, 3
+ %5 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v4i16(i8* %5, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, <4 x i16> %4, i32 2)
+ ret void
+}
+
+define void @test_vst1_s32_x4(i32* %a, [4 x <2 x i32>] %b) {
+; CHECK-LABEL: test_vst1_s32_x4
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <2 x i32>] %b, 0
+ %2 = extractvalue [4 x <2 x i32>] %b, 1
+ %3 = extractvalue [4 x <2 x i32>] %b, 2
+ %4 = extractvalue [4 x <2 x i32>] %b, 3
+ %5 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 4)
+ ret void
+}
+
+define void @test_vst1_s64_x4(i64* %a, [4 x <1 x i64>] %b) {
+; CHECK-LABEL: test_vst1_s64_x4
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <1 x i64>] %b, 0
+ %2 = extractvalue [4 x <1 x i64>] %b, 1
+ %3 = extractvalue [4 x <1 x i64>] %b, 2
+ %4 = extractvalue [4 x <1 x i64>] %b, 3
+ %5 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v1i64(i8* %5, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, <1 x i64> %4, i32 8)
+ ret void
+}
+
+define void @test_vst1_f32_x4(float* %a, [4 x <2 x float>] %b) {
+; CHECK-LABEL: test_vst1_f32_x4
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <2 x float>] %b, 0
+ %2 = extractvalue [4 x <2 x float>] %b, 1
+ %3 = extractvalue [4 x <2 x float>] %b, 2
+ %4 = extractvalue [4 x <2 x float>] %b, 3
+ %5 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 4)
+ ret void
+}
+
+define void @test_vst1_f64_x4(double* %a, [4 x <1 x double>] %b) {
+; CHECK-LABEL: test_vst1_f64_x4
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+ %1 = extractvalue [4 x <1 x double>] %b, 0
+ %2 = extractvalue [4 x <1 x double>] %b, 1
+ %3 = extractvalue [4 x <1 x double>] %b, 2
+ %4 = extractvalue [4 x <1 x double>] %b, 3
+ %5 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v1f64(i8* %5, <1 x double> %1, <1 x double> %2, <1 x double> %3, <1 x double> %4, i32 8)
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8*, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8*, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8*, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8*, i32)
+declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4f32(i8*, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2f64(i8*, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v2f32(i8*, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v1f64(i8*, <1 x double>, <1 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
diff --git a/test/CodeGen/AArch64/neon-simd-ldst-one.ll b/test/CodeGen/AArch64/neon-simd-ldst-one.ll
new file mode 100644
index 000000000000..3f28320f23d5
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-ldst-one.ll
@@ -0,0 +1,2113 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
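+; This file covers the single-element NEON load/store forms: duplicate-to-all-lanes
+; loads (ld1r/ld2r/ld3r/ld4r) and lane-indexed loads/stores (e.g. ld1 {v.h}[n], ld2 {v.s, v.s}[n]).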
+
+%struct.int8x16x2_t = type { [2 x <16 x i8>] }
+%struct.int16x8x2_t = type { [2 x <8 x i16>] }
+%struct.int32x4x2_t = type { [2 x <4 x i32>] }
+%struct.int64x2x2_t = type { [2 x <2 x i64>] }
+%struct.float32x4x2_t = type { [2 x <4 x float>] }
+%struct.float64x2x2_t = type { [2 x <2 x double>] }
+%struct.int8x8x2_t = type { [2 x <8 x i8>] }
+%struct.int16x4x2_t = type { [2 x <4 x i16>] }
+%struct.int32x2x2_t = type { [2 x <2 x i32>] }
+%struct.int64x1x2_t = type { [2 x <1 x i64>] }
+%struct.float32x2x2_t = type { [2 x <2 x float>] }
+%struct.float64x1x2_t = type { [2 x <1 x double>] }
+%struct.int8x16x3_t = type { [3 x <16 x i8>] }
+%struct.int16x8x3_t = type { [3 x <8 x i16>] }
+%struct.int32x4x3_t = type { [3 x <4 x i32>] }
+%struct.int64x2x3_t = type { [3 x <2 x i64>] }
+%struct.float32x4x3_t = type { [3 x <4 x float>] }
+%struct.float64x2x3_t = type { [3 x <2 x double>] }
+%struct.int8x8x3_t = type { [3 x <8 x i8>] }
+%struct.int16x4x3_t = type { [3 x <4 x i16>] }
+%struct.int32x2x3_t = type { [3 x <2 x i32>] }
+%struct.int64x1x3_t = type { [3 x <1 x i64>] }
+%struct.float32x2x3_t = type { [3 x <2 x float>] }
+%struct.float64x1x3_t = type { [3 x <1 x double>] }
+%struct.int8x16x4_t = type { [4 x <16 x i8>] }
+%struct.int16x8x4_t = type { [4 x <8 x i16>] }
+%struct.int32x4x4_t = type { [4 x <4 x i32>] }
+%struct.int64x2x4_t = type { [4 x <2 x i64>] }
+%struct.float32x4x4_t = type { [4 x <4 x float>] }
+%struct.float64x2x4_t = type { [4 x <2 x double>] }
+%struct.int8x8x4_t = type { [4 x <8 x i8>] }
+%struct.int16x4x4_t = type { [4 x <4 x i16>] }
+%struct.int32x2x4_t = type { [4 x <2 x i32>] }
+%struct.int64x1x4_t = type { [4 x <1 x i64>] }
+%struct.float32x2x4_t = type { [4 x <2 x float>] }
+%struct.float64x1x4_t = type { [4 x <1 x double>] }
+
+define <16 x i8> @test_vld1q_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld1q_dup_s8
+; CHECK: ld1r {{{v[0-9]+}}.16b}, [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %1 = insertelement <16 x i8> undef, i8 %0, i32 0
+ %lane = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ ret <16 x i8> %lane
+}
+
+define <8 x i16> @test_vld1q_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld1q_dup_s16
+; CHECK: ld1r {{{v[0-9]+}}.8h}, [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %1 = insertelement <8 x i16> undef, i16 %0, i32 0
+ %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ ret <8 x i16> %lane
+}
+
+define <4 x i32> @test_vld1q_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld1q_dup_s32
+; CHECK: ld1r {{{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %1 = insertelement <4 x i32> undef, i32 %0, i32 0
+ %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
+ ret <4 x i32> %lane
+}
+
+define <2 x i64> @test_vld1q_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld1q_dup_s64
+; CHECK: ld1r {{{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %1 = insertelement <2 x i64> undef, i64 %0, i32 0
+ %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
+ ret <2 x i64> %lane
+}
+
+define <4 x float> @test_vld1q_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld1q_dup_f32
+; CHECK: ld1r {{{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = load float* %a, align 4
+ %1 = insertelement <4 x float> undef, float %0, i32 0
+ %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
+ ret <4 x float> %lane
+}
+
+define <2 x double> @test_vld1q_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld1q_dup_f64
+; CHECK: ld1r {{{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %1 = insertelement <2 x double> undef, double %0, i32 0
+ %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
+ ret <2 x double> %lane
+}
+
+define <8 x i8> @test_vld1_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld1_dup_s8
+; CHECK: ld1r {{{v[0-9]+}}.8b}, [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %1 = insertelement <8 x i8> undef, i8 %0, i32 0
+ %lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ ret <8 x i8> %lane
+}
+
+define <4 x i16> @test_vld1_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld1_dup_s16
+; CHECK: ld1r {{{v[0-9]+}}.4h}, [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %1 = insertelement <4 x i16> undef, i16 %0, i32 0
+ %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
+ ret <4 x i16> %lane
+}
+
+define <2 x i32> @test_vld1_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld1_dup_s32
+; CHECK: ld1r {{{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %1 = insertelement <2 x i32> undef, i32 %0, i32 0
+ %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
+ ret <2 x i32> %lane
+}
+
+define <1 x i64> @test_vld1_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld1_dup_s64
+; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %1 = insertelement <1 x i64> undef, i64 %0, i32 0
+ ret <1 x i64> %1
+}
+
+define <2 x float> @test_vld1_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld1_dup_f32
+; CHECK: ld1r {{{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = load float* %a, align 4
+ %1 = insertelement <2 x float> undef, float %0, i32 0
+ %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
+ ret <2 x float> %lane
+}
+
+define <1 x double> @test_vld1_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld1_dup_f64
+; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %1 = insertelement <1 x double> undef, double %0, i32 0
+ ret <1 x double> %1
+}
+
+define %struct.int8x16x2_t @test_vld2q_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld2q_dup_s8
+; CHECK: ld2r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
+entry:
+ %vld_dup = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <16 x i8>, <16 x i8> } %vld_dup, 0
+ %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+ %1 = extractvalue { <16 x i8>, <16 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
+ ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x8x2_t @test_vld2q_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld2q_dup_s16
+; CHECK: ld2r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <8 x i16>, <8 x i16> } %vld_dup, 0
+ %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ %2 = extractvalue { <8 x i16>, <8 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vld2q_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld2q_dup_s32
+; CHECK: ld2r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x i32>, <4 x i32> } %vld_dup, 0
+ %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i32>, <4 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld2q_dup_s64
+; CHECK: ld2r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x i64>, <2 x i64> } %vld_dup, 0
+ %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i64>, <2 x i64> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
+ ret %struct.int64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vld2q_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld2q_dup_f32
+; CHECK: ld2r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x float>, <4 x float> } %vld_dup, 0
+ %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x float>, <4 x float> } %vld_dup, 1
+ %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x2x2_t @test_vld2q_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld2q_dup_f64
+; CHECK: ld2r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x double>, <2 x double> } %vld_dup, 0
+ %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x double>, <2 x double> } %vld_dup, 1
+ %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
+ ret %struct.float64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x8x2_t @test_vld2_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld2_dup_s8
+; CHECK: ld2r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
+entry:
+ %vld_dup = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <8 x i8>, <8 x i8> } %vld_dup, 0
+ %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
+ %1 = extractvalue { <8 x i8>, <8 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vld2_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld2_dup_s16
+; CHECK: ld2r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <4 x i16>, <4 x i16> } %vld_dup, 0
+ %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i16>, <4 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vld2_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld2_dup_s32
+; CHECK: ld2r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x i32>, <2 x i32> } %vld_dup, 0
+ %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i32>, <2 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x1x2_t @test_vld2_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld2_dup_s64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
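+; Each <1 x i64> has only one lane, so no ld2r replication is needed; a
+; contiguous two-register ld1 is the expected lowering here.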
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld_dup, 1
+ %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
+ ret %struct.int64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vld2_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld2_dup_f32
+; CHECK: ld2r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x float>, <2 x float> } %vld_dup, 0
+ %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x float>, <2 x float> } %vld_dup, 1
+ %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x1x2_t @test_vld2_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld2_dup_f64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld_dup, 1
+ %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
+ ret %struct.float64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x3_t @test_vld3q_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld3q_dup_s8
+; CHECK: ld3r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
+entry:
+ %vld_dup = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 0
+ %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+ %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 2
+ %lane2 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %lane2, 0, 2
+ ret %struct.int8x16x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x8x3_t @test_vld3q_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld3q_dup_s16
+; CHECK: ld3r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 0
+ %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ %2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
+ %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 2
+ %lane2 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %lane2, 0, 2
+ ret %struct.int16x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x4x3_t @test_vld3q_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld3q_dup_s32
+; CHECK: ld3r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 0
+ %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 2
+ %lane2 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %lane2, 0, 2
+ ret %struct.int32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld3q_dup_s64
+; CHECK: ld3r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 0
+ %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 2
+ %lane2 = shufflevector <2 x i64> %3, <2 x i64> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %lane2, 0, 2
+ ret %struct.int64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x4x3_t @test_vld3q_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld3q_dup_f32
+; CHECK: ld3r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 0
+ %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 1
+ %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld_dup, 2
+ %lane2 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %lane2, 0, 2
+ ret %struct.float32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x2x3_t @test_vld3q_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld3q_dup_f64
+; CHECK: ld3r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 0
+ %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 1
+ %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld_dup, 2
+ %lane2 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %lane2, 0, 2
+ ret %struct.float64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x8x3_t @test_vld3_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld3_dup_s8
+; CHECK: ld3r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
+entry:
+ %vld_dup = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 0
+ %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
+ %1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 2
+ %lane2 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %lane2, 0, 2
+ ret %struct.int8x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x4x3_t @test_vld3_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld3_dup_s16
+; CHECK: ld3r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 0
+ %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 2
+ %lane2 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %lane2, 0, 2
+ ret %struct.int16x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x2x3_t @test_vld3_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld3_dup_s32
+; CHECK: ld3r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 0
+ %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 2
+ %lane2 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %lane2, 0, 2
+ ret %struct.int32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x1x3_t @test_vld3_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld3_dup_s64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 1
+ %vld_dup.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 2
+ %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld_dup.fca.2.extract, 0, 2
+ ret %struct.int64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x2x3_t @test_vld3_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld3_dup_f32
+; CHECK: ld3r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 0
+ %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 1
+ %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld_dup, 2
+ %lane2 = shufflevector <2 x float> %3, <2 x float> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %lane2, 0, 2
+ ret %struct.float32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x1x3_t @test_vld3_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld3_dup_f64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 1
+ %vld_dup.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld_dup, 2
+ %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld_dup.fca.2.extract, 0, 2
+ ret %struct.float64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.int8x16x4_t @test_vld4q_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld4q_dup_s8
+; CHECK: ld4r {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, [x0]
+entry:
+ %vld_dup = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 0
+ %lane = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
+ %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
+ %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 2
+ %lane2 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
+ %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld_dup, 3
+ %lane3 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %lane3, 0, 3
+ ret %struct.int8x16x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x8x4_t @test_vld4q_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld4q_dup_s16
+; CHECK: ld4r {{{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8* %0, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, <8 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 0
+ %lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ %2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> zeroinitializer
+ %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 2
+ %lane2 = shufflevector <8 x i16> %3, <8 x i16> undef, <8 x i32> zeroinitializer
+ %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld_dup, 3
+ %lane3 = shufflevector <8 x i16> %4, <8 x i16> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %lane3, 0, 3
+ ret %struct.int16x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x4x4_t @test_vld4q_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld4q_dup_s32
+; CHECK: ld4r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8* %0, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 0
+ %lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 2
+ %lane2 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
+ %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld_dup, 3
+ %lane3 = shufflevector <4 x i32> %4, <4 x i32> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %lane3, 0, 3
+ ret %struct.int32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld4q_dup_s64
+; CHECK: ld4r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8* %0, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, <2 x i64> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 0
+ %lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i64> %2, <2 x i64> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 2
+ %lane2 = shufflevector <2 x i64> %3, <2 x i64> undef, <2 x i32> zeroinitializer
+ %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld_dup, 3
+ %lane3 = shufflevector <2 x i64> %4, <2 x i64> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %lane3, 0, 3
+ ret %struct.int64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x4x4_t @test_vld4q_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld4q_dup_f32
+; CHECK: ld4r {{{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8* %0, <4 x float> undef, <4 x float> undef, <4 x float> undef, <4 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 0
+ %lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 1
+ %lane1 = shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 2
+ %lane2 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> zeroinitializer
+ %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld_dup, 3
+ %lane3 = shufflevector <4 x float> %4, <4 x float> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %lane3, 0, 3
+ ret %struct.float32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x2x4_t @test_vld4q_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld4q_dup_f64
+; CHECK: ld4r {{{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %0, <2 x double> undef, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
+ %1 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 0
+ %lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 1
+ %lane1 = shufflevector <2 x double> %2, <2 x double> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 2
+ %lane2 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
+ %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld_dup, 3
+ %lane3 = shufflevector <2 x double> %4, <2 x double> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %lane3, 0, 3
+ ret %struct.float64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int8x8x4_t @test_vld4_dup_s8(i8* %a) {
+; CHECK-LABEL: test_vld4_dup_s8
+; CHECK: ld4r {{{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b}, [x0]
+entry:
+ %vld_dup = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
+ %0 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 0
+ %lane = shufflevector <8 x i8> %0, <8 x i8> undef, <8 x i32> zeroinitializer
+ %1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 1
+ %lane1 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 2
+ %lane2 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
+ %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld_dup, 3
+ %lane3 = shufflevector <8 x i8> %3, <8 x i8> undef, <8 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %lane3, 0, 3
+ ret %struct.int8x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x4x4_t @test_vld4_dup_s16(i16* %a) {
+; CHECK-LABEL: test_vld4_dup_s16
+; CHECK: ld4r {{{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h}, [x0]
+entry:
+ %0 = bitcast i16* %a to i8*
+ %vld_dup = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8* %0, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %1 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 0
+ %lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
+ %2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 1
+ %lane1 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> zeroinitializer
+ %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 2
+ %lane2 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld_dup, 3
+ %lane3 = shufflevector <4 x i16> %4, <4 x i16> undef, <4 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %lane3, 0, 3
+ ret %struct.int16x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x2x4_t @test_vld4_dup_s32(i32* %a) {
+; CHECK-LABEL: test_vld4_dup_s32
+; CHECK: ld4r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast i32* %a to i8*
+ %vld_dup = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %0, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 0
+ %lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 1
+ %lane1 = shufflevector <2 x i32> %2, <2 x i32> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 2
+ %lane2 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
+ %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld_dup, 3
+ %lane3 = shufflevector <2 x i32> %4, <2 x i32> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %lane3, 0, 3
+ ret %struct.int32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x1x4_t @test_vld4_dup_s64(i64* %a) {
+; CHECK-LABEL: test_vld4_dup_s64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = bitcast i64* %a to i8*
+ %vld_dup = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 1
+ %vld_dup.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 2
+ %vld_dup.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld_dup, 3
+ %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld_dup.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld_dup.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld_dup.fca.3.extract, 0, 3
+ ret %struct.int64x1x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x2x4_t @test_vld4_dup_f32(float* %a) {
+; CHECK-LABEL: test_vld4_dup_f32
+; CHECK: ld4r {{{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s}, [x0]
+entry:
+ %0 = bitcast float* %a to i8*
+ %vld_dup = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8* %0, <2 x float> undef, <2 x float> undef, <2 x float> undef, <2 x float> undef, i32 0, i32 4)
+ %1 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 0
+ %lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
+ %2 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 1
+ %lane1 = shufflevector <2 x float> %2, <2 x float> undef, <2 x i32> zeroinitializer
+ %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 2
+ %lane2 = shufflevector <2 x float> %3, <2 x float> undef, <2 x i32> zeroinitializer
+ %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld_dup, 3
+ %lane3 = shufflevector <2 x float> %4, <2 x float> undef, <2 x i32> zeroinitializer
+ %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %lane, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %lane1, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %lane2, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %lane3, 0, 3
+ ret %struct.float32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x1x4_t @test_vld4_dup_f64(double* %a) {
+; CHECK-LABEL: test_vld4_dup_f64
+; CHECK: ld1 {{{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d, {{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = bitcast double* %a to i8*
+ %vld_dup = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %0, i32 8)
+ %vld_dup.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 0
+ %vld_dup.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 1
+ %vld_dup.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 2
+ %vld_dup.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld_dup, 3
+ %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld_dup.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld_dup.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld_dup.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld_dup.fca.3.extract, 0, 3
+ ret %struct.float64x1x4_t %.fca.0.3.insert
+}
+
+define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vld1q_lane_s8
+; CHECK: ld1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
+ ret <16 x i8> %vld1_lane
+}
+
+define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vld1q_lane_s16
+; CHECK: ld1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %vld1_lane = insertelement <8 x i16> %b, i16 %0, i32 7
+ ret <8 x i16> %vld1_lane
+}
+
+define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vld1q_lane_s32
+; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %vld1_lane = insertelement <4 x i32> %b, i32 %0, i32 3
+ ret <4 x i32> %vld1_lane
+}
+
+define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vld1q_lane_s64
+; CHECK: ld1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i64* %a, align 8
+ %vld1_lane = insertelement <2 x i64> %b, i64 %0, i32 1
+ ret <2 x i64> %vld1_lane
+}
+
+define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) {
+; CHECK-LABEL: test_vld1q_lane_f32
+; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load float* %a, align 4
+ %vld1_lane = insertelement <4 x float> %b, float %0, i32 3
+ ret <4 x float> %vld1_lane
+}
+
+define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) {
+; CHECK-LABEL: test_vld1q_lane_f64
+; CHECK: ld1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load double* %a, align 8
+ %vld1_lane = insertelement <2 x double> %b, double %0, i32 1
+ ret <2 x double> %vld1_lane
+}
+
+define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vld1_lane_s8
+; CHECK: ld1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i8* %a, align 1
+ %vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
+ ret <8 x i8> %vld1_lane
+}
+
+define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vld1_lane_s16
+; CHECK: ld1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i16* %a, align 2
+ %vld1_lane = insertelement <4 x i16> %b, i16 %0, i32 3
+ ret <4 x i16> %vld1_lane
+}
+
+define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vld1_lane_s32
+; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load i32* %a, align 4
+ %vld1_lane = insertelement <2 x i32> %b, i32 %0, i32 1
+ ret <2 x i32> %vld1_lane
+}
+
+define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vld1_lane_s64
+; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
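+; Inserting into the only lane of a <1 x i64> defines the whole vector, so the
+; expected selection is ld1r rather than a lane-indexed ld1.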
+entry:
+ %0 = load i64* %a, align 8
+ %vld1_lane = insertelement <1 x i64> undef, i64 %0, i32 0
+ ret <1 x i64> %vld1_lane
+}
+
+define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) {
+; CHECK-LABEL: test_vld1_lane_f32
+; CHECK: ld1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = load float* %a, align 4
+ %vld1_lane = insertelement <2 x float> %b, float %0, i32 1
+ ret <2 x float> %vld1_lane
+}
+
+define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) {
+; CHECK-LABEL: test_vld1_lane_f64
+; CHECK: ld1r {{{v[0-9]+}}.1d}, [x0]
+entry:
+ %0 = load double* %a, align 8
+ %vld1_lane = insertelement <1 x double> undef, double %0, i32 0
+ ret <1 x double> %vld1_lane
+}
+
+define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld2q_lane_s16
+; CHECK: ld2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
+ %0 = bitcast i16* %a to i8*
+ %vld2_lane = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 7, i32 2)
+ %vld2_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int16x8x2_t undef, <8 x i16> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x2_t %.fca.0.0.insert, <8 x i16> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int16x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld2q_lane_s32
+; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
+ %0 = bitcast i32* %a to i8*
+ %vld2_lane = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 3, i32 4)
+ %vld2_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int32x4x2_t undef, <4 x i32> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x2_t %.fca.0.0.insert, <4 x i32> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld2q_lane_s64
+; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
+ %0 = bitcast i64* %a to i8*
+ %vld2_lane = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 1, i32 8)
+ %vld2_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int64x2x2_t undef, <2 x i64> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x2_t %.fca.0.0.insert, <2 x i64> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int64x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x4x2_t @test_vld2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld2q_lane_f32
+; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
+ %0 = bitcast float* %a to i8*
+ %vld2_lane = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 3, i32 4)
+ %vld2_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.float32x4x2_t undef, <4 x float> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x2_t %.fca.0.0.insert, <4 x float> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.float32x4x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x2x2_t @test_vld2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld2q_lane_f64
+; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
+ %0 = bitcast double* %a to i8*
+ %vld2_lane = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 1, i32 8)
+ %vld2_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.float64x2x2_t undef, <2 x double> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x2_t %.fca.0.0.insert, <2 x double> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.float64x2x2_t %.fca.0.1.insert
+}
+
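+; vld2_lane tests: lane-wise ld2 into a pair of 64-bit (D) vectors.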
+define %struct.int8x8x2_t @test_vld2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_s8
+; CHECK: ld2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ %vld2_lane = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 7, i32 1)
+ %vld2_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int8x8x2_t undef, <8 x i8> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x2_t %.fca.0.0.insert, <8 x i8> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int8x8x2_t %.fca.0.1.insert
+}
+
+define %struct.int16x4x2_t @test_vld2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_s16
+; CHECK: ld2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
+ %0 = bitcast i16* %a to i8*
+ %vld2_lane = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 3, i32 2)
+ %vld2_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int16x4x2_t undef, <4 x i16> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x2_t %.fca.0.0.insert, <4 x i16> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int16x4x2_t %.fca.0.1.insert
+}
+
+define %struct.int32x2x2_t @test_vld2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_s32
+; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
+ %0 = bitcast i32* %a to i8*
+ %vld2_lane = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 1, i32 4)
+ %vld2_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int32x2x2_t undef, <2 x i32> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x2_t %.fca.0.0.insert, <2 x i32> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.int64x1x2_t @test_vld2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_s64
+; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
+ %0 = bitcast i64* %a to i8*
+ %vld2_lane = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 0, i32 8)
+ %vld2_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.int64x1x2_t undef, <1 x i64> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x2_t %.fca.0.0.insert, <1 x i64> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.int64x1x2_t %.fca.0.1.insert
+}
+
+define %struct.float32x2x2_t @test_vld2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_f32
+; CHECK: ld2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
+ %0 = bitcast float* %a to i8*
+ %vld2_lane = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 1, i32 4)
+ %vld2_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.float32x2x2_t undef, <2 x float> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x2_t %.fca.0.0.insert, <2 x float> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.float32x2x2_t %.fca.0.1.insert
+}
+
+define %struct.float64x1x2_t @test_vld2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld2_lane_f64
+; CHECK: ld2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
+ %0 = bitcast double* %a to i8*
+ %vld2_lane = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 0, i32 8)
+ %vld2_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double> } %vld2_lane, 0
+ %vld2_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double> } %vld2_lane, 1
+ %.fca.0.0.insert = insertvalue %struct.float64x1x2_t undef, <1 x double> %vld2_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x2_t %.fca.0.0.insert, <1 x double> %vld2_lane.fca.1.extract, 0, 1
+ ret %struct.float64x1x2_t %.fca.0.1.insert
+}
+
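+; vld3q_lane tests: lane-wise ld3 into three 128-bit (Q) vectors.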
+define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld3q_lane_s16
+; CHECK: ld3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
+ %0 = bitcast i16* %a to i8*
+ %vld3_lane = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 7, i32 2)
+ %vld3_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int16x8x3_t undef, <8 x i16> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x3_t %.fca.0.0.insert, <8 x i16> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x3_t %.fca.0.1.insert, <8 x i16> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int16x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld3q_lane_s32
+; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
+ %0 = bitcast i32* %a to i8*
+ %vld3_lane = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 3, i32 4)
+ %vld3_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int32x4x3_t undef, <4 x i32> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x3_t %.fca.0.0.insert, <4 x i32> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x3_t %.fca.0.1.insert, <4 x i32> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld3q_lane_s64
+; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
+ %0 = bitcast i64* %a to i8*
+ %vld3_lane = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 1, i32 8)
+ %vld3_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int64x2x3_t undef, <2 x i64> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x3_t %.fca.0.0.insert, <2 x i64> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x3_t %.fca.0.1.insert, <2 x i64> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int64x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x4x3_t @test_vld3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld3q_lane_f32
+; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
+ %0 = bitcast float* %a to i8*
+ %vld3_lane = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 3, i32 4)
+ %vld3_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.float32x4x3_t undef, <4 x float> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x3_t %.fca.0.0.insert, <4 x float> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x3_t %.fca.0.1.insert, <4 x float> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.float32x4x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x2x3_t @test_vld3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld3q_lane_f64
+; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
+ %0 = bitcast double* %a to i8*
+ %vld3_lane = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 1, i32 8)
+ %vld3_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.float64x2x3_t undef, <2 x double> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x3_t %.fca.0.0.insert, <2 x double> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x3_t %.fca.0.1.insert, <2 x double> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.float64x2x3_t %.fca.0.2.insert
+}
+
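+; vld3_lane tests: lane-wise ld3 into three 64-bit (D) vectors.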
+define %struct.int8x8x3_t @test_vld3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_s8
+; CHECK: ld3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ %vld3_lane = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 7, i32 1)
+ %vld3_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int8x8x3_t undef, <8 x i8> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x3_t %.fca.0.0.insert, <8 x i8> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x3_t %.fca.0.1.insert, <8 x i8> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int8x8x3_t %.fca.0.2.insert
+}
+
+define %struct.int16x4x3_t @test_vld3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_s16
+; CHECK: ld3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
+ %0 = bitcast i16* %a to i8*
+ %vld3_lane = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 3, i32 2)
+ %vld3_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int16x4x3_t undef, <4 x i16> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x3_t %.fca.0.0.insert, <4 x i16> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x3_t %.fca.0.1.insert, <4 x i16> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int16x4x3_t %.fca.0.2.insert
+}
+
+define %struct.int32x2x3_t @test_vld3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_s32
+; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
+ %0 = bitcast i32* %a to i8*
+ %vld3_lane = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 1, i32 4)
+ %vld3_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int32x2x3_t undef, <2 x i32> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x3_t %.fca.0.0.insert, <2 x i32> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x3_t %.fca.0.1.insert, <2 x i32> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.int64x1x3_t @test_vld3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_s64
+; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
+ %0 = bitcast i64* %a to i8*
+ %vld3_lane = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 0, i32 8)
+ %vld3_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.int64x1x3_t undef, <1 x i64> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x3_t %.fca.0.0.insert, <1 x i64> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x3_t %.fca.0.1.insert, <1 x i64> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.int64x1x3_t %.fca.0.2.insert
+}
+
+define %struct.float32x2x3_t @test_vld3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_f32
+; CHECK: ld3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
+ %0 = bitcast float* %a to i8*
+ %vld3_lane = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 1, i32 4)
+ %vld3_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.float32x2x3_t undef, <2 x float> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x3_t %.fca.0.0.insert, <2 x float> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x3_t %.fca.0.1.insert, <2 x float> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.float32x2x3_t %.fca.0.2.insert
+}
+
+define %struct.float64x1x3_t @test_vld3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld3_lane_f64
+; CHECK: ld3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
+ %0 = bitcast double* %a to i8*
+ %vld3_lane = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 0, i32 8)
+ %vld3_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 0
+ %vld3_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 1
+ %vld3_lane.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double> } %vld3_lane, 2
+ %.fca.0.0.insert = insertvalue %struct.float64x1x3_t undef, <1 x double> %vld3_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x3_t %.fca.0.0.insert, <1 x double> %vld3_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x3_t %.fca.0.1.insert, <1 x double> %vld3_lane.fca.2.extract, 0, 2
+ ret %struct.float64x1x3_t %.fca.0.2.insert
+}
+
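+; vld4q_lane tests: lane-wise ld4 into four 128-bit (Q) vectors.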
+define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_s8
+; CHECK: ld4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vld4_lane = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 15, i32 1)
+ %vld4_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int8x16x4_t undef, <16 x i8> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x16x4_t %.fca.0.0.insert, <16 x i8> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x16x4_t %.fca.0.1.insert, <16 x i8> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x16x4_t %.fca.0.2.insert, <16 x i8> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int8x16x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_s16
+; CHECK: ld4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
+ %0 = bitcast i16* %a to i8*
+ %vld4_lane = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 7, i32 2)
+ %vld4_lane.fca.0.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int16x8x4_t undef, <8 x i16> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x8x4_t %.fca.0.0.insert, <8 x i16> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x8x4_t %.fca.0.1.insert, <8 x i16> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x8x4_t %.fca.0.2.insert, <8 x i16> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int16x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_s32
+; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
+ %0 = bitcast i32* %a to i8*
+ %vld4_lane = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 3, i32 4)
+ %vld4_lane.fca.0.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int32x4x4_t undef, <4 x i32> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x4x4_t %.fca.0.0.insert, <4 x i32> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x4x4_t %.fca.0.1.insert, <4 x i32> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x4x4_t %.fca.0.2.insert, <4 x i32> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_s64
+; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
+ %0 = bitcast i64* %a to i8*
+ %vld4_lane = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 1, i32 8)
+ %vld4_lane.fca.0.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int64x2x4_t undef, <2 x i64> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x2x4_t %.fca.0.0.insert, <2 x i64> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x2x4_t %.fca.0.1.insert, <2 x i64> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x2x4_t %.fca.0.2.insert, <2 x i64> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int64x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x4x4_t @test_vld4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_f32
+; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
+ %0 = bitcast float* %a to i8*
+ %vld4_lane = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 3, i32 4)
+ %vld4_lane.fca.0.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.float32x4x4_t undef, <4 x float> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x4x4_t %.fca.0.0.insert, <4 x float> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x4x4_t %.fca.0.1.insert, <4 x float> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x4x4_t %.fca.0.2.insert, <4 x float> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.float32x4x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x2x4_t @test_vld4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld4q_lane_f64
+; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
+ %0 = bitcast double* %a to i8*
+ %vld4_lane = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 1, i32 8)
+ %vld4_lane.fca.0.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.float64x2x4_t undef, <2 x double> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x2x4_t %.fca.0.0.insert, <2 x double> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x2x4_t %.fca.0.1.insert, <2 x double> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x2x4_t %.fca.0.2.insert, <2 x double> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.float64x2x4_t %.fca.0.3.insert
+}
+
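+; vld4_lane tests: lane-wise ld4 into four 64-bit (D) vectors.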
+define %struct.int8x8x4_t @test_vld4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_s8
+; CHECK: ld4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ %vld4_lane = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 7, i32 1)
+ %vld4_lane.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int8x8x4_t undef, <8 x i8> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int8x8x4_t %.fca.0.0.insert, <8 x i8> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int8x8x4_t %.fca.0.1.insert, <8 x i8> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int8x8x4_t %.fca.0.2.insert, <8 x i8> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int8x8x4_t %.fca.0.3.insert
+}
+
+define %struct.int16x4x4_t @test_vld4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_s16
+; CHECK: ld4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
+ %0 = bitcast i16* %a to i8*
+ %vld4_lane = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 3, i32 2)
+ %vld4_lane.fca.0.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int16x4x4_t undef, <4 x i16> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int16x4x4_t %.fca.0.0.insert, <4 x i16> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int16x4x4_t %.fca.0.1.insert, <4 x i16> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int16x4x4_t %.fca.0.2.insert, <4 x i16> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int16x4x4_t %.fca.0.3.insert
+}
+
+define %struct.int32x2x4_t @test_vld4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_s32
+; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
+ %0 = bitcast i32* %a to i8*
+ %vld4_lane = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 1, i32 4)
+ %vld4_lane.fca.0.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int32x2x4_t undef, <2 x i32> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int32x2x4_t %.fca.0.0.insert, <2 x i32> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int32x2x4_t %.fca.0.1.insert, <2 x i32> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int32x2x4_t %.fca.0.2.insert, <2 x i32> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.int64x1x4_t @test_vld4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_s64
+; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
+ %0 = bitcast i64* %a to i8*
+ %vld4_lane = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 0, i32 8)
+ %vld4_lane.fca.0.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.int64x1x4_t undef, <1 x i64> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.int64x1x4_t %.fca.0.0.insert, <1 x i64> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.int64x1x4_t %.fca.0.1.insert, <1 x i64> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.int64x1x4_t %.fca.0.2.insert, <1 x i64> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.int64x1x4_t %.fca.0.3.insert
+}
+
+define %struct.float32x2x4_t @test_vld4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_f32
+; CHECK: ld4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
+ %0 = bitcast float* %a to i8*
+ %vld4_lane = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 1, i32 4)
+ %vld4_lane.fca.0.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.float32x2x4_t undef, <2 x float> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float32x2x4_t %.fca.0.0.insert, <2 x float> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float32x2x4_t %.fca.0.1.insert, <2 x float> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float32x2x4_t %.fca.0.2.insert, <2 x float> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.float32x2x4_t %.fca.0.3.insert
+}
+
+define %struct.float64x1x4_t @test_vld4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vld4_lane_f64
+; CHECK: ld4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
+ %0 = bitcast double* %a to i8*
+ %vld4_lane = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 0, i32 8)
+ %vld4_lane.fca.0.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4_lane, 0
+ %vld4_lane.fca.1.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4_lane, 1
+ %vld4_lane.fca.2.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4_lane, 2
+ %vld4_lane.fca.3.extract = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %vld4_lane, 3
+ %.fca.0.0.insert = insertvalue %struct.float64x1x4_t undef, <1 x double> %vld4_lane.fca.0.extract, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float64x1x4_t %.fca.0.0.insert, <1 x double> %vld4_lane.fca.1.extract, 0, 1
+ %.fca.0.2.insert = insertvalue %struct.float64x1x4_t %.fca.0.1.insert, <1 x double> %vld4_lane.fca.2.extract, 0, 2
+ %.fca.0.3.insert = insertvalue %struct.float64x1x4_t %.fca.0.2.insert, <1 x double> %vld4_lane.fca.3.extract, 0, 3
+ ret %struct.float64x1x4_t %.fca.0.3.insert
+}
+
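+; vst1q_lane tests: a single extracted lane of a 128-bit (Q) vector is stored with st1.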
+define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) {
+; CHECK-LABEL: test_vst1q_lane_s8
+; CHECK: st1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <16 x i8> %b, i32 15
+ store i8 %0, i8* %a, align 1
+ ret void
+}
+
+define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) {
+; CHECK-LABEL: test_vst1q_lane_s16
+; CHECK: st1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <8 x i16> %b, i32 7
+ store i16 %0, i16* %a, align 2
+ ret void
+}
+
+define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) {
+; CHECK-LABEL: test_vst1q_lane_s32
+; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x i32> %b, i32 3
+ store i32 %0, i32* %a, align 4
+ ret void
+}
+
+define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vst1q_lane_s64
+; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x i64> %b, i32 1
+ store i64 %0, i64* %a, align 8
+ ret void
+}
+
+define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) {
+; CHECK-LABEL: test_vst1q_lane_f32
+; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x float> %b, i32 3
+ store float %0, float* %a, align 4
+ ret void
+}
+
+define void @test_vst1q_lane_f64(double* %a, <2 x double> %b) {
+; CHECK-LABEL: test_vst1q_lane_f64
+; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x double> %b, i32 1
+ store double %0, double* %a, align 8
+ ret void
+}
+
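+; vst1_lane tests: a single extracted lane of a 64-bit (D) vector is stored with st1.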
+define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) {
+; CHECK-LABEL: test_vst1_lane_s8
+; CHECK: st1 {{{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <8 x i8> %b, i32 7
+ store i8 %0, i8* %a, align 1
+ ret void
+}
+
+define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) {
+; CHECK-LABEL: test_vst1_lane_s16
+; CHECK: st1 {{{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <4 x i16> %b, i32 3
+ store i16 %0, i16* %a, align 2
+ ret void
+}
+
+define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) {
+; CHECK-LABEL: test_vst1_lane_s32
+; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x i32> %b, i32 1
+ store i32 %0, i32* %a, align 4
+ ret void
+}
+
+define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vst1_lane_s64
+; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <1 x i64> %b, i32 0
+ store i64 %0, i64* %a, align 8
+ ret void
+}
+
+define void @test_vst1_lane_f32(float* %a, <2 x float> %b) {
+; CHECK-LABEL: test_vst1_lane_f32
+; CHECK: st1 {{{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <2 x float> %b, i32 1
+ store float %0, float* %a, align 4
+ ret void
+}
+
+define void @test_vst1_lane_f64(double* %a, <1 x double> %b) {
+; CHECK-LABEL: test_vst1_lane_f64
+; CHECK: st1 {{{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %0 = extractelement <1 x double> %b, i32 0
+ store double %0, double* %a, align 8
+ ret void
+}
+
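+; vst2q_lane tests: lane-wise st2 from two 128-bit (Q) vectors.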
+define void @test_vst2q_lane_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_s8
+; CHECK: st2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ tail call void @llvm.arm.neon.vst2lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, i32 15, i32 1)
+ ret void
+}
+
+define void @test_vst2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_s16
+; CHECK: st2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, i32 7, i32 2)
+ ret void
+}
+
+define void @test_vst2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_s32
+; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_s64
+; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, i32 1, i32 8)
+ ret void
+}
+
+define void @test_vst2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_f32
+; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst2q_lane_f64
+; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, i32 1, i32 8)
+ ret void
+}
+
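+; vst2_lane tests: lane-wise st2 from two 64-bit (D) vectors.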
+define void @test_vst2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_s8
+; CHECK: st2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ tail call void @llvm.arm.neon.vst2lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, i32 7, i32 1)
+ ret void
+}
+
+define void @test_vst2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_s16
+; CHECK: st2 {{{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, i32 3, i32 2)
+ ret void
+}
+
+define void @test_vst2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_s32
+; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_s64
+; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, i32 0, i32 8)
+ ret void
+}
+
+define void @test_vst2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_f32
+; CHECK: st2 {{{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst2_lane_f64
+; CHECK: st2 {{{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, i32 0, i32 8)
+ ret void
+}
+
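+; vst3q_lane tests: lane-wise st3 from three 128-bit (Q) vectors.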
+define void @test_vst3q_lane_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_s8
+; CHECK: st3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ tail call void @llvm.arm.neon.vst3lane.v16i8(i8* %a, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, i32 15, i32 1)
+ ret void
+}
+
+define void @test_vst3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_s16
+; CHECK: st3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i16>] %b.coerce, 2
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, i32 7, i32 2)
+ ret void
+}
+
+define void @test_vst3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_s32
+; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i32>] %b.coerce, 2
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_s64
+; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i64>] %b.coerce, 2
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, i32 1, i32 8)
+ ret void
+}
+
+define void @test_vst3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_f32
+; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x float>] %b.coerce, 2
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst3q_lane_f64
+; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x double>] %b.coerce, 2
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, i32 1, i32 8)
+ ret void
+}
+
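+; vst3_lane tests: lane-wise st3 from three 64-bit (D) vectors.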
+define void @test_vst3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_s8
+; CHECK: st3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ tail call void @llvm.arm.neon.vst3lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, i32 7, i32 1)
+ ret void
+}
+
+define void @test_vst3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_s16
+; CHECK: st3 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <4 x i16>] %b.coerce, 2
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, i32 3, i32 2)
+ ret void
+}
+
+define void @test_vst3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_s32
+; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x i32>] %b.coerce, 2
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_s64
+; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x i64>] %b.coerce, 2
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, i32 0, i32 8)
+ ret void
+}
+
+define void @test_vst3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_f32
+; CHECK: st3 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <2 x float>] %b.coerce, 2
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst3_lane_f64
+; CHECK: st3 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [3 x <1 x double>] %b.coerce, 2
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, i32 0, i32 8)
+ ret void
+}
+
+define void @test_vst4q_lane_s8(i16* %a, [4 x <16 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_s8
+; CHECK: st4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v16i8(i8* %0, <16 x i8> %b.coerce.fca.0.extract, <16 x i8> %b.coerce.fca.1.extract, <16 x i8> %b.coerce.fca.2.extract, <16 x i8> %b.coerce.fca.3.extract, i32 15, i32 2)
+ ret void
+}
+
+define void @test_vst4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_s16
+; CHECK: st4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i16>] %b.coerce, 3
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v8i16(i8* %0, <8 x i16> %b.coerce.fca.0.extract, <8 x i16> %b.coerce.fca.1.extract, <8 x i16> %b.coerce.fca.2.extract, <8 x i16> %b.coerce.fca.3.extract, i32 7, i32 2)
+ ret void
+}
+
+define void @test_vst4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_s32
+; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i32>] %b.coerce, 3
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v4i32(i8* %0, <4 x i32> %b.coerce.fca.0.extract, <4 x i32> %b.coerce.fca.1.extract, <4 x i32> %b.coerce.fca.2.extract, <4 x i32> %b.coerce.fca.3.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_s64
+; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i64>] %b.coerce, 3
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2i64(i8* %0, <2 x i64> %b.coerce.fca.0.extract, <2 x i64> %b.coerce.fca.1.extract, <2 x i64> %b.coerce.fca.2.extract, <2 x i64> %b.coerce.fca.3.extract, i32 1, i32 8)
+ ret void
+}
+
+define void @test_vst4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_f32
+; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x float>] %b.coerce, 3
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v4f32(i8* %0, <4 x float> %b.coerce.fca.0.extract, <4 x float> %b.coerce.fca.1.extract, <4 x float> %b.coerce.fca.2.extract, <4 x float> %b.coerce.fca.3.extract, i32 3, i32 4)
+ ret void
+}
+
+define void @test_vst4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst4q_lane_f64
+; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x double>] %b.coerce, 3
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2f64(i8* %0, <2 x double> %b.coerce.fca.0.extract, <2 x double> %b.coerce.fca.1.extract, <2 x double> %b.coerce.fca.2.extract, <2 x double> %b.coerce.fca.3.extract, i32 1, i32 8)
+ ret void
+}
+
+define void @test_vst4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_s8
+; CHECK: st4 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ tail call void @llvm.arm.neon.vst4lane.v8i8(i8* %a, <8 x i8> %b.coerce.fca.0.extract, <8 x i8> %b.coerce.fca.1.extract, <8 x i8> %b.coerce.fca.2.extract, <8 x i8> %b.coerce.fca.3.extract, i32 7, i32 1)
+ ret void
+}
+
+define void @test_vst4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_s16
+; CHECK: st4 {{{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h, {{v[0-9]+}}.h}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <4 x i16>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <4 x i16>] %b.coerce, 3
+ %0 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v4i16(i8* %0, <4 x i16> %b.coerce.fca.0.extract, <4 x i16> %b.coerce.fca.1.extract, <4 x i16> %b.coerce.fca.2.extract, <4 x i16> %b.coerce.fca.3.extract, i32 3, i32 2)
+ ret void
+}
+
+define void @test_vst4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_s32
+; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x i32>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x i32>] %b.coerce, 3
+ %0 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2i32(i8* %0, <2 x i32> %b.coerce.fca.0.extract, <2 x i32> %b.coerce.fca.1.extract, <2 x i32> %b.coerce.fca.2.extract, <2 x i32> %b.coerce.fca.3.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_s64
+; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x i64>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x i64>] %b.coerce, 3
+ %0 = bitcast i64* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v1i64(i8* %0, <1 x i64> %b.coerce.fca.0.extract, <1 x i64> %b.coerce.fca.1.extract, <1 x i64> %b.coerce.fca.2.extract, <1 x i64> %b.coerce.fca.3.extract, i32 0, i32 8)
+ ret void
+}
+
+define void @test_vst4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_f32
+; CHECK: st4 {{{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s, {{v[0-9]+}}.s}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <2 x float>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <2 x float>] %b.coerce, 3
+ %0 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2f32(i8* %0, <2 x float> %b.coerce.fca.0.extract, <2 x float> %b.coerce.fca.1.extract, <2 x float> %b.coerce.fca.2.extract, <2 x float> %b.coerce.fca.3.extract, i32 1, i32 4)
+ ret void
+}
+
+define void @test_vst4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) {
+; CHECK-LABEL: test_vst4_lane_f64
+; CHECK: st4 {{{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d, {{v[0-9]+}}.d}[{{[0-9]+}}], [x0]
+entry:
+ %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
+ %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
+ %b.coerce.fca.2.extract = extractvalue [4 x <1 x double>] %b.coerce, 2
+ %b.coerce.fca.3.extract = extractvalue [4 x <1 x double>] %b.coerce, 3
+ %0 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v1f64(i8* %0, <1 x double> %b.coerce.fca.0.extract, <1 x double> %b.coerce.fca.1.extract, <1 x double> %b.coerce.fca.2.extract, <1 x double> %b.coerce.fca.3.extract, i32 0, i32 8)
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
+declare { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2lane.v2i64(i8*, <2 x i64>, <2 x i64>, i32, i32)
+declare { <4 x float>, <4 x float> } @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32)
+declare { <2 x double>, <2 x double> } @llvm.arm.neon.vld2lane.v2f64(i8*, <2 x double>, <2 x double>, i32, i32)
+declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
+declare { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32)
+declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32)
+declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
+declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
+declare { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8*, i32)
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8*, i32)
+declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
+declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2lane.v1i64(i8*, <1 x i64>, <1 x i64>, i32, i32)
+declare { <1 x double>, <1 x double> } @llvm.arm.neon.vld2lane.v1f64(i8*, <1 x double>, <1 x double>, i32, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
+declare { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32, i32)
+declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
+declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v2i64(i8*, <2 x i64>, <2 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v2f64(i8*, <2 x double>, <2 x double>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v1i64(i8*, <1 x i64>, <1 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v1f64(i8*, <1 x double>, <1 x double>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-simd-ldst.ll b/test/CodeGen/AArch64/neon-simd-ldst.ll
new file mode 100644
index 000000000000..afc0901bbc0b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-ldst.ll
@@ -0,0 +1,164 @@
+; RUN: llc < %s -O2 -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define void @test_ldstq_4v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldstq_4v
+; CHECK: ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
+; CHECK: st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0]
+entry:
+ %tobool62 = icmp eq i32 %count, 0
+ br i1 %tobool62, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.063 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.063, -1
+ %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %io, i32 1)
+ %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 3
+ tail call void @llvm.arm.neon.vst4.v16i8(i8* %io, <16 x i8> %vld4.fca.0.extract, <16 x i8> %vld4.fca.1.extract, <16 x i8> %vld4.fca.2.extract, <16 x i8> %vld4.fca.3.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+
+define void @test_ldstq_3v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldstq_3v
+; CHECK: ld3 {v0.16b, v1.16b, v2.16b}, [x0]
+; CHECK: st3 {v0.16b, v1.16b, v2.16b}, [x0]
+entry:
+ %tobool47 = icmp eq i32 %count, 0
+ br i1 %tobool47, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.048 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.048, -1
+ %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %io, i32 1)
+ %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 2
+ tail call void @llvm.arm.neon.vst3.v16i8(i8* %io, <16 x i8> %vld3.fca.0.extract, <16 x i8> %vld3.fca.1.extract, <16 x i8> %vld3.fca.2.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
+
+define void @test_ldstq_2v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldstq_2v
+; CHECK: ld2 {v0.16b, v1.16b}, [x0]
+; CHECK: st2 {v0.16b, v1.16b}, [x0]
+entry:
+ %tobool22 = icmp eq i32 %count, 0
+ br i1 %tobool22, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.023 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.023, -1
+ %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %io, i32 1)
+ %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 1
+ tail call void @llvm.arm.neon.vst2.v16i8(i8* %io, <16 x i8> %vld2.fca.0.extract, <16 x i8> %vld2.fca.1.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+
+define void @test_ldst_4v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldst_4v
+; CHECK: ld4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
+; CHECK: st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [x0]
+entry:
+ %tobool42 = icmp eq i32 %count, 0
+ br i1 %tobool42, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.043 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.043, -1
+ %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %io, i32 1)
+ %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
+ %vld4.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 1
+ %vld4.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 2
+ %vld4.fca.3.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 3
+ tail call void @llvm.arm.neon.vst4.v8i8(i8* %io, <8 x i8> %vld4.fca.0.extract, <8 x i8> %vld4.fca.1.extract, <8 x i8> %vld4.fca.2.extract, <8 x i8> %vld4.fca.3.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+
+define void @test_ldst_3v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldst_3v
+; CHECK: ld3 {v0.8b, v1.8b, v2.8b}, [x0]
+; CHECK: st3 {v0.8b, v1.8b, v2.8b}, [x0]
+entry:
+ %tobool32 = icmp eq i32 %count, 0
+ br i1 %tobool32, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.033 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.033, -1
+ %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %io, i32 1)
+ %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
+ %vld3.fca.1.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 1
+ %vld3.fca.2.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 2
+ tail call void @llvm.arm.neon.vst3.v8i8(i8* %io, <8 x i8> %vld3.fca.0.extract, <8 x i8> %vld3.fca.1.extract, <8 x i8> %vld3.fca.2.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+
+define void @test_ldst_2v(i8* noalias %io, i32 %count) {
+; CHECK-LABEL: test_ldst_2v
+; CHECK: ld2 {v0.8b, v1.8b}, [x0]
+; CHECK: st2 {v0.8b, v1.8b}, [x0]
+entry:
+ %tobool22 = icmp eq i32 %count, 0
+ br i1 %tobool22, label %while.end, label %while.body
+
+while.body: ; preds = %entry, %while.body
+ %count.addr.023 = phi i32 [ %dec, %while.body ], [ %count, %entry ]
+ %dec = add i32 %count.addr.023, -1
+ %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %io, i32 1)
+ %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
+ %vld2.fca.1.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 1
+ tail call void @llvm.arm.neon.vst2.v8i8(i8* %io, <8 x i8> %vld2.fca.0.extract, <8 x i8> %vld2.fca.1.extract, i32 1)
+ %tobool = icmp eq i32 %dec, 0
+ br i1 %tobool, label %while.end, label %while.body
+
+while.end: ; preds = %while.body, %entry
+ ret void
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
+
diff --git a/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll b/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
new file mode 100644
index 000000000000..156fe1db0ff5
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
@@ -0,0 +1,354 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+; Check for a post-increment updating load.
+define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
+; CHECK: test_vld1_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 2)
+ %tmp2 = getelementptr i16* %A, i32 4
+ store i16* %tmp2, i16** %ptr
+ ret <4 x i16> %tmp1
+}
+
+; Check for a post-increment updating load with register increment.
+define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
+; CHECK: test_vld1_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 4)
+ %tmp2 = getelementptr i32* %A, i32 %inc
+ store i32* %tmp2, i32** %ptr
+ ret <2 x i32> %tmp1
+}
+
+define <2 x float> @test_vld2_fx_update(float** %ptr) nounwind {
+; CHECK: test_vld2_fx_update
+; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ %tmp1 = call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 4)
+ %tmp2 = extractvalue { <2 x float>, <2 x float> } %tmp1, 0
+ %tmp3 = getelementptr float* %A, i32 4
+ store float* %tmp3, float** %ptr
+ ret <2 x float> %tmp2
+}
+
+define <16 x i8> @test_vld2_reg_update(i8** %ptr, i32 %inc) nounwind {
+; CHECK: test_vld2_reg_update
+; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i8** %ptr
+ %tmp0 = call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %A, i32 1)
+ %tmp1 = extractvalue { <16 x i8>, <16 x i8> } %tmp0, 0
+ %tmp2 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp2, i8** %ptr
+ ret <16 x i8> %tmp1
+}
+
+define <4 x i32> @test_vld3_fx_update(i32** %ptr) nounwind {
+; CHECK: test_vld3_fx_update
+; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #48
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ %tmp1 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 4)
+ %tmp2 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %tmp1, 0
+ %tmp3 = getelementptr i32* %A, i32 12
+ store i32* %tmp3, i32** %ptr
+ ret <4 x i32> %tmp2
+}
+
+define <4 x i16> @test_vld3_reg_update(i16** %ptr, i32 %inc) nounwind {
+; CHECK: test_vld3_reg_update
+; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 2)
+ %tmp2 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %tmp1, 0
+ %tmp3 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp3, i16** %ptr
+ ret <4 x i16> %tmp2
+}
+
+define <8 x i16> @test_vld4_fx_update(i16** %ptr) nounwind {
+; CHECK: test_vld4_fx_update
+; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], #64
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ %tmp1 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 8)
+ %tmp2 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %tmp1, 0
+ %tmp3 = getelementptr i16* %A, i32 32
+ store i16* %tmp3, i16** %ptr
+ ret <8 x i16> %tmp2
+}
+
+define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
+; CHECK: test_vld4_reg_update
+; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i8** %ptr
+ %tmp0 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %A, i32 1)
+ %tmp1 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %tmp0, 0
+ %tmp2 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp2, i8** %ptr
+ ret <8 x i8> %tmp1
+}
+
+define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
+; CHECK: test_vst1_fx_update
+; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %B, i32 4)
+ %tmp2 = getelementptr float* %A, i32 2
+ store float* %tmp2, float** %ptr
+ ret void
+}
+
+define void @test_vst1_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
+; CHECK: test_vst1_reg_update
+; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %B, i32 2)
+ %tmp1 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret void
+}
+
+define void @test_vst2_fx_update(i64** %ptr, <1 x i64> %B) nounwind {
+; CHECK: test_vst2_fx_update
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}], #16
+ %A = load i64** %ptr
+ %tmp0 = bitcast i64* %A to i8*
+ call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %B, <1 x i64> %B, i32 8)
+ %tmp1 = getelementptr i64* %A, i32 2
+ store i64* %tmp1, i64** %ptr
+ ret void
+}
+
+define void @test_vst2_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
+; CHECK: test_vst2_reg_update
+; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i8** %ptr
+ call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, i32 4)
+ %tmp0 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp0, i8** %ptr
+ ret void
+}
+
+define void @test_vst3_fx_update(i32** %ptr, <2 x i32> %B) nounwind {
+; CHECK: test_vst3_fx_update
+; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #24
+ %A = load i32** %ptr
+ %tmp0 = bitcast i32* %A to i8*
+ call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %B, <2 x i32> %B, <2 x i32> %B, i32 4)
+ %tmp1 = getelementptr i32* %A, i32 6
+ store i32* %tmp1, i32** %ptr
+ ret void
+}
+
+define void @test_vst3_reg_update(i16** %ptr, <8 x i16> %B, i32 %inc) nounwind {
+; CHECK: test_vst3_reg_update
+; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i16** %ptr
+ %tmp0 = bitcast i16* %A to i8*
+ call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %B, <8 x i16> %B, <8 x i16> %B, i32 2)
+ %tmp1 = getelementptr i16* %A, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret void
+}
+
+define void @test_vst4_fx_update(float** %ptr, <4 x float> %B) nounwind {
+; CHECK: test_vst4_fx_update
+; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}], #64
+ %A = load float** %ptr
+ %tmp0 = bitcast float* %A to i8*
+ call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %B, <4 x float> %B, <4 x float> %B, <4 x float> %B, i32 4)
+ %tmp1 = getelementptr float* %A, i32 16
+ store float* %tmp1, float** %ptr
+ ret void
+}
+
+define void @test_vst4_reg_update(i8** %ptr, <8 x i8> %B, i32 %inc) nounwind {
+; CHECK: test_vst4_reg_update
+; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}], x{{[0-9]+}}
+ %A = load i8** %ptr
+ call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, <8 x i8> %B, i32 1)
+ %tmp0 = getelementptr i8* %A, i32 %inc
+ store i8* %tmp0, i8** %ptr
+ ret void
+}
+
+
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32)
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32)
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8*, i32)
+declare { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8*, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
+declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8*, i32)
+
+declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32)
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
+declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
+declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
+
+define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
+; CHECK: test_vld1x2_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+ %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
+ %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
+ %tmp1 = getelementptr i8* %a, i32 32
+ store i8* %tmp1, i8** %ptr
+ ret <16 x i8> %2
+}
+
+define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x2_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
+ %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
+ %tmp1 = getelementptr i16* %a, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret <8 x i16> %3
+}
+
+define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
+; CHECK: test_vld1x3_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
+ %1 = bitcast i64* %a to i8*
+ %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
+ %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
+ %tmp1 = getelementptr i64* %a, i32 6
+ store i64* %tmp1, i64** %ptr
+ ret <2 x i64> %3
+}
+
+define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
+; CHECK: test_vld1x3_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
+ %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
+ %tmp1 = getelementptr i16* %a, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret <8 x i16> %3
+}
+
+define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
+; CHECK: test_vld1x4_fx_update
+; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+ %1 = bitcast float* %a to i8*
+ %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
+ %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
+ %tmp1 = getelementptr float* %a, i32 16
+ store float* %tmp1, float** %ptr
+ ret <4 x float> %3
+}
+
+define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
+; CHECK: test_vld1x4_reg_update
+; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+ %tmp1 = getelementptr i8* %a, i32 %inc
+ store i8* %tmp1, i8** %ptr
+ ret <8 x i8> %2
+}
+
+define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
+; CHECK: test_vst1x2_fx_update
+; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
+ %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
+ %tmp1 = getelementptr i8* %a, i32 32
+ store i8* %tmp1, i8** %ptr
+ ret void
+}
+
+define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x2_reg_update
+; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
+ %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
+ %3 = bitcast i16* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
+ %tmp1 = getelementptr i16* %a, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret void
+}
+
+define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
+; CHECK: test_vst1x3_fx_update
+; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
+ %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
+ %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
+ %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
+ %4 = bitcast i32* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
+ %tmp1 = getelementptr i32* %a, i32 6
+ store i32* %tmp1, i32** %ptr
+ ret void
+}
+
+define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x3_reg_update
+; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
+ %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
+ %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
+ %4 = bitcast i64* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
+ %tmp1 = getelementptr i64* %a, i32 %inc
+ store i64* %tmp1, i64** %ptr
+ ret void
+}
+
+define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
+; CHECK: test_vst1x4_fx_update
+; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
+ %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
+ %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
+ %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
+ %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
+ %5 = bitcast float* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
+ %tmp1 = getelementptr float* %a, i32 16
+ store float* %tmp1, float** %ptr
+ ret void
+}
+
+define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
+; CHECK: test_vst1x4_reg_update
+; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
+ %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
+ %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
+ %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
+ %5 = bitcast double* %a to i8*
+ tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
+ %tmp1 = getelementptr double* %a, i32 %inc
+ store double* %tmp1, double** %ptr
+ ret void
+}
+
+declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
+declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
+declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
+declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
+declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
+declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
+declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
+declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
+declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
+declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3
diff --git a/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll b/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll
new file mode 100644
index 000000000000..80a934700c6b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-post-ldst-one.ll
@@ -0,0 +1,319 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define { [2 x <16 x i8>] } @test_vld2q_dup_fx_update(i8* %a, i8** %ptr) {
+; CHECK-LABEL: test_vld2q_dup_fx_update
+; CHECK: ld2r {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #2
+ %1 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %a, <16 x i8> undef, <16 x i8> undef, i32 0, i32 1)
+ %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
+ %3 = shufflevector <16 x i8> %2, <16 x i8> undef, <16 x i32> zeroinitializer
+ %4 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
+ %5 = shufflevector <16 x i8> %4, <16 x i8> undef, <16 x i32> zeroinitializer
+ %6 = insertvalue { [2 x <16 x i8>] } undef, <16 x i8> %3, 0, 0
+ %7 = insertvalue { [2 x <16 x i8>] } %6, <16 x i8> %5, 0, 1
+ %tmp1 = getelementptr i8* %a, i32 2
+ store i8* %tmp1, i8** %ptr
+ ret { [2 x <16 x i8>] } %7
+}
+
+define { [2 x <4 x i32>] } @test_vld2q_dup_reg_update(i32* %a, i32** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld2q_dup_reg_update
+; CHECK: ld2r {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8* %1, <4 x i32> undef, <4 x i32> undef, i32 0, i32 4)
+ %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0
+ %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer
+ %5 = extractvalue { <4 x i32>, <4 x i32> } %2, 1
+ %6 = shufflevector <4 x i32> %5, <4 x i32> undef, <4 x i32> zeroinitializer
+ %7 = insertvalue { [2 x <4 x i32>] } undef, <4 x i32> %4, 0, 0
+ %8 = insertvalue { [2 x <4 x i32>] } %7, <4 x i32> %6, 0, 1
+ %tmp1 = getelementptr i32* %a, i32 %inc
+ store i32* %tmp1, i32** %ptr
+ ret { [2 x <4 x i32>] } %8
+}
+
+define { [3 x <4 x i16>] } @test_vld3_dup_fx_update(i16* %a, i16** %ptr) {
+; CHECK-LABEL: test_vld3_dup_fx_update
+; CHECK: ld3r {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #6
+ %1 = bitcast i16* %a to i8*
+ %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %1, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
+ %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
+ %4 = shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer
+ %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
+ %6 = shufflevector <4 x i16> %5, <4 x i16> undef, <4 x i32> zeroinitializer
+ %7 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
+ %8 = shufflevector <4 x i16> %7, <4 x i16> undef, <4 x i32> zeroinitializer
+ %9 = insertvalue { [3 x <4 x i16>] } undef, <4 x i16> %4, 0, 0
+ %10 = insertvalue { [3 x <4 x i16>] } %9, <4 x i16> %6, 0, 1
+ %11 = insertvalue { [3 x <4 x i16>] } %10, <4 x i16> %8, 0, 2
+ %tmp1 = getelementptr i16* %a, i32 3
+ store i16* %tmp1, i16** %ptr
+ ret { [3 x <4 x i16>] } %11
+}
+
+define { [3 x <8 x i8>] } @test_vld3_dup_reg_update(i8* %a, i8** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld3_dup_reg_update
+; CHECK: ld3r {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8* %a, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
+ %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
+ %3 = shufflevector <8 x i8> %2, <8 x i8> undef, <8 x i32> zeroinitializer
+ %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
+ %5 = shufflevector <8 x i8> %4, <8 x i8> undef, <8 x i32> zeroinitializer
+ %6 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
+ %7 = shufflevector <8 x i8> %6, <8 x i8> undef, <8 x i32> zeroinitializer
+ %8 = insertvalue { [3 x <8 x i8>] } undef, <8 x i8> %3, 0, 0
+ %9 = insertvalue { [3 x <8 x i8>] } %8, <8 x i8> %5, 0, 1
+ %10 = insertvalue { [3 x <8 x i8>] } %9, <8 x i8> %7, 0, 2
+ %tmp1 = getelementptr i8* %a, i32 %inc
+ store i8* %tmp1, i8** %ptr
+ ret { [3 x <8 x i8>] } %10
+}
+
+define { [4 x <2 x i32>] } @test_vld4_dup_fx_update(i32* %a, i32** %ptr) #0 {
+; CHECK-LABEL: test_vld4_dup_fx_update
+; CHECK: ld4r {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #16
+ %1 = bitcast i32* %a to i8*
+ %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %1, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 4)
+ %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
+ %4 = shufflevector <2 x i32> %3, <2 x i32> undef, <2 x i32> zeroinitializer
+ %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
+ %6 = shufflevector <2 x i32> %5, <2 x i32> undef, <2 x i32> zeroinitializer
+ %7 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
+ %8 = shufflevector <2 x i32> %7, <2 x i32> undef, <2 x i32> zeroinitializer
+ %9 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
+ %10 = shufflevector <2 x i32> %9, <2 x i32> undef, <2 x i32> zeroinitializer
+ %11 = insertvalue { [4 x <2 x i32>] } undef, <2 x i32> %4, 0, 0
+ %12 = insertvalue { [4 x <2 x i32>] } %11, <2 x i32> %6, 0, 1
+ %13 = insertvalue { [4 x <2 x i32>] } %12, <2 x i32> %8, 0, 2
+ %14 = insertvalue { [4 x <2 x i32>] } %13, <2 x i32> %10, 0, 3
+ %tmp1 = getelementptr i32* %a, i32 4
+ store i32* %tmp1, i32** %ptr
+ ret { [4 x <2 x i32>] } %14
+}
+
+define { [4 x <2 x double>] } @test_vld4_dup_reg_update(double* %a, double** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld4_dup_reg_update
+; CHECK: ld4r {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = bitcast double* %a to i8*
+ %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %1, <2 x double> undef, <2 x double> undef, <2 x double> undef, <2 x double> undef, i32 0, i32 8)
+ %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
+ %4 = shufflevector <2 x double> %3, <2 x double> undef, <2 x i32> zeroinitializer
+ %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
+ %6 = shufflevector <2 x double> %5, <2 x double> undef, <2 x i32> zeroinitializer
+ %7 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
+ %8 = shufflevector <2 x double> %7, <2 x double> undef, <2 x i32> zeroinitializer
+ %9 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
+ %10 = shufflevector <2 x double> %9, <2 x double> undef, <2 x i32> zeroinitializer
+ %11 = insertvalue { [4 x <2 x double>] } undef, <2 x double> %4, 0, 0
+ %12 = insertvalue { [4 x <2 x double>] } %11, <2 x double> %6, 0, 1
+ %13 = insertvalue { [4 x <2 x double>] } %12, <2 x double> %8, 0, 2
+ %14 = insertvalue { [4 x <2 x double>] } %13, <2 x double> %10, 0, 3
+ %tmp1 = getelementptr double* %a, i32 %inc
+ store double* %tmp1, double** %ptr
+ ret { [4 x <2 x double>] } %14
+}
+
+define { [2 x <8 x i8>] } @test_vld2_lane_fx_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr) {
+; CHECK-LABEL: test_vld2_lane_fx_update
+; CHECK: ld2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[7], [x{{[0-9]+|sp}}], #2
+ %1 = extractvalue [2 x <8 x i8>] %b, 0
+ %2 = extractvalue [2 x <8 x i8>] %b, 1
+ %3 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 7, i32 1)
+ %4 = extractvalue { <8 x i8>, <8 x i8> } %3, 0
+ %5 = extractvalue { <8 x i8>, <8 x i8> } %3, 1
+ %6 = insertvalue { [2 x <8 x i8>] } undef, <8 x i8> %4, 0, 0
+ %7 = insertvalue { [2 x <8 x i8>] } %6, <8 x i8> %5, 0, 1
+ %tmp1 = getelementptr i8* %a, i32 2
+ store i8* %tmp1, i8** %ptr
+ ret { [2 x <8 x i8>] } %7
+}
+
+define { [2 x <8 x i8>] } @test_vld2_lane_reg_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld2_lane_reg_update
+; CHECK: ld2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[6], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [2 x <8 x i8>] %b, 0
+ %2 = extractvalue [2 x <8 x i8>] %b, 1
+ %3 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 6, i32 1)
+ %4 = extractvalue { <8 x i8>, <8 x i8> } %3, 0
+ %5 = extractvalue { <8 x i8>, <8 x i8> } %3, 1
+ %6 = insertvalue { [2 x <8 x i8>] } undef, <8 x i8> %4, 0, 0
+ %7 = insertvalue { [2 x <8 x i8>] } %6, <8 x i8> %5, 0, 1
+ %tmp1 = getelementptr i8* %a, i32 %inc
+ store i8* %tmp1, i8** %ptr
+ ret { [2 x <8 x i8>] } %7
+}
+
+define { [3 x <2 x float>] } @test_vld3_lane_fx_update(float* %a, [3 x <2 x float>] %b, float** %ptr) {
+; CHECK-LABEL: test_vld3_lane_fx_update
+; CHECK: ld3 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], #12
+ %1 = extractvalue [3 x <2 x float>] %b, 0
+ %2 = extractvalue [3 x <2 x float>] %b, 1
+ %3 = extractvalue [3 x <2 x float>] %b, 2
+ %4 = bitcast float* %a to i8*
+ %5 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 1, i32 4)
+ %6 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 0
+ %7 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 1
+ %8 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %5, 2
+ %9 = insertvalue { [3 x <2 x float>] } undef, <2 x float> %6, 0, 0
+ %10 = insertvalue { [3 x <2 x float>] } %9, <2 x float> %7, 0, 1
+ %11 = insertvalue { [3 x <2 x float>] } %10, <2 x float> %8, 0, 2
+ %tmp1 = getelementptr float* %a, i32 3
+ store float* %tmp1, float** %ptr
+ ret { [3 x <2 x float>] } %11
+}
+
+define { [3 x <4 x i16>] } @test_vld3_lane_reg_update(i16* %a, [3 x <4 x i16>] %b, i16** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld3_lane_reg_update
+; CHECK: ld3 {v{{[0-9]+}}.h, v{{[0-9]+}}.h, v{{[0-9]+}}.h}[3], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [3 x <4 x i16>] %b, 0
+ %2 = extractvalue [3 x <4 x i16>] %b, 1
+ %3 = extractvalue [3 x <4 x i16>] %b, 2
+ %4 = bitcast i16* %a to i8*
+ %5 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 3, i32 2)
+ %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 0
+ %7 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 1
+ %8 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %5, 2
+ %9 = insertvalue { [3 x <4 x i16>] } undef, <4 x i16> %6, 0, 0
+ %10 = insertvalue { [3 x <4 x i16>] } %9, <4 x i16> %7, 0, 1
+ %11 = insertvalue { [3 x <4 x i16>] } %10, <4 x i16> %8, 0, 2
+ %tmp1 = getelementptr i16* %a, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret { [3 x <4 x i16>] } %11
+}
+
+define { [4 x <2 x i32>] } @test_vld4_lane_fx_update(i32* readonly %a, [4 x <2 x i32>] %b, i32** %ptr) {
+; CHECK-LABEL: test_vld4_lane_fx_update
+; CHECK: ld4 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], #16
+ %1 = extractvalue [4 x <2 x i32>] %b, 0
+ %2 = extractvalue [4 x <2 x i32>] %b, 1
+ %3 = extractvalue [4 x <2 x i32>] %b, 2
+ %4 = extractvalue [4 x <2 x i32>] %b, 3
+ %5 = bitcast i32* %a to i8*
+ %6 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 1, i32 4)
+ %7 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 0
+ %8 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 1
+ %9 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 2
+ %10 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %6, 3
+ %11 = insertvalue { [4 x <2 x i32>] } undef, <2 x i32> %7, 0, 0
+ %12 = insertvalue { [4 x <2 x i32>] } %11, <2 x i32> %8, 0, 1
+ %13 = insertvalue { [4 x <2 x i32>] } %12, <2 x i32> %9, 0, 2
+ %14 = insertvalue { [4 x <2 x i32>] } %13, <2 x i32> %10, 0, 3
+ %tmp1 = getelementptr i32* %a, i32 4
+ store i32* %tmp1, i32** %ptr
+ ret { [4 x <2 x i32>] } %14
+}
+
+define { [4 x <2 x double>] } @test_vld4_lane_reg_update(double* readonly %a, [4 x <2 x double>] %b, double** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vld4_lane_reg_update
+; CHECK: ld4 {v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [4 x <2 x double>] %b, 0
+ %2 = extractvalue [4 x <2 x double>] %b, 1
+ %3 = extractvalue [4 x <2 x double>] %b, 2
+ %4 = extractvalue [4 x <2 x double>] %b, 3
+ %5 = bitcast double* %a to i8*
+ %6 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 1, i32 8)
+ %7 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 0
+ %8 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 1
+ %9 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 2
+ %10 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %6, 3
+ %11 = insertvalue { [4 x <2 x double>] } undef, <2 x double> %7, 0, 0
+ %12 = insertvalue { [4 x <2 x double>] } %11, <2 x double> %8, 0, 1
+ %13 = insertvalue { [4 x <2 x double>] } %12, <2 x double> %9, 0, 2
+ %14 = insertvalue { [4 x <2 x double>] } %13, <2 x double> %10, 0, 3
+ %tmp1 = getelementptr double* %a, i32 %inc
+ store double* %tmp1, double** %ptr
+ ret { [4 x <2 x double>] } %14
+}
+
+define void @test_vst2_lane_fx_update(i8* %a, [2 x <8 x i8>] %b, i8** %ptr) {
+; CHECK-LABEL: test_vst2_lane_fx_update
+; CHECK: st2 {v{{[0-9]+}}.b, v{{[0-9]+}}.b}[7], [x{{[0-9]+|sp}}], #2
+ %1 = extractvalue [2 x <8 x i8>] %b, 0
+ %2 = extractvalue [2 x <8 x i8>] %b, 1
+ call void @llvm.arm.neon.vst2lane.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 7, i32 1)
+ %tmp1 = getelementptr i8* %a, i32 2
+ store i8* %tmp1, i8** %ptr
+ ret void
+}
+
+define void @test_vst2_lane_reg_update(i32* %a, [2 x <2 x i32>] %b.coerce, i32** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vst2_lane_reg_update
+; CHECK: st2 {v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [2 x <2 x i32>] %b.coerce, 0
+ %2 = extractvalue [2 x <2 x i32>] %b.coerce, 1
+ %3 = bitcast i32* %a to i8*
+ tail call void @llvm.arm.neon.vst2lane.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 1, i32 4)
+ %tmp1 = getelementptr i32* %a, i32 %inc
+ store i32* %tmp1, i32** %ptr
+ ret void
+}
+
+define void @test_vst3_lane_fx_update(float* %a, [3 x <4 x float>] %b, float** %ptr) {
+; CHECK-LABEL: test_vst3_lane_fx_update
+; CHECK: st3 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[3], [x{{[0-9]+|sp}}], #12
+ %1 = extractvalue [3 x <4 x float>] %b, 0
+ %2 = extractvalue [3 x <4 x float>] %b, 1
+ %3 = extractvalue [3 x <4 x float>] %b, 2
+ %4 = bitcast float* %a to i8*
+ call void @llvm.arm.neon.vst3lane.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 3, i32 4)
+ %tmp1 = getelementptr float* %a, i32 3
+ store float* %tmp1, float** %ptr
+ ret void
+}
+
+define void @test_vst3_lane_reg_update(i16* %a, [3 x <4 x i16>] %b, i16** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vst3_lane_reg_update
+; CHECK: st3 {v{{[0-9]+}}.h, v{{[0-9]+}}.h, v{{[0-9]+}}.h}[3], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [3 x <4 x i16>] %b, 0
+ %2 = extractvalue [3 x <4 x i16>] %b, 1
+ %3 = extractvalue [3 x <4 x i16>] %b, 2
+ %4 = bitcast i16* %a to i8*
+ tail call void @llvm.arm.neon.vst3lane.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 3, i32 2)
+ %tmp1 = getelementptr i16* %a, i32 %inc
+ store i16* %tmp1, i16** %ptr
+ ret void
+}
+
+define void @test_vst4_lane_fx_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr) {
+; CHECK-LABEL: test_vst4_lane_fx_update
+; CHECK: st4 {v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d, v{{[0-9]+}}.d}[1], [x{{[0-9]+|sp}}], #32
+ %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
+ %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
+ %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
+ %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
+ %5 = bitcast double* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 1, i32 8)
+ %tmp1 = getelementptr double* %a, i32 4
+ store double* %tmp1, double** %ptr
+ ret void
+}
+
+define void @test_vst4_lane_reg_update(float* %a, [4 x <2 x float>] %b.coerce, float** %ptr, i32 %inc) {
+; CHECK-LABEL: test_vst4_lane_reg_update
+; CHECK: st4 {v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s, v{{[0-9]+}}.s}[1], [x{{[0-9]+|sp}}], x{{[0-9]+}}
+ %1 = extractvalue [4 x <2 x float>] %b.coerce, 0
+ %2 = extractvalue [4 x <2 x float>] %b.coerce, 1
+ %3 = extractvalue [4 x <2 x float>] %b.coerce, 2
+ %4 = extractvalue [4 x <2 x float>] %b.coerce, 3
+ %5 = bitcast float* %a to i8*
+ tail call void @llvm.arm.neon.vst4lane.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 1, i32 4)
+ %tmp1 = getelementptr float* %a, i32 %inc
+ store float* %tmp1, float** %ptr
+ ret void
+}
+
+declare { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
+declare { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8*, <16 x i8>, <16 x i8>, i32, i32)
+declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32)
+declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32)
+declare { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
+declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32)
+declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32)
+declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
+declare void @llvm.arm.neon.vst4lane.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32, i32)
diff --git a/test/CodeGen/AArch64/neon-simd-shift.ll b/test/CodeGen/AArch64/neon-simd-shift.ll
new file mode 100644
index 000000000000..fd762656e56e
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-shift.ll
@@ -0,0 +1,1556 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
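+; This file exercises the AArch64 Advanced SIMD shift-by-immediate family:
+; plain and rounding right shifts (sshr/ushr, srshr/urshr), their accumulating
+; forms (ssra/usra, srsra/ursra), insert shifts (sri/sli), saturating left
+; shifts (sqshl/uqshl/sqshlu), narrowing right shifts and their "2" high-half
+; variants (shrn, rshrn, sqshrun, sqrshrun, sqshrn/uqshrn, sqrshrn/uqrshrn),
+; and fixed-point/floating-point conversions (scvtf/ucvtf, fcvtzs/fcvtzu).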
+
+define <8 x i8> @test_vshr_n_s8(<8 x i8> %a) {
+; CHECK: test_vshr_n_s8
+; CHECK: sshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vshr_n = ashr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <8 x i8> %vshr_n
+}
+
+define <4 x i16> @test_vshr_n_s16(<4 x i16> %a) {
+; CHECK: test_vshr_n_s16
+; CHECK: sshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vshr_n = ashr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %vshr_n
+}
+
+define <2 x i32> @test_vshr_n_s32(<2 x i32> %a) {
+; CHECK: test_vshr_n_s32
+; CHECK: sshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vshr_n = ashr <2 x i32> %a, <i32 3, i32 3>
+ ret <2 x i32> %vshr_n
+}
+
+define <16 x i8> @test_vshrq_n_s8(<16 x i8> %a) {
+; CHECK: test_vshrq_n_s8
+; CHECK: sshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vshr_n = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %vshr_n
+}
+
+define <8 x i16> @test_vshrq_n_s16(<8 x i16> %a) {
+; CHECK: test_vshrq_n_s16
+; CHECK: sshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vshr_n = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %vshr_n
+}
+
+define <4 x i32> @test_vshrq_n_s32(<4 x i32> %a) {
+; CHECK: test_vshrq_n_s32
+; CHECK: sshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vshr_n = ashr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %vshr_n
+}
+
+define <2 x i64> @test_vshrq_n_s64(<2 x i64> %a) {
+; CHECK: test_vshrq_n_s64
+; CHECK: sshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vshr_n = ashr <2 x i64> %a, <i64 3, i64 3>
+ ret <2 x i64> %vshr_n
+}
+
+define <8 x i8> @test_vshr_n_u8(<8 x i8> %a) {
+; CHECK: test_vshr_n_u8
+; CHECK: ushr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vshr_n = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <8 x i8> %vshr_n
+}
+
+define <4 x i16> @test_vshr_n_u16(<4 x i16> %a) {
+; CHECK: test_vshr_n_u16
+; CHECK: ushr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vshr_n = lshr <4 x i16> %a, <i16 3, i16 3, i16 3, i16 3>
+ ret <4 x i16> %vshr_n
+}
+
+define <2 x i32> @test_vshr_n_u32(<2 x i32> %a) {
+; CHECK: test_vshr_n_u32
+; CHECK: ushr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vshr_n = lshr <2 x i32> %a, <i32 3, i32 3>
+ ret <2 x i32> %vshr_n
+}
+
+define <16 x i8> @test_vshrq_n_u8(<16 x i8> %a) {
+; CHECK: test_vshrq_n_u8
+; CHECK: ushr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vshr_n = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %vshr_n
+}
+
+define <8 x i16> @test_vshrq_n_u16(<8 x i16> %a) {
+; CHECK: test_vshrq_n_u16
+; CHECK: ushr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vshr_n = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ ret <8 x i16> %vshr_n
+}
+
+define <4 x i32> @test_vshrq_n_u32(<4 x i32> %a) {
+; CHECK: test_vshrq_n_u32
+; CHECK: ushr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vshr_n = lshr <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %vshr_n
+}
+
+define <2 x i64> @test_vshrq_n_u64(<2 x i64> %a) {
+; CHECK: test_vshrq_n_u64
+; CHECK: ushr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vshr_n = lshr <2 x i64> %a, <i64 3, i64 3>
+ ret <2 x i64> %vshr_n
+}
+
+define <8 x i8> @test_vsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsra_n_s8
+; CHECK: ssra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsra_n = ashr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <8 x i8> %vsra_n, %a
+ ret <8 x i8> %1
+}
+
+define <4 x i16> @test_vsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsra_n_s16
+; CHECK: ssra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsra_n = ashr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
+ %1 = add <4 x i16> %vsra_n, %a
+ ret <4 x i16> %1
+}
+
+define <2 x i32> @test_vsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsra_n_s32
+; CHECK: ssra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsra_n = ashr <2 x i32> %b, <i32 3, i32 3>
+ %1 = add <2 x i32> %vsra_n, %a
+ ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsraq_n_s8
+; CHECK: ssra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsra_n = ashr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <16 x i8> %vsra_n, %a
+ ret <16 x i8> %1
+}
+
+define <8 x i16> @test_vsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsraq_n_s16
+; CHECK: ssra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsra_n = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %1 = add <8 x i16> %vsra_n, %a
+ ret <8 x i16> %1
+}
+
+define <4 x i32> @test_vsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsraq_n_s32
+; CHECK: ssra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsra_n = ashr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
+ %1 = add <4 x i32> %vsra_n, %a
+ ret <4 x i32> %1
+}
+
+define <2 x i64> @test_vsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsraq_n_s64
+; CHECK: ssra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsra_n = ashr <2 x i64> %b, <i64 3, i64 3>
+ %1 = add <2 x i64> %vsra_n, %a
+ ret <2 x i64> %1
+}
+
+define <8 x i8> @test_vsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsra_n_u8
+; CHECK: usra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsra_n = lshr <8 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <8 x i8> %vsra_n, %a
+ ret <8 x i8> %1
+}
+
+define <4 x i16> @test_vsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsra_n_u16
+; CHECK: usra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsra_n = lshr <4 x i16> %b, <i16 3, i16 3, i16 3, i16 3>
+ %1 = add <4 x i16> %vsra_n, %a
+ ret <4 x i16> %1
+}
+
+define <2 x i32> @test_vsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsra_n_u32
+; CHECK: usra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsra_n = lshr <2 x i32> %b, <i32 3, i32 3>
+ %1 = add <2 x i32> %vsra_n, %a
+ ret <2 x i32> %1
+}
+
+define <16 x i8> @test_vsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsraq_n_u8
+; CHECK: usra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsra_n = lshr <16 x i8> %b, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
+ %1 = add <16 x i8> %vsra_n, %a
+ ret <16 x i8> %1
+}
+
+define <8 x i16> @test_vsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsraq_n_u16
+; CHECK: usra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsra_n = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %1 = add <8 x i16> %vsra_n, %a
+ ret <8 x i16> %1
+}
+
+define <4 x i32> @test_vsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsraq_n_u32
+; CHECK: usra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsra_n = lshr <4 x i32> %b, <i32 3, i32 3, i32 3, i32 3>
+ %1 = add <4 x i32> %vsra_n, %a
+ ret <4 x i32> %1
+}
+
+define <2 x i64> @test_vsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsraq_n_u64
+; CHECK: usra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsra_n = lshr <2 x i64> %b, <i64 3, i64 3>
+ %1 = add <2 x i64> %vsra_n, %a
+ ret <2 x i64> %1
+}
+
+define <8 x i8> @test_vrshr_n_s8(<8 x i8> %a) {
+; CHECK: test_vrshr_n_s8
+; CHECK: srshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vrshr_n = tail call <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8> %a, i32 3)
+ ret <8 x i8> %vrshr_n
+}
+
+
+define <4 x i16> @test_vrshr_n_s16(<4 x i16> %a) {
+; CHECK: test_vrshr_n_s16
+; CHECK: srshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vrshr_n = tail call <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16> %a, i32 3)
+ ret <4 x i16> %vrshr_n
+}
+
+
+define <2 x i32> @test_vrshr_n_s32(<2 x i32> %a) {
+; CHECK: test_vrshr_n_s32
+; CHECK: srshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vrshr_n = tail call <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32> %a, i32 3)
+ ret <2 x i32> %vrshr_n
+}
+
+
+define <16 x i8> @test_vrshrq_n_s8(<16 x i8> %a) {
+; CHECK: test_vrshrq_n_s8
+; CHECK: srshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vrshr_n = tail call <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8> %a, i32 3)
+ ret <16 x i8> %vrshr_n
+}
+
+
+define <8 x i16> @test_vrshrq_n_s16(<8 x i16> %a) {
+; CHECK: test_vrshrq_n_s16
+; CHECK: srshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vrshr_n = tail call <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16> %a, i32 3)
+ ret <8 x i16> %vrshr_n
+}
+
+
+define <4 x i32> @test_vrshrq_n_s32(<4 x i32> %a) {
+; CHECK: test_vrshrq_n_s32
+; CHECK: srshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vrshr_n = tail call <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32> %a, i32 3)
+ ret <4 x i32> %vrshr_n
+}
+
+
+define <2 x i64> @test_vrshrq_n_s64(<2 x i64> %a) {
+; CHECK: test_vrshrq_n_s64
+; CHECK: srshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vrshr_n = tail call <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64> %a, i32 3)
+ ret <2 x i64> %vrshr_n
+}
+
+
+define <8 x i8> @test_vrshr_n_u8(<8 x i8> %a) {
+; CHECK: test_vrshr_n_u8
+; CHECK: urshr {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vrshr_n = tail call <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8> %a, i32 3)
+ ret <8 x i8> %vrshr_n
+}
+
+
+define <4 x i16> @test_vrshr_n_u16(<4 x i16> %a) {
+; CHECK: test_vrshr_n_u16
+; CHECK: urshr {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vrshr_n = tail call <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16> %a, i32 3)
+ ret <4 x i16> %vrshr_n
+}
+
+
+define <2 x i32> @test_vrshr_n_u32(<2 x i32> %a) {
+; CHECK: test_vrshr_n_u32
+; CHECK: urshr {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vrshr_n = tail call <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32> %a, i32 3)
+ ret <2 x i32> %vrshr_n
+}
+
+
+define <16 x i8> @test_vrshrq_n_u8(<16 x i8> %a) {
+; CHECK: test_vrshrq_n_u8
+; CHECK: urshr {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vrshr_n = tail call <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8> %a, i32 3)
+ ret <16 x i8> %vrshr_n
+}
+
+
+define <8 x i16> @test_vrshrq_n_u16(<8 x i16> %a) {
+; CHECK: test_vrshrq_n_u16
+; CHECK: urshr {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vrshr_n = tail call <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16> %a, i32 3)
+ ret <8 x i16> %vrshr_n
+}
+
+
+define <4 x i32> @test_vrshrq_n_u32(<4 x i32> %a) {
+; CHECK: test_vrshrq_n_u32
+; CHECK: urshr {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vrshr_n = tail call <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32> %a, i32 3)
+ ret <4 x i32> %vrshr_n
+}
+
+
+define <2 x i64> @test_vrshrq_n_u64(<2 x i64> %a) {
+; CHECK: test_vrshrq_n_u64
+; CHECK: urshr {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vrshr_n = tail call <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64> %a, i32 3)
+ ret <2 x i64> %vrshr_n
+}
+
+
+define <8 x i8> @test_vrsra_n_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vrsra_n_s8
+; CHECK: srsra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %1 = tail call <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8> %b, i32 3)
+ %vrsra_n = add <8 x i8> %1, %a
+ ret <8 x i8> %vrsra_n
+}
+
+define <4 x i16> @test_vrsra_n_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vrsra_n_s16
+; CHECK: srsra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %1 = tail call <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16> %b, i32 3)
+ %vrsra_n = add <4 x i16> %1, %a
+ ret <4 x i16> %vrsra_n
+}
+
+define <2 x i32> @test_vrsra_n_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vrsra_n_s32
+; CHECK: srsra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %1 = tail call <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32> %b, i32 3)
+ %vrsra_n = add <2 x i32> %1, %a
+ ret <2 x i32> %vrsra_n
+}
+
+define <16 x i8> @test_vrsraq_n_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vrsraq_n_s8
+; CHECK: srsra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %1 = tail call <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8> %b, i32 3)
+ %vrsra_n = add <16 x i8> %1, %a
+ ret <16 x i8> %vrsra_n
+}
+
+define <8 x i16> @test_vrsraq_n_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsraq_n_s16
+; CHECK: srsra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %1 = tail call <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16> %b, i32 3)
+ %vrsra_n = add <8 x i16> %1, %a
+ ret <8 x i16> %vrsra_n
+}
+
+define <4 x i32> @test_vrsraq_n_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsraq_n_s32
+; CHECK: srsra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %1 = tail call <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32> %b, i32 3)
+ %vrsra_n = add <4 x i32> %1, %a
+ ret <4 x i32> %vrsra_n
+}
+
+define <2 x i64> @test_vrsraq_n_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsraq_n_s64
+; CHECK: srsra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %1 = tail call <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64> %b, i32 3)
+ %vrsra_n = add <2 x i64> %1, %a
+ ret <2 x i64> %vrsra_n
+}
+
+define <8 x i8> @test_vrsra_n_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vrsra_n_u8
+; CHECK: ursra {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %1 = tail call <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8> %b, i32 3)
+ %vrsra_n = add <8 x i8> %1, %a
+ ret <8 x i8> %vrsra_n
+}
+
+define <4 x i16> @test_vrsra_n_u16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vrsra_n_u16
+; CHECK: ursra {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %1 = tail call <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16> %b, i32 3)
+ %vrsra_n = add <4 x i16> %1, %a
+ ret <4 x i16> %vrsra_n
+}
+
+define <2 x i32> @test_vrsra_n_u32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vrsra_n_u32
+; CHECK: ursra {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %1 = tail call <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32> %b, i32 3)
+ %vrsra_n = add <2 x i32> %1, %a
+ ret <2 x i32> %vrsra_n
+}
+
+define <16 x i8> @test_vrsraq_n_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vrsraq_n_u8
+; CHECK: ursra {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %1 = tail call <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8> %b, i32 3)
+ %vrsra_n = add <16 x i8> %1, %a
+ ret <16 x i8> %vrsra_n
+}
+
+define <8 x i16> @test_vrsraq_n_u16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vrsraq_n_u16
+; CHECK: ursra {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %1 = tail call <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16> %b, i32 3)
+ %vrsra_n = add <8 x i16> %1, %a
+ ret <8 x i16> %vrsra_n
+}
+
+define <4 x i32> @test_vrsraq_n_u32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vrsraq_n_u32
+; CHECK: ursra {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %1 = tail call <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32> %b, i32 3)
+ %vrsra_n = add <4 x i32> %1, %a
+ ret <4 x i32> %vrsra_n
+}
+
+define <2 x i64> @test_vrsraq_n_u64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vrsraq_n_u64
+; CHECK: ursra {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %1 = tail call <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64> %b, i32 3)
+ %vrsra_n = add <2 x i64> %1, %a
+ ret <2 x i64> %vrsra_n
+}
+
+define <8 x i8> @test_vsri_n_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsri_n_s8
+; CHECK: sri {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsri_n = tail call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
+ ret <8 x i8> %vsri_n
+}
+
+
+define <4 x i16> @test_vsri_n_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsri_n_s16
+; CHECK: sri {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsri = tail call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> %a, <4 x i16> %b, i32 3)
+ ret <4 x i16> %vsri
+}
+
+
+define <2 x i32> @test_vsri_n_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsri_n_s32
+; CHECK: sri {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsri = tail call <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32> %a, <2 x i32> %b, i32 3)
+ ret <2 x i32> %vsri
+}
+
+
+define <16 x i8> @test_vsriq_n_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsriq_n_s8
+; CHECK: sri {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsri_n = tail call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
+ ret <16 x i8> %vsri_n
+}
+
+
+define <8 x i16> @test_vsriq_n_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsriq_n_s16
+; CHECK: sri {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsri = tail call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> %a, <8 x i16> %b, i32 3)
+ ret <8 x i16> %vsri
+}
+
+
+define <4 x i32> @test_vsriq_n_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsriq_n_s32
+; CHECK: sri {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsri = tail call <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32> %a, <4 x i32> %b, i32 3)
+ ret <4 x i32> %vsri
+}
+
+
+define <2 x i64> @test_vsriq_n_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsriq_n_s64
+; CHECK: sri {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsri = tail call <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64> %a, <2 x i64> %b, i32 3)
+ ret <2 x i64> %vsri
+}
+
+define <8 x i8> @test_vsri_n_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsri_n_p8
+; CHECK: sri {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsri_n = tail call <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
+ ret <8 x i8> %vsri_n
+}
+
+define <4 x i16> @test_vsri_n_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsri_n_p16
+; CHECK: sri {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #15
+ %vsri = tail call <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16> %a, <4 x i16> %b, i32 15)
+ ret <4 x i16> %vsri
+}
+
+define <16 x i8> @test_vsriq_n_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsriq_n_p8
+; CHECK: sri {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsri_n = tail call <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
+ ret <16 x i8> %vsri_n
+}
+
+define <8 x i16> @test_vsriq_n_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsriq_n_p16
+; CHECK: sri {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #15
+ %vsri = tail call <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16> %a, <8 x i16> %b, i32 15)
+ ret <8 x i16> %vsri
+}
+
+define <8 x i8> @test_vsli_n_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsli_n_s8
+; CHECK: sli {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsli_n = tail call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
+ ret <8 x i8> %vsli_n
+}
+
+define <4 x i16> @test_vsli_n_s16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsli_n_s16
+; CHECK: sli {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vsli = tail call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %a, <4 x i16> %b, i32 3)
+ ret <4 x i16> %vsli
+}
+
+define <2 x i32> @test_vsli_n_s32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK: test_vsli_n_s32
+; CHECK: sli {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vsli = tail call <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32> %a, <2 x i32> %b, i32 3)
+ ret <2 x i32> %vsli
+}
+
+define <16 x i8> @test_vsliq_n_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsliq_n_s8
+; CHECK: sli {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsli_n = tail call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
+ ret <16 x i8> %vsli_n
+}
+
+define <8 x i16> @test_vsliq_n_s16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsliq_n_s16
+; CHECK: sli {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vsli = tail call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %a, <8 x i16> %b, i32 3)
+ ret <8 x i16> %vsli
+}
+
+define <4 x i32> @test_vsliq_n_s32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK: test_vsliq_n_s32
+; CHECK: sli {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vsli = tail call <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32> %a, <4 x i32> %b, i32 3)
+ ret <4 x i32> %vsli
+}
+
+define <2 x i64> @test_vsliq_n_s64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK: test_vsliq_n_s64
+; CHECK: sli {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vsli = tail call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %a, <2 x i64> %b, i32 3)
+ ret <2 x i64> %vsli
+}
+
+define <8 x i8> @test_vsli_n_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vsli_n_p8
+; CHECK: sli {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vsli_n = tail call <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8> %a, <8 x i8> %b, i32 3)
+ ret <8 x i8> %vsli_n
+}
+
+define <4 x i16> @test_vsli_n_p16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK: test_vsli_n_p16
+; CHECK: sli {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #15
+ %vsli = tail call <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16> %a, <4 x i16> %b, i32 15)
+ ret <4 x i16> %vsli
+}
+
+define <16 x i8> @test_vsliq_n_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vsliq_n_p8
+; CHECK: sli {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vsli_n = tail call <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8> %a, <16 x i8> %b, i32 3)
+ ret <16 x i8> %vsli_n
+}
+
+define <8 x i16> @test_vsliq_n_p16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK: test_vsliq_n_p16
+; CHECK: sli {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #15
+ %vsli = tail call <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16> %a, <8 x i16> %b, i32 15)
+ ret <8 x i16> %vsli
+}
+
+define <8 x i8> @test_vqshl_n_s8(<8 x i8> %a) {
+; CHECK: test_vqshl_n_s8
+; CHECK: sqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vqshl = tail call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %a, <8 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
+ ret <8 x i8> %vqshl
+}
+
+
+define <4 x i16> @test_vqshl_n_s16(<4 x i16> %a) {
+; CHECK: test_vqshl_n_s16
+; CHECK: sqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vqshl = tail call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %a, <4 x i16> <i16 3, i16 3, i16 3, i16 3>)
+ ret <4 x i16> %vqshl
+}
+
+
+define <2 x i32> @test_vqshl_n_s32(<2 x i32> %a) {
+; CHECK: test_vqshl_n_s32
+; CHECK: sqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vqshl = tail call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %a, <2 x i32> <i32 3, i32 3>)
+ ret <2 x i32> %vqshl
+}
+
+
+define <16 x i8> @test_vqshlq_n_s8(<16 x i8> %a) {
+; CHECK: test_vqshlq_n_s8
+; CHECK: sqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %a, <16 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
+ ret <16 x i8> %vqshl_n
+}
+
+
+define <8 x i16> @test_vqshlq_n_s16(<8 x i16> %a) {
+; CHECK: test_vqshlq_n_s16
+; CHECK: sqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vqshl = tail call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %a, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
+ ret <8 x i16> %vqshl
+}
+
+
+define <4 x i32> @test_vqshlq_n_s32(<4 x i32> %a) {
+; CHECK: test_vqshlq_n_s32
+; CHECK: sqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vqshl = tail call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
+ ret <4 x i32> %vqshl
+}
+
+
+define <2 x i64> @test_vqshlq_n_s64(<2 x i64> %a) {
+; CHECK: test_vqshlq_n_s64
+; CHECK: sqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vqshl = tail call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %a, <2 x i64> <i64 3, i64 3>)
+ ret <2 x i64> %vqshl
+}
+
+
+define <8 x i8> @test_vqshl_n_u8(<8 x i8> %a) {
+; CHECK: test_vqshl_n_u8
+; CHECK: uqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vqshl_n = tail call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %a, <8 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
+ ret <8 x i8> %vqshl_n
+}
+
+
+define <4 x i16> @test_vqshl_n_u16(<4 x i16> %a) {
+; CHECK: test_vqshl_n_u16
+; CHECK: uqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vqshl = tail call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %a, <4 x i16> <i16 3, i16 3, i16 3, i16 3>)
+ ret <4 x i16> %vqshl
+}
+
+
+define <2 x i32> @test_vqshl_n_u32(<2 x i32> %a) {
+; CHECK: test_vqshl_n_u32
+; CHECK: uqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vqshl = tail call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %a, <2 x i32> <i32 3, i32 3>)
+ ret <2 x i32> %vqshl
+}
+
+
+define <16 x i8> @test_vqshlq_n_u8(<16 x i8> %a) {
+; CHECK: test_vqshlq_n_u8
+; CHECK: uqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %a, <16 x i8> <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>)
+ ret <16 x i8> %vqshl_n
+}
+
+
+define <8 x i16> @test_vqshlq_n_u16(<8 x i16> %a) {
+; CHECK: test_vqshlq_n_u16
+; CHECK: uqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vqshl = tail call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %a, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>)
+ ret <8 x i16> %vqshl
+}
+
+
+define <4 x i32> @test_vqshlq_n_u32(<4 x i32> %a) {
+; CHECK: test_vqshlq_n_u32
+; CHECK: uqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vqshl = tail call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %a, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
+ ret <4 x i32> %vqshl
+}
+
+
+define <2 x i64> @test_vqshlq_n_u64(<2 x i64> %a) {
+; CHECK: test_vqshlq_n_u64
+; CHECK: uqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vqshl = tail call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %a, <2 x i64> <i64 3, i64 3>)
+ ret <2 x i64> %vqshl
+}
+
+define <8 x i8> @test_vqshlu_n_s8(<8 x i8> %a) {
+; CHECK: test_vqshlu_n_s8
+; CHECK: sqshlu {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #3
+ %vqshlu = tail call <8 x i8> @llvm.aarch64.neon.vsqshlu.v8i8(<8 x i8> %a, i32 3)
+ ret <8 x i8> %vqshlu
+}
+
+
+define <4 x i16> @test_vqshlu_n_s16(<4 x i16> %a) {
+; CHECK: test_vqshlu_n_s16
+; CHECK: sqshlu {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #3
+ %vqshlu = tail call <4 x i16> @llvm.aarch64.neon.vsqshlu.v4i16(<4 x i16> %a, i32 3)
+ ret <4 x i16> %vqshlu
+}
+
+
+define <2 x i32> @test_vqshlu_n_s32(<2 x i32> %a) {
+; CHECK: test_vqshlu_n_s32
+; CHECK: sqshlu {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #3
+ %vqshlu = tail call <2 x i32> @llvm.aarch64.neon.vsqshlu.v2i32(<2 x i32> %a, i32 3)
+ ret <2 x i32> %vqshlu
+}
+
+
+define <16 x i8> @test_vqshluq_n_s8(<16 x i8> %a) {
+; CHECK: test_vqshluq_n_s8
+; CHECK: sqshlu {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #3
+ %vqshlu = tail call <16 x i8> @llvm.aarch64.neon.vsqshlu.v16i8(<16 x i8> %a, i32 3)
+ ret <16 x i8> %vqshlu
+}
+
+
+define <8 x i16> @test_vqshluq_n_s16(<8 x i16> %a) {
+; CHECK: test_vqshluq_n_s16
+; CHECK: sqshlu {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #3
+ %vqshlu = tail call <8 x i16> @llvm.aarch64.neon.vsqshlu.v8i16(<8 x i16> %a, i32 3)
+ ret <8 x i16> %vqshlu
+}
+
+
+define <4 x i32> @test_vqshluq_n_s32(<4 x i32> %a) {
+; CHECK: test_vqshluq_n_s32
+; CHECK: sqshlu {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #3
+ %vqshlu = tail call <4 x i32> @llvm.aarch64.neon.vsqshlu.v4i32(<4 x i32> %a, i32 3)
+ ret <4 x i32> %vqshlu
+}
+
+
+define <2 x i64> @test_vqshluq_n_s64(<2 x i64> %a) {
+; CHECK: test_vqshluq_n_s64
+; CHECK: sqshlu {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #3
+ %vqshlu = tail call <2 x i64> @llvm.aarch64.neon.vsqshlu.v2i64(<2 x i64> %a, i32 3)
+ ret <2 x i64> %vqshlu
+}
+
+
+define <8 x i8> @test_vshrn_n_s16(<8 x i16> %a) {
+; CHECK: test_vshrn_n_s16
+; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %1 = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ ret <8 x i8> %vshrn_n
+}
+
+define <4 x i16> @test_vshrn_n_s32(<4 x i32> %a) {
+; CHECK: test_vshrn_n_s32
+; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %1 = ashr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ ret <4 x i16> %vshrn_n
+}
+
+define <2 x i32> @test_vshrn_n_s64(<2 x i64> %a) {
+; CHECK: test_vshrn_n_s64
+; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %1 = ashr <2 x i64> %a, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %vshrn_n
+}
+
+define <8 x i8> @test_vshrn_n_u16(<8 x i16> %a) {
+; CHECK: test_vshrn_n_u16
+; CHECK: shrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %1 = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ ret <8 x i8> %vshrn_n
+}
+
+define <4 x i16> @test_vshrn_n_u32(<4 x i32> %a) {
+; CHECK: test_vshrn_n_u32
+; CHECK: shrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %1 = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ ret <4 x i16> %vshrn_n
+}
+
+define <2 x i32> @test_vshrn_n_u64(<2 x i64> %a) {
+; CHECK: test_vshrn_n_u64
+; CHECK: shrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %1 = lshr <2 x i64> %a, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %1 to <2 x i32>
+ ret <2 x i32> %vshrn_n
+}
+
+define <16 x i8> @test_vshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vshrn_high_n_s16
+; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %1 = ashr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ %2 = bitcast <8 x i8> %a to <1 x i64>
+ %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %4
+}
+
+define <8 x i16> @test_vshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vshrn_high_n_s32
+; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %1 = ashr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ %2 = bitcast <4 x i16> %a to <1 x i64>
+ %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <4 x i32> @test_vshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_high_n_s64
+; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %2 = ashr <2 x i64> %b, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+ %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %4
+}
+
+define <16 x i8> @test_vshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vshrn_high_n_u16
+; CHECK: shrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %1 = lshr <8 x i16> %b, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+ %vshrn_n = trunc <8 x i16> %1 to <8 x i8>
+ %2 = bitcast <8 x i8> %a to <1 x i64>
+ %3 = bitcast <8 x i8> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %4
+}
+
+define <8 x i16> @test_vshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vshrn_high_n_u32
+; CHECK: shrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %1 = lshr <4 x i32> %b, <i32 9, i32 9, i32 9, i32 9>
+ %vshrn_n = trunc <4 x i32> %1 to <4 x i16>
+ %2 = bitcast <4 x i16> %a to <1 x i64>
+ %3 = bitcast <4 x i16> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %2, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %4
+}
+
+define <4 x i32> @test_vshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vshrn_high_n_u64
+; CHECK: shrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %2 = lshr <2 x i64> %b, <i64 19, i64 19>
+ %vshrn_n = trunc <2 x i64> %2 to <2 x i32>
+ %3 = bitcast <2 x i32> %vshrn_n to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %3, <2 x i32> <i32 0, i32 1>
+ %4 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %4
+}
+
+define <8 x i8> @test_vqshrun_n_s16(<8 x i16> %a) {
+; CHECK: test_vqshrun_n_s16
+; CHECK: sqshrun {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqshrun
+}
+
+
+define <4 x i16> @test_vqshrun_n_s32(<4 x i32> %a) {
+; CHECK: test_vqshrun_n_s32
+; CHECK: sqshrun {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqshrun
+}
+
+define <2 x i32> @test_vqshrun_n_s64(<2 x i64> %a) {
+; CHECK: test_vqshrun_n_s64
+; CHECK: sqshrun {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqshrun
+}
+
+define <16 x i8> @test_vqshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrun_high_n_s16
+; CHECK: sqshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrun_high_n_s32
+; CHECK: sqshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrun_high_n_s64
+; CHECK: sqshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i8> @test_vrshrn_n_s16(<8 x i16> %a) {
+; CHECK: test_vrshrn_n_s16
+; CHECK: rshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vrshrn
+}
+
+
+define <4 x i16> @test_vrshrn_n_s32(<4 x i32> %a) {
+; CHECK: test_vrshrn_n_s32
+; CHECK: rshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vrshrn
+}
+
+
+define <2 x i32> @test_vrshrn_n_s64(<2 x i64> %a) {
+; CHECK: test_vrshrn_n_s64
+; CHECK: rshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vrshrn
+}
+
+define <16 x i8> @test_vrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vrshrn_high_n_s16
+; CHECK: rshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vrshrn = tail call <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vrshrn_high_n_s32
+; CHECK: rshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vrshrn = tail call <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vrshrn_high_n_s64
+; CHECK: rshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vrshrn = tail call <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i8> @test_vqrshrun_n_s16(<8 x i16> %a) {
+; CHECK: test_vqrshrun_n_s16
+; CHECK: sqrshrun {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqrshrun
+}
+
+define <4 x i16> @test_vqrshrun_n_s32(<4 x i32> %a) {
+; CHECK: test_vqrshrun_n_s32
+; CHECK: sqrshrun {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqrshrun
+}
+
+define <2 x i32> @test_vqrshrun_n_s64(<2 x i64> %a) {
+; CHECK: test_vqrshrun_n_s64
+; CHECK: sqrshrun {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqrshrun
+}
+
+define <16 x i8> @test_vqrshrun_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrun_high_n_s16
+; CHECK: sqrshrun2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrun = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrun_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrun_high_n_s32
+; CHECK: sqrshrun2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrun = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrun_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrun_high_n_s64
+; CHECK: sqrshrun2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrun = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrun to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i8> @test_vqshrn_n_s16(<8 x i16> %a) {
+; CHECK: test_vqshrn_n_s16
+; CHECK: sqshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqshrn
+}
+
+
+define <4 x i16> @test_vqshrn_n_s32(<4 x i32> %a) {
+; CHECK: test_vqshrn_n_s32
+; CHECK: sqshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqshrn
+}
+
+
+define <2 x i32> @test_vqshrn_n_s64(<2 x i64> %a) {
+; CHECK: test_vqshrn_n_s64
+; CHECK: sqshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqshrn
+}
+
+
+define <8 x i8> @test_vqshrn_n_u16(<8 x i16> %a) {
+; CHECK: test_vqshrn_n_u16
+; CHECK: uqshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqshrn
+}
+
+
+define <4 x i16> @test_vqshrn_n_u32(<4 x i32> %a) {
+; CHECK: test_vqshrn_n_u32
+; CHECK: uqshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqshrn
+}
+
+
+define <2 x i32> @test_vqshrn_n_u64(<2 x i64> %a) {
+; CHECK: test_vqshrn_n_u64
+; CHECK: uqshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqshrn
+}
+
+
+define <16 x i8> @test_vqshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrn_high_n_s16
+; CHECK: sqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrn_high_n_s32
+; CHECK: sqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrn_high_n_s64
+; CHECK: sqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqshrn_high_n_u16
+; CHECK: uqshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqshrn_high_n_u32
+; CHECK: uqshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqshrn_high_n_u64
+; CHECK: uqshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <8 x i8> @test_vqrshrn_n_s16(<8 x i16> %a) {
+; CHECK: test_vqrshrn_n_s16
+; CHECK: sqrshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqrshrn
+}
+
+
+define <4 x i16> @test_vqrshrn_n_s32(<4 x i32> %a) {
+; CHECK: test_vqrshrn_n_s32
+; CHECK: sqrshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqrshrn
+}
+
+
+define <2 x i32> @test_vqrshrn_n_s64(<2 x i64> %a) {
+; CHECK: test_vqrshrn_n_s64
+; CHECK: sqrshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqrshrn
+}
+
+
+define <8 x i8> @test_vqrshrn_n_u16(<8 x i16> %a) {
+; CHECK: test_vqrshrn_n_u16
+; CHECK: uqrshrn {{v[0-9]+}}.8b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16> %a, i32 3)
+ ret <8 x i8> %vqrshrn
+}
+
+
+define <4 x i16> @test_vqrshrn_n_u32(<4 x i32> %a) {
+; CHECK: test_vqrshrn_n_u32
+; CHECK: uqrshrn {{v[0-9]+}}.4h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32> %a, i32 9)
+ ret <4 x i16> %vqrshrn
+}
+
+
+define <2 x i32> @test_vqrshrn_n_u64(<2 x i64> %a) {
+; CHECK: test_vqrshrn_n_u64
+; CHECK: uqrshrn {{v[0-9]+}}.2s, {{v[0-9]+}}.2d, #19
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64> %a, i32 19)
+ ret <2 x i32> %vqrshrn
+}
+
+
+define <16 x i8> @test_vqrshrn_high_n_s16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrn_high_n_s16
+; CHECK: sqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrn_high_n_s32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrn_high_n_s32
+; CHECK: sqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrn_high_n_s64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrn_high_n_s64
+; CHECK: sqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <16 x i8> @test_vqrshrn_high_n_u16(<8 x i8> %a, <8 x i16> %b) {
+; CHECK: test_vqrshrn_high_n_u16
+; CHECK: uqrshrn2 {{v[0-9]+}}.16b, {{v[0-9]+}}.8h, #3
+ %vqrshrn = tail call <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16> %b, i32 3)
+ %1 = bitcast <8 x i8> %a to <1 x i64>
+ %2 = bitcast <8 x i8> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <16 x i8>
+ ret <16 x i8> %3
+}
+
+define <8 x i16> @test_vqrshrn_high_n_u32(<4 x i16> %a, <4 x i32> %b) {
+; CHECK: test_vqrshrn_high_n_u32
+; CHECK: uqrshrn2 {{v[0-9]+}}.8h, {{v[0-9]+}}.4s, #9
+ %vqrshrn = tail call <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32> %b, i32 9)
+ %1 = bitcast <4 x i16> %a to <1 x i64>
+ %2 = bitcast <4 x i16> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <8 x i16>
+ ret <8 x i16> %3
+}
+
+define <4 x i32> @test_vqrshrn_high_n_u64(<2 x i32> %a, <2 x i64> %b) {
+; CHECK: test_vqrshrn_high_n_u64
+; CHECK: uqrshrn2 {{v[0-9]+}}.4s, {{v[0-9]+}}.2d, #19
+ %1 = bitcast <2 x i32> %a to <1 x i64>
+ %vqrshrn = tail call <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64> %b, i32 19)
+ %2 = bitcast <2 x i32> %vqrshrn to <1 x i64>
+ %shuffle.i = shufflevector <1 x i64> %1, <1 x i64> %2, <2 x i32> <i32 0, i32 1>
+ %3 = bitcast <2 x i64> %shuffle.i to <4 x i32>
+ ret <4 x i32> %3
+}
+
+define <2 x float> @test_vcvt_n_f32_s32(<2 x i32> %a) {
+; CHECK: test_vcvt_n_f32_s32
+; CHECK: scvtf {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
+ %vcvt = tail call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %a, i32 31)
+ ret <2 x float> %vcvt
+}
+
+define <4 x float> @test_vcvtq_n_f32_s32(<4 x i32> %a) {
+; CHECK: test_vcvtq_n_f32_s32
+; CHECK: scvtf {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
+ %vcvt = tail call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %a, i32 31)
+ ret <4 x float> %vcvt
+}
+
+define <2 x double> @test_vcvtq_n_f64_s64(<2 x i64> %a) {
+; CHECK: test_vcvtq_n_f64_s64
+; CHECK: scvtf {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
+ %vcvt = tail call <2 x double> @llvm.arm.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64> %a, i32 50)
+ ret <2 x double> %vcvt
+}
+
+define <2 x float> @test_vcvt_n_f32_u32(<2 x i32> %a) {
+; CHECK: test_vcvt_n_f32_u32
+; CHECK: ucvtf {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
+ %vcvt = tail call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %a, i32 31)
+ ret <2 x float> %vcvt
+}
+
+define <4 x float> @test_vcvtq_n_f32_u32(<4 x i32> %a) {
+; CHECK: test_vcvtq_n_f32_u32
+; CHECK: ucvtf {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
+ %vcvt = tail call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %a, i32 31)
+ ret <4 x float> %vcvt
+}
+
+define <2 x double> @test_vcvtq_n_f64_u64(<2 x i64> %a) {
+; CHECK: test_vcvtq_n_f64_u64
+; CHECK: ucvtf {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
+ %vcvt = tail call <2 x double> @llvm.arm.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64> %a, i32 50)
+ ret <2 x double> %vcvt
+}
+
+define <2 x i32> @test_vcvt_n_s32_f32(<2 x float> %a) {
+; CHECK: test_vcvt_n_s32_f32
+; CHECK: fcvtzs {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
+ %vcvt = tail call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %a, i32 31)
+ ret <2 x i32> %vcvt
+}
+
+define <4 x i32> @test_vcvtq_n_s32_f32(<4 x float> %a) {
+; CHECK: test_vcvtq_n_s32_f32
+; CHECK: fcvtzs {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
+ %vcvt = tail call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %a, i32 31)
+ ret <4 x i32> %vcvt
+}
+
+define <2 x i64> @test_vcvtq_n_s64_f64(<2 x double> %a) {
+; CHECK: test_vcvtq_n_s64_f64
+; CHECK: fcvtzs {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
+ %vcvt = tail call <2 x i64> @llvm.arm.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double> %a, i32 50)
+ ret <2 x i64> %vcvt
+}
+
+define <2 x i32> @test_vcvt_n_u32_f32(<2 x float> %a) {
+; CHECK: test_vcvt_n_u32_f32
+; CHECK: fcvtzu {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #31
+ %vcvt = tail call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %a, i32 31)
+ ret <2 x i32> %vcvt
+}
+
+define <4 x i32> @test_vcvtq_n_u32_f32(<4 x float> %a) {
+; CHECK: test_vcvtq_n_u32_f32
+; CHECK: fcvtzu {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #31
+ %vcvt = tail call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %a, i32 31)
+ ret <4 x i32> %vcvt
+}
+
+define <2 x i64> @test_vcvtq_n_u64_f64(<2 x double> %a) {
+; CHECK: test_vcvtq_n_u64_f64
+; CHECK: fcvtzu {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #50
+ %vcvt = tail call <2 x i64> @llvm.arm.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double> %a, i32 50)
+ ret <2 x i64> %vcvt
+}
+
+declare <8 x i8> @llvm.aarch64.neon.vsrshr.v8i8(<8 x i8>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsrshr.v4i16(<4 x i16>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsrshr.v2i32(<2 x i32>, i32)
+
+declare <16 x i8> @llvm.aarch64.neon.vsrshr.v16i8(<16 x i8>, i32)
+
+declare <8 x i16> @llvm.aarch64.neon.vsrshr.v8i16(<8 x i16>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vsrshr.v4i32(<4 x i32>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vsrshr.v2i64(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vurshr.v8i8(<8 x i8>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vurshr.v4i16(<4 x i16>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vurshr.v2i32(<2 x i32>, i32)
+
+declare <16 x i8> @llvm.aarch64.neon.vurshr.v16i8(<16 x i8>, i32)
+
+declare <8 x i16> @llvm.aarch64.neon.vurshr.v8i16(<8 x i16>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vurshr.v4i32(<4 x i32>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vurshr.v2i64(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsri.v8i8(<8 x i8>, <8 x i8>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsri.v4i16(<4 x i16>, <4 x i16>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsri.v2i32(<2 x i32>, <2 x i32>, i32)
+
+declare <16 x i8> @llvm.aarch64.neon.vsri.v16i8(<16 x i8>, <16 x i8>, i32)
+
+declare <8 x i16> @llvm.aarch64.neon.vsri.v8i16(<8 x i16>, <8 x i16>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vsri.v4i32(<4 x i32>, <4 x i32>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vsri.v2i64(<2 x i64>, <2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsli.v8i8(<8 x i8>, <8 x i8>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsli.v4i16(<4 x i16>, <4 x i16>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsli.v2i32(<2 x i32>, <2 x i32>, i32)
+
+declare <16 x i8> @llvm.aarch64.neon.vsli.v16i8(<16 x i8>, <16 x i8>, i32)
+
+declare <8 x i16> @llvm.aarch64.neon.vsli.v8i16(<8 x i16>, <8 x i16>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vsli.v4i32(<4 x i32>, <4 x i32>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsqshlu.v8i8(<8 x i8>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsqshlu.v4i16(<4 x i16>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsqshlu.v2i32(<2 x i32>, i32)
+
+declare <16 x i8> @llvm.aarch64.neon.vsqshlu.v16i8(<16 x i8>, i32)
+
+declare <8 x i16> @llvm.aarch64.neon.vsqshlu.v8i16(<8 x i16>, i32)
+
+declare <4 x i32> @llvm.aarch64.neon.vsqshlu.v4i32(<4 x i32>, i32)
+
+declare <2 x i64> @llvm.aarch64.neon.vsqshlu.v2i64(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>)
+
+declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>)
+
+declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>)
+
+declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>)
+
+declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>)
+
+declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>)
+
+declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>)
+
+declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>)
+
+declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>)
+
+declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>)
+
+declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>)
+
+declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>)
+
+declare <8 x i8> @llvm.aarch64.neon.vsqshrun.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsqshrun.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsqshrun.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vrshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vrshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vrshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsqrshrun.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsqrshrun.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsqrshrun.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsqshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsqshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsqshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vuqshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vuqshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vuqshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vsqrshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vsqrshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vsqrshrn.v2i32(<2 x i64>, i32)
+
+declare <8 x i8> @llvm.aarch64.neon.vuqrshrn.v8i8(<8 x i16>, i32)
+
+declare <4 x i16> @llvm.aarch64.neon.vuqrshrn.v4i16(<4 x i32>, i32)
+
+declare <2 x i32> @llvm.aarch64.neon.vuqrshrn.v2i32(<2 x i64>, i32)
+
+declare <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32)
+
+declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32)
+
+declare <2 x double> @llvm.arm.neon.vcvtfxs2fp.v2f64.v2i64(<2 x i64>, i32)
+
+declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32)
+
+declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32)
+
+declare <2 x double> @llvm.arm.neon.vcvtfxu2fp.v2f64.v2i64(<2 x i64>, i32)
+
+declare <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32)
+
+declare <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32)
+
+declare <2 x i64> @llvm.arm.neon.vcvtfp2fxs.v2i64.v2f64(<2 x double>, i32)
+
+declare <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32)
+
+declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32)
+
+declare <2 x i64> @llvm.arm.neon.vcvtfp2fxu.v2i64.v2f64(<2 x double>, i32)
+
+define <1 x i64> @test_vcvt_n_s64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_n_s64_f64
+; CHECK: fcvtzs d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x i64> @llvm.arm.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double> %a, i32 64)
+ ret <1 x i64> %1
+}
+
+define <1 x i64> @test_vcvt_n_u64_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vcvt_n_u64_f64
+; CHECK: fcvtzu d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x i64> @llvm.arm.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double> %a, i32 64)
+ ret <1 x i64> %1
+}
+
+define <1 x double> @test_vcvt_n_f64_s64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_n_f64_s64
+; CHECK: scvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x double> @llvm.arm.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
+ ret <1 x double> %1
+}
+
+define <1 x double> @test_vcvt_n_f64_u64(<1 x i64> %a) {
+; CHECK-LABEL: test_vcvt_n_f64_u64
+; CHECK: ucvtf d{{[0-9]+}}, d{{[0-9]+}}, #64
+ %1 = tail call <1 x double> @llvm.arm.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64> %a, i32 64)
+ ret <1 x double> %1
+}
+
+declare <1 x i64> @llvm.arm.neon.vcvtfp2fxs.v1i64.v1f64(<1 x double>, i32)
+declare <1 x i64> @llvm.arm.neon.vcvtfp2fxu.v1i64.v1f64(<1 x double>, i32)
+declare <1 x double> @llvm.arm.neon.vcvtfxs2fp.v1f64.v1i64(<1 x i64>, i32)
+declare <1 x double> @llvm.arm.neon.vcvtfxu2fp.v1f64.v1i64(<1 x i64>, i32) \ No newline at end of file
diff --git a/test/CodeGen/AArch64/neon-simd-tbl.ll b/test/CodeGen/AArch64/neon-simd-tbl.ll
new file mode 100644
index 000000000000..8eac1e88c4a5
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-tbl.ll
@@ -0,0 +1,828 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8>, <8 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
+
+declare <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8>, <16 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
+
+declare <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <8 x i8>)
+
+define <8 x i8> @test_vtbl1_s8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtbl1_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl11.i
+}
+
+define <8 x i8> @test_vqtbl1_s8(<16 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vqtbl1_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
+ ret <8 x i8> %vtbl1.i
+}
+
+define <8 x i8> @test_vtbl2_s8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl2_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
+ %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl17.i
+}
+
+define <8 x i8> @test_vqtbl2_s8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl2_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl2.i
+}
+
+define <8 x i8> @test_vtbl3_s8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl3_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl212.i
+}
+
+define <8 x i8> @test_vqtbl3_s8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl3_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl3.i
+}
+
+define <8 x i8> @test_vtbl4_s8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl4_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl216.i
+}
+
+define <8 x i8> @test_vqtbl4_s8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl4_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl4.i
+}
+
+define <16 x i8> @test_vqtbl1q_s8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vqtbl1q_s8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %vtbl1.i
+}
+
+define <16 x i8> @test_vqtbl2q_s8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl2q_s8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl2.i
+}
+
+define <16 x i8> @test_vqtbl3q_s8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl3q_s8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl3.i
+}
+
+define <16 x i8> @test_vqtbl4q_s8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl4q_s8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl4.i
+}
+
+define <8 x i8> @test_vtbx1_s8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vtbx1_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx2_s8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx2_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx17.i
+}
+
+define <8 x i8> @test_vtbx3_s8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx3_s8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx4_s8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx4_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx216.i
+}
+
+define <8 x i8> @test_vqtbx1_s8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vqtbx1_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
+ ret <8 x i8> %vtbx1.i
+}
+
+define <8 x i8> @test_vqtbx2_s8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx2_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx2.i
+}
+
+define <8 x i8> @test_vqtbx3_s8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx3_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx3.i
+}
+
+define <8 x i8> @test_vqtbx4_s8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx4_s8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx4.i
+}
+
+define <16 x i8> @test_vqtbx1q_s8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vqtbx1q_s8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
+ ret <16 x i8> %vtbx1.i
+}
+
+define <16 x i8> @test_vqtbx2q_s8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx2q_s8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx2.i
+}
+
+define <16 x i8> @test_vqtbx3q_s8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx3q_s8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx3.i
+}
+
+define <16 x i8> @test_vqtbx4q_s8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx4q_s8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx4.i
+}
+
+define <8 x i8> @test_vtbl1_u8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtbl1_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl11.i
+}
+
+define <8 x i8> @test_vqtbl1_u8(<16 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vqtbl1_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
+ ret <8 x i8> %vtbl1.i
+}
+
+define <8 x i8> @test_vtbl2_u8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl2_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
+ %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl17.i
+}
+
+define <8 x i8> @test_vqtbl2_u8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl2_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl2.i
+}
+
+define <8 x i8> @test_vtbl3_u8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl3_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl212.i
+}
+
+define <8 x i8> @test_vqtbl3_u8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl3_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl3.i
+}
+
+define <8 x i8> @test_vtbl4_u8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl4_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl216.i
+}
+
+define <8 x i8> @test_vqtbl4_u8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl4_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl4.i
+}
+
+define <16 x i8> @test_vqtbl1q_u8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vqtbl1q_u8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %vtbl1.i
+}
+
+define <16 x i8> @test_vqtbl2q_u8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl2q_u8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl2.i
+}
+
+define <16 x i8> @test_vqtbl3q_u8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl3q_u8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl3.i
+}
+
+define <16 x i8> @test_vqtbl4q_u8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl4q_u8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl4.i
+}
+
+define <8 x i8> @test_vtbx1_u8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vtbx1_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx2_u8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx2_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx17.i
+}
+
+define <8 x i8> @test_vtbx3_u8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx3_u8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx4_u8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx4_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx216.i
+}
+
+define <8 x i8> @test_vqtbx1_u8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vqtbx1_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
+ ret <8 x i8> %vtbx1.i
+}
+
+define <8 x i8> @test_vqtbx2_u8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx2_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx2.i
+}
+
+define <8 x i8> @test_vqtbx3_u8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx3_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx3.i
+}
+
+define <8 x i8> @test_vqtbx4_u8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx4_u8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx4.i
+}
+
+define <16 x i8> @test_vqtbx1q_u8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vqtbx1q_u8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
+ ret <16 x i8> %vtbx1.i
+}
+
+define <16 x i8> @test_vqtbx2q_u8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx2q_u8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx2.i
+}
+
+define <16 x i8> @test_vqtbx3q_u8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx3q_u8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx3.i
+}
+
+define <16 x i8> @test_vqtbx4q_u8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx4q_u8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx4.i
+}
+
+define <8 x i8> @test_vtbl1_p8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vtbl1_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl11.i
+}
+
+define <8 x i8> @test_vqtbl1_p8(<16 x i8> %a, <8 x i8> %b) {
+; CHECK: test_vqtbl1_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %a, <8 x i8> %b)
+ ret <8 x i8> %vtbl1.i
+}
+
+define <8 x i8> @test_vtbl2_p8([2 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl2_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %a.coerce, 1
+ %vtbl1.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl17.i
+}
+
+define <8 x i8> @test_vqtbl2_p8([2 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl2_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl2.i
+}
+
+define <8 x i8> @test_vtbl3_p8([3 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl3_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %a.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl212.i
+}
+
+define <8 x i8> @test_vqtbl3_p8([3 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl3_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl3.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl3.i
+}
+
+define <8 x i8> @test_vtbl4_p8([4 x <8 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vtbl4_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %a.coerce, 3
+ %vtbl2.i = shufflevector <8 x i8> %__a.coerce.fca.0.extract.i, <8 x i8> %__a.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl215.i = shufflevector <8 x i8> %__a.coerce.fca.2.extract.i, <8 x i8> %__a.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl215.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl216.i
+}
+
+define <8 x i8> @test_vqtbl4_p8([4 x <16 x i8>] %a.coerce, <8 x i8> %b) {
+; CHECK: test_vqtbl4_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl4.v8i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <8 x i8> %b)
+ ret <8 x i8> %vtbl4.i
+}
+
+define <16 x i8> @test_vqtbl1q_p8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK: test_vqtbl1q_p8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbl1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b)
+ ret <16 x i8> %vtbl1.i
+}
+
+define <16 x i8> @test_vqtbl2q_p8([2 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl2q_p8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %a.coerce, 1
+ %vtbl2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl2.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl2.i
+}
+
+define <16 x i8> @test_vqtbl3q_p8([3 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl3q_p8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %a.coerce, 2
+ %vtbl3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl3.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl3.i
+}
+
+define <16 x i8> @test_vqtbl4q_p8([4 x <16 x i8>] %a.coerce, <16 x i8> %b) {
+; CHECK: test_vqtbl4q_p8:
+; CHECK: tbl {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__a.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 0
+ %__a.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 1
+ %__a.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 2
+ %__a.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %a.coerce, 3
+ %vtbl4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbl4.v16i8.v16i8(<16 x i8> %__a.coerce.fca.0.extract.i, <16 x i8> %__a.coerce.fca.1.extract.i, <16 x i8> %__a.coerce.fca.2.extract.i, <16 x i8> %__a.coerce.fca.3.extract.i, <16 x i8> %b)
+ ret <16 x i8> %vtbl4.i
+}
+
+define <8 x i8> @test_vtbx1_p8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vtbx1_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbl1.i = shufflevector <8 x i8> %b, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl11.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl1.v8i8.v16i8(<16 x i8> %vtbl1.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl11.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx2_p8(<8 x i8> %a, [2 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx2_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <8 x i8>] %b.coerce, 1
+ %vtbx1.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx17.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx1.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx17.i
+}
+
+define <8 x i8> @test_vtbx3_p8(<8 x i8> %a, [3 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx3_p8:
+; CHECK: tbl {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <8 x i8>] %b.coerce, 2
+ %vtbl2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl211.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbl212.i = tail call <8 x i8> @llvm.aarch64.neon.vtbl2.v8i8.v16i8(<16 x i8> %vtbl2.i, <16 x i8> %vtbl211.i, <8 x i8> %c)
+ %0 = icmp uge <8 x i8> %c, <i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24, i8 24>
+ %1 = sext <8 x i1> %0 to <8 x i8>
+ %vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %1, <8 x i8> %a, <8 x i8> %vtbl212.i)
+ ret <8 x i8> %vbsl.i
+}
+
+define <8 x i8> @test_vtbx4_p8(<8 x i8> %a, [4 x <8 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vtbx4_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <8 x i8>] %b.coerce, 3
+ %vtbx2.i = shufflevector <8 x i8> %__b.coerce.fca.0.extract.i, <8 x i8> %__b.coerce.fca.1.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx215.i = shufflevector <8 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %__b.coerce.fca.3.extract.i, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %vtbx216.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %vtbx2.i, <16 x i8> %vtbx215.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx216.i
+}
+
+define <8 x i8> @test_vqtbx1_p8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c) {
+; CHECK: test_vqtbx1_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %vtbx1.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx1.v8i8.v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c)
+ ret <8 x i8> %vtbx1.i
+}
+
+define <8 x i8> @test_vqtbx2_p8(<8 x i8> %a, [2 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx2_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx2.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx2.i
+}
+
+define <8 x i8> @test_vqtbx3_p8(<8 x i8> %a, [3 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx3_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx3.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx3.i
+}
+
+define <8 x i8> @test_vqtbx4_p8(<8 x i8> %a, [4 x <16 x i8>] %b.coerce, <8 x i8> %c) {
+; CHECK: test_vqtbx4_p8:
+; CHECK: tbx {{v[0-9]+}}.8b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.8b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <8 x i8> @llvm.aarch64.neon.vtbx4.v8i8.v16i8(<8 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <8 x i8> %c)
+ ret <8 x i8> %vtbx4.i
+}
+
+define <16 x i8> @test_vqtbx1q_p8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK: test_vqtbx1q_p8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %vtbx1.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx1.v16i8.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
+ ret <16 x i8> %vtbx1.i
+}
+
+define <16 x i8> @test_vqtbx2q_p8(<16 x i8> %a, [2 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx2q_p8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [2 x <16 x i8>] %b.coerce, 1
+ %vtbx2.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx2.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx2.i
+}
+
+define <16 x i8> @test_vqtbx3q_p8(<16 x i8> %a, [3 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx3q_p8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [3 x <16 x i8>] %b.coerce, 2
+ %vtbx3.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx3.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx3.i
+}
+
+define <16 x i8> @test_vqtbx4q_p8(<16 x i8> %a, [4 x <16 x i8>] %b.coerce, <16 x i8> %c) {
+; CHECK: test_vqtbx4q_p8:
+; CHECK: tbx {{v[0-9]+}}.16b, {{{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b}, {{v[0-9]+}}.16b
+entry:
+ %__b.coerce.fca.0.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 0
+ %__b.coerce.fca.1.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 1
+ %__b.coerce.fca.2.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 2
+ %__b.coerce.fca.3.extract.i = extractvalue [4 x <16 x i8>] %b.coerce, 3
+ %vtbx4.i = tail call <16 x i8> @llvm.aarch64.neon.vtbx4.v16i8.v16i8(<16 x i8> %a, <16 x i8> %__b.coerce.fca.0.extract.i, <16 x i8> %__b.coerce.fca.1.extract.i, <16 x i8> %__b.coerce.fca.2.extract.i, <16 x i8> %__b.coerce.fca.3.extract.i, <16 x i8> %c)
+ ret <16 x i8> %vtbx4.i
+}
+
diff --git a/test/CodeGen/AArch64/neon-simd-vget.ll b/test/CodeGen/AArch64/neon-simd-vget.ll
new file mode 100644
index 000000000000..6474499e4ff1
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-simd-vget.ll
@@ -0,0 +1,225 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_s8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_s16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_high_s32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_s64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_u8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_u16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_high_u32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_u64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_p64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_high_p64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_f16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_f16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_high_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vget_high_f32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_high_p8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_high_p16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_high_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vget_high_f64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> <i32 1>
+ ret <1 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_s8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_s16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_low_s32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_s64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_u8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_u16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vget_low_u32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_u64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_p64(<2 x i64> %a) {
+; CHECK-LABEL: test_vget_low_p64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+ ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_f16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_f16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_low_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vget_low_f32:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_p8(<16 x i8> %a) {
+; CHECK-LABEL: test_vget_low_p8:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_p16(<8 x i16> %a) {
+; CHECK-LABEL: test_vget_low_p16:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_low_f64(<2 x double> %a) {
+; CHECK-LABEL: test_vget_low_f64:
+; CHECK: ret
+entry:
+ %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> zeroinitializer
+ ret <1 x double> %shuffle.i
+}
diff --git a/test/CodeGen/AArch64/pic-eh-stubs.ll b/test/CodeGen/AArch64/pic-eh-stubs.ll
index 77bf691cbcbd..6ec4b19a1204 100644
--- a/test/CodeGen/AArch64/pic-eh-stubs.ll
+++ b/test/CodeGen/AArch64/pic-eh-stubs.ll
@@ -57,4 +57,4 @@ declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
declare i8* @__cxa_begin_catch(i8*)
-declare void @__cxa_end_catch() \ No newline at end of file
+declare void @__cxa_end_catch()
diff --git a/test/CodeGen/AArch64/regress-bitcast-formals.ll b/test/CodeGen/AArch64/regress-bitcast-formals.ll
index 28dc9a7e2515..9655f90d826d 100644
--- a/test/CodeGen/AArch64/regress-bitcast-formals.ll
+++ b/test/CodeGen/AArch64/regress-bitcast-formals.ll
@@ -4,7 +4,7 @@
; actually capable of that (the test was omitted from LowerFormalArguments).
define void @test_bitcast_lower(<2 x i32> %a) {
-; CHECK: test_bitcast_lower:
+; CHECK-LABEL: test_bitcast_lower:
ret void
; CHECK: ret
diff --git a/test/CodeGen/AArch64/regress-fp128-livein.ll b/test/CodeGen/AArch64/regress-fp128-livein.ll
new file mode 100644
index 000000000000..cb8432a7e4e4
--- /dev/null
+++ b/test/CodeGen/AArch64/regress-fp128-livein.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s
+
+; Regression test for the NZCV register live-in not being added to the
+; fp128csel IfTrue block, which caused a crash during live range calculation.
+define void @fp128_livein(i64 %a) {
+ %tobool = icmp ne i64 %a, 0
+ %conv = zext i1 %tobool to i32
+ %conv2 = sitofp i32 %conv to fp128
+ %conv6 = sitofp i32 %conv to double
+ %call3 = tail call i32 @g(fp128 %conv2)
+ %call8 = tail call i32 @h(double %conv6)
+ ret void
+}
+
+declare i32 @f()
+declare i32 @g(fp128)
+declare i32 @h(double)
diff --git a/test/CodeGen/AArch64/regress-tail-livereg.ll b/test/CodeGen/AArch64/regress-tail-livereg.ll
index 8d5485cae4c8..053249c6855f 100644
--- a/test/CodeGen/AArch64/regress-tail-livereg.ll
+++ b/test/CodeGen/AArch64/regress-tail-livereg.ll
@@ -4,7 +4,7 @@
declare void @bar()
define void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
%func = load void()** @var
; Calling a function encourages @foo to use a callee-saved register,
@@ -16,4 +16,4 @@ define void @foo() {
tail call void %func()
; CHECK: br {{x([0-79]|1[0-8])}}
ret void
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/regress-tblgen-chains.ll b/test/CodeGen/AArch64/regress-tblgen-chains.ll
index e54552fd8edf..ff77fb4e48f7 100644
--- a/test/CodeGen/AArch64/regress-tblgen-chains.ll
+++ b/test/CodeGen/AArch64/regress-tblgen-chains.ll
@@ -12,7 +12,7 @@
declare void @bar(i8*)
define i64 @test_chains() {
-; CHECK: test_chains:
+; CHECK-LABEL: test_chains:
%locvar = alloca i8
diff --git a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
index 980e2ffef901..0ef981819ec3 100644
--- a/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
+++ b/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
@@ -4,8 +4,23 @@
declare void @bar()
define void @test_w29_reserved() {
-; CHECK: test_w29_reserved:
+; CHECK-LABEL: test_w29_reserved:
+; CHECK: .cfi_startproc
+; CHECK: .cfi_def_cfa sp, 96
; CHECK: add x29, sp, #{{[0-9]+}}
+; CHECK: .cfi_def_cfa x29, 16
+; CHECK: .cfi_offset x30, -8
+; CHECK: .cfi_offset x29, -16
+; CHECK: .cfi_offset x28, -24
+; CHECK: .cfi_offset x27, -32
+; CHECK: .cfi_offset x26, -40
+; CHECK: .cfi_offset x25, -48
+; CHECK: .cfi_offset x24, -56
+; CHECK: .cfi_offset x23, -64
+; CHECK: .cfi_offset x22, -72
+; CHECK: .cfi_offset x21, -80
+; CHECK: .cfi_offset x20, -88
+; CHECK: .cfi_offset x19, -96
%val1 = load volatile i32* @var
%val2 = load volatile i32* @var
diff --git a/test/CodeGen/AArch64/returnaddr.ll b/test/CodeGen/AArch64/returnaddr.ll
new file mode 100644
index 000000000000..c85f9ec4ffd5
--- /dev/null
+++ b/test/CodeGen/AArch64/returnaddr.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+
+define i8* @rt0(i32 %x) nounwind readnone {
+entry:
+; CHECK-LABEL: rt0:
+; CHECK: mov x0, x30
+ %0 = tail call i8* @llvm.returnaddress(i32 0)
+ ret i8* %0
+}
+
+define i8* @rt2() nounwind readnone {
+entry:
+; CHECK-LABEL: rt2:
+; CHECK: ldr x[[reg:[0-9]+]], [x29]
+; CHECK: ldr x[[reg]], [x[[reg]]]
+; CHECK: ldr x0, [x[[reg]], #8]
+ %0 = tail call i8* @llvm.returnaddress(i32 2)
+ ret i8* %0
+}
+
+declare i8* @llvm.returnaddress(i32) nounwind readnone
diff --git a/test/CodeGen/AArch64/setcc-takes-i32.ll b/test/CodeGen/AArch64/setcc-takes-i32.ll
index d2eb77ab1b54..bd79685d34b4 100644
--- a/test/CodeGen/AArch64/setcc-takes-i32.ll
+++ b/test/CodeGen/AArch64/setcc-takes-i32.ll
@@ -12,11 +12,11 @@
declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64)
define i64 @test_select(i64 %lhs, i64 %rhs) {
-; CHECK: test_select:
+; CHECK-LABEL: test_select:
%res = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %lhs, i64 %rhs)
%flag = extractvalue {i64, i1} %res, 1
%retval = select i1 %flag, i64 %lhs, i64 %rhs
ret i64 %retval
; CHECK: ret
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/sibling-call.ll b/test/CodeGen/AArch64/sibling-call.ll
index a1ec618b03ba..20f1062a44dc 100644
--- a/test/CodeGen/AArch64/sibling-call.ll
+++ b/test/CodeGen/AArch64/sibling-call.ll
@@ -5,7 +5,7 @@ declare void @callee_stack8([8 x i32], i64)
declare void @callee_stack16([8 x i32], i64, i64)
define void @caller_to0_from0() nounwind {
-; CHECK: caller_to0_from0:
+; CHECK-LABEL: caller_to0_from0:
; CHECK-NEXT: // BB
tail call void @callee_stack0()
ret void
@@ -13,7 +13,7 @@ define void @caller_to0_from0() nounwind {
}
define void @caller_to0_from8([8 x i32], i64) nounwind{
-; CHECK: caller_to0_from8:
+; CHECK-LABEL: caller_to0_from8:
; CHECK-NEXT: // BB
tail call void @callee_stack0()
@@ -22,7 +22,7 @@ define void @caller_to0_from8([8 x i32], i64) nounwind{
}
define void @caller_to8_from0() {
-; CHECK: caller_to8_from0:
+; CHECK-LABEL: caller_to8_from0:
; Caller isn't going to clean up any extra stack we allocate, so it
; can't be a tail call.
@@ -32,7 +32,7 @@ define void @caller_to8_from0() {
}
define void @caller_to8_from8([8 x i32], i64 %a) {
-; CHECK: caller_to8_from8:
+; CHECK-LABEL: caller_to8_from8:
; CHECK-NOT: sub sp, sp,
; This should reuse our stack area for the 42
@@ -43,7 +43,7 @@ define void @caller_to8_from8([8 x i32], i64 %a) {
}
define void @caller_to16_from8([8 x i32], i64 %a) {
-; CHECK: caller_to16_from8:
+; CHECK-LABEL: caller_to16_from8:
; Shouldn't be a tail call: we can't use SP+8 because our caller might
; have something there. This may sound obvious but implementation does
@@ -54,7 +54,7 @@ define void @caller_to16_from8([8 x i32], i64 %a) {
}
define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
-; CHECK: caller_to8_from24:
+; CHECK-LABEL: caller_to8_from24:
; CHECK-NOT: sub sp, sp
; Reuse our area, putting "42" at incoming sp
@@ -65,7 +65,7 @@ define void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
}
define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
-; CHECK: caller_to16_from16:
+; CHECK-LABEL: caller_to16_from16:
; CHECK-NOT: sub sp, sp,
; Here we want to make sure that both loads happen before the stores:
@@ -85,13 +85,13 @@ define void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
@func = global void(i32)* null
define void @indirect_tail() {
-; CHECK: indirect_tail:
+; CHECK-LABEL: indirect_tail:
; CHECK-NOT: sub sp, sp
%fptr = load void(i32)** @func
tail call void %fptr(i32 42)
ret void
-; CHECK: movz w0, #42
; CHECK: ldr [[FPTR:x[1-9]+]], [{{x[0-9]+}}, #:lo12:func]
+; CHECK: movz w0, #42
; CHECK: br [[FPTR]]
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/AArch64/sincos-expansion.ll b/test/CodeGen/AArch64/sincos-expansion.ll
index c7a392b78c24..4cd44494d545 100644
--- a/test/CodeGen/AArch64/sincos-expansion.ll
+++ b/test/CodeGen/AArch64/sincos-expansion.ll
@@ -3,8 +3,8 @@
define float @test_sincos_f32(float %f) {
%sin = call float @sinf(float %f) readnone
%cos = call float @cosf(float %f) readnone
-; CHECK: bl cosf
; CHECK: bl sinf
+; CHECK: bl cosf
%val = fadd float %sin, %cos
ret float %val
}
@@ -13,8 +13,8 @@ define double @test_sincos_f64(double %f) {
%sin = call double @sin(double %f) readnone
%cos = call double @cos(double %f) readnone
%val = fadd double %sin, %cos
-; CHECK: bl cos
; CHECK: bl sin
+; CHECK: bl cos
ret double %val
}
@@ -22,8 +22,8 @@ define fp128 @test_sincos_f128(fp128 %f) {
%sin = call fp128 @sinl(fp128 %f) readnone
%cos = call fp128 @cosl(fp128 %f) readnone
%val = fadd fp128 %sin, %cos
-; CHECK: bl cosl
; CHECK: bl sinl
+; CHECK: bl cosl
ret fp128 %val
}
@@ -32,4 +32,4 @@ declare double @sin(double) readonly
declare fp128 @sinl(fp128) readonly
declare float @cosf(float) readonly
declare double @cos(double) readonly
-declare fp128 @cosl(fp128) readonly \ No newline at end of file
+declare fp128 @cosl(fp128) readonly
diff --git a/test/CodeGen/AArch64/tail-call.ll b/test/CodeGen/AArch64/tail-call.ll
index f323b151ad1e..81885f108512 100644
--- a/test/CodeGen/AArch64/tail-call.ll
+++ b/test/CodeGen/AArch64/tail-call.ll
@@ -5,7 +5,7 @@ declare fastcc void @callee_stack8([8 x i32], i64)
declare fastcc void @callee_stack16([8 x i32], i64, i64)
define fastcc void @caller_to0_from0() nounwind {
-; CHECK: caller_to0_from0:
+; CHECK-LABEL: caller_to0_from0:
; CHECK-NEXT: // BB
tail call fastcc void @callee_stack0()
ret void
@@ -13,7 +13,7 @@ define fastcc void @caller_to0_from0() nounwind {
}
define fastcc void @caller_to0_from8([8 x i32], i64) {
-; CHECK: caller_to0_from8:
+; CHECK-LABEL: caller_to0_from8:
tail call fastcc void @callee_stack0()
ret void
@@ -22,7 +22,7 @@ define fastcc void @caller_to0_from8([8 x i32], i64) {
}
define fastcc void @caller_to8_from0() {
-; CHECK: caller_to8_from0:
+; CHECK-LABEL: caller_to8_from0:
; CHECK: sub sp, sp, #32
; Key point is that the "42" should go #16 below incoming stack
@@ -35,7 +35,7 @@ define fastcc void @caller_to8_from0() {
}
define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
-; CHECK: caller_to8_from8:
+; CHECK-LABEL: caller_to8_from8:
; CHECK: sub sp, sp, #16
; Key point is that the "%a" should go where at SP on entry.
@@ -47,7 +47,7 @@ define fastcc void @caller_to8_from8([8 x i32], i64 %a) {
}
define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
-; CHECK: caller_to16_from8:
+; CHECK-LABEL: caller_to16_from8:
; CHECK: sub sp, sp, #16
; Important point is that the call reuses the "dead" argument space
@@ -63,7 +63,7 @@ define fastcc void @caller_to16_from8([8 x i32], i64 %a) {
define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
-; CHECK: caller_to8_from24:
+; CHECK-LABEL: caller_to8_from24:
; CHECK: sub sp, sp, #16
; Key point is that the "%a" should go where at #16 above SP on entry.
@@ -76,7 +76,7 @@ define fastcc void @caller_to8_from24([8 x i32], i64 %a, i64 %b, i64 %c) {
define fastcc void @caller_to16_from16([8 x i32], i64 %a, i64 %b) {
-; CHECK: caller_to16_from16:
+; CHECK-LABEL: caller_to16_from16:
; CHECK: sub sp, sp, #16
; Here we want to make sure that both loads happen before the stores:
diff --git a/test/CodeGen/AArch64/tls-dynamic-together.ll b/test/CodeGen/AArch64/tls-dynamic-together.ll
index bad2298c8a65..b5d7d8938444 100644
--- a/test/CodeGen/AArch64/tls-dynamic-together.ll
+++ b/test/CodeGen/AArch64/tls-dynamic-together.ll
@@ -8,7 +8,7 @@
@general_dynamic_var = external thread_local global i32
define i32 @test_generaldynamic() {
-; CHECK: test_generaldynamic:
+; CHECK-LABEL: test_generaldynamic:
%val = load i32* @general_dynamic_var
ret i32 %val
diff --git a/test/CodeGen/AArch64/tls-dynamics.ll b/test/CodeGen/AArch64/tls-dynamics.ll
index cdfd11783c23..68c481ce98b6 100644
--- a/test/CodeGen/AArch64/tls-dynamics.ll
+++ b/test/CodeGen/AArch64/tls-dynamics.ll
@@ -4,14 +4,14 @@
@general_dynamic_var = external thread_local global i32
define i32 @test_generaldynamic() {
-; CHECK: test_generaldynamic:
+; CHECK-LABEL: test_generaldynamic:
%val = load i32* @general_dynamic_var
ret i32 %val
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
-; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
-; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
+; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
+; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
; CHECK: .tlsdesccall general_dynamic_var
; CHECK-NEXT: blr [[CALLEE]]
@@ -19,20 +19,20 @@ define i32 @test_generaldynamic() {
; CHECK: ldr w0, [x[[TP]], x0]
; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
}
define i32* @test_generaldynamic_addr() {
-; CHECK: test_generaldynamic_addr:
+; CHECK-LABEL: test_generaldynamic_addr:
ret i32* @general_dynamic_var
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:general_dynamic_var
-; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
-; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
+; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var
+; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:general_dynamic_var]
; CHECK: .tlsdesccall general_dynamic_var
; CHECK-NEXT: blr [[CALLEE]]
@@ -40,8 +40,8 @@ define i32* @test_generaldynamic_addr() {
; CHECK: add x0, [[TP]], x0
; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
}
@@ -49,14 +49,14 @@ define i32* @test_generaldynamic_addr() {
@local_dynamic_var = external thread_local(localdynamic) global i32
define i32 @test_localdynamic() {
-; CHECK: test_localdynamic:
+; CHECK-LABEL: test_localdynamic:
%val = load i32* @local_dynamic_var
ret i32 %val
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
; CHECK: .tlsdesccall _TLS_MODULE_BASE_
; CHECK-NEXT: blr [[CALLEE]]
@@ -66,20 +66,20 @@ define i32 @test_localdynamic() {
; CHECK: ldr w0, [x0, [[DTP_OFFSET]]]
; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
}
define i32* @test_localdynamic_addr() {
-; CHECK: test_localdynamic_addr:
+; CHECK-LABEL: test_localdynamic_addr:
ret i32* @local_dynamic_var
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
; CHECK: .tlsdesccall _TLS_MODULE_BASE_
; CHECK-NEXT: blr [[CALLEE]]
@@ -89,8 +89,8 @@ define i32* @test_localdynamic_addr() {
; CHECK: add x0, x0, [[DTP_OFFSET]]
; CHECK-RELOC: R_AARCH64_TLSDESC_ADR_PAGE
-; CHECK-RELOC: R_AARCH64_TLSDESC_ADD_LO12_NC
-; CHECK-RELOC: R_AARCH64_TLSDESC_LD64_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_ADD_LO12_NC
+; CHECK-RELOC-DAG: R_AARCH64_TLSDESC_LD64_LO12_NC
; CHECK-RELOC: R_AARCH64_TLSDESC_CALL
}
@@ -101,7 +101,7 @@ define i32* @test_localdynamic_addr() {
@local_dynamic_var2 = external thread_local(localdynamic) global i32
define i32 @test_localdynamic_deduplicate() {
-; CHECK: test_localdynamic_deduplicate:
+; CHECK-LABEL: test_localdynamic_deduplicate:
%val = load i32* @local_dynamic_var
%val2 = load i32* @local_dynamic_var2
@@ -110,8 +110,8 @@ define i32 @test_localdynamic_deduplicate() {
ret i32 %sum
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
-; CHECK: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
-; CHECK: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
+; CHECK-DAG: add x0, x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_
+; CHECK-DAG: ldr [[CALLEE:x[0-9]+]], [x[[TLSDESC_HI]], #:tlsdesc_lo12:_TLS_MODULE_BASE_]
; CHECK: .tlsdesccall _TLS_MODULE_BASE_
; CHECK-NEXT: blr [[CALLEE]]
diff --git a/test/CodeGen/AArch64/tls-execs.ll b/test/CodeGen/AArch64/tls-execs.ll
index a66588422793..39ceb9a4795c 100644
--- a/test/CodeGen/AArch64/tls-execs.ll
+++ b/test/CodeGen/AArch64/tls-execs.ll
@@ -1,10 +1,10 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -show-mc-encoding < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
@initial_exec_var = external thread_local(initialexec) global i32
define i32 @test_initial_exec() {
-; CHECK: test_initial_exec:
+; CHECK-LABEL: test_initial_exec:
%val = load i32* @initial_exec_var
; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
@@ -19,7 +19,7 @@ define i32 @test_initial_exec() {
}
define i32* @test_initial_exec_addr() {
-; CHECK: test_initial_exec_addr:
+; CHECK-LABEL: test_initial_exec_addr:
ret i32* @initial_exec_var
; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
@@ -35,10 +35,10 @@ define i32* @test_initial_exec_addr() {
@local_exec_var = thread_local(initialexec) global i32 0
define i32 @test_local_exec() {
-; CHECK: test_local_exec:
+; CHECK-LABEL: test_local_exec:
%val = load i32* @local_exec_var
-; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
+; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var // encoding: [A,A,0xa0'A',0x92'A']
; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
; CHECK: mrs x[[TP:[0-9]+]], tpidr_el0
; CHECK: ldr w0, [x[[TP]], [[TP_OFFSET]]]
@@ -50,7 +50,7 @@ define i32 @test_local_exec() {
}
define i32* @test_local_exec_addr() {
-; CHECK: test_local_exec_addr:
+; CHECK-LABEL: test_local_exec_addr:
ret i32* @local_exec_var
; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var
diff --git a/test/CodeGen/AArch64/tst-br.ll b/test/CodeGen/AArch64/tst-br.ll
index 65c1fda49e2d..154bc08c144c 100644
--- a/test/CodeGen/AArch64/tst-br.ll
+++ b/test/CodeGen/AArch64/tst-br.ll
@@ -7,7 +7,7 @@
@var64 = global i64 0
define i32 @test_tbz() {
-; CHECK: test_tbz:
+; CHECK-LABEL: test_tbz:
%val = load i32* @var32
%val64 = load i64* @var64
diff --git a/test/CodeGen/AArch64/variadic.ll b/test/CodeGen/AArch64/variadic.ll
index c5d319eb112b..f3d376beeb28 100644
--- a/test/CodeGen/AArch64/variadic.ll
+++ b/test/CodeGen/AArch64/variadic.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 < %s | FileCheck --check-prefix=CHECK-NOFP %s
%va_list = type {i8*, i8*, i8*, i32, i32}
@@ -7,21 +8,30 @@
declare void @llvm.va_start(i8*)
define void @test_simple(i32 %n, ...) {
-; CHECK: test_simple:
+; CHECK-LABEL: test_simple:
; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
; CHECK: mov x[[FPRBASE:[0-9]+]], sp
; CHECK: str q7, [x[[FPRBASE]], #112]
; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
; CHECK: str x7, [x[[GPRBASE]], #48]
+; CHECK-NOFP: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK-NOFP: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
+; CHECK-NOFP: str x7, [x[[GPRBASE]], #48]
+; CHECK-NOFP-NOT: str q7,
+; CHECK-NOFP: str x1, [sp, #[[GPRFROMSP]]]
+
; Omit the middle ones
; CHECK: str q0, [sp]
; CHECK: str x1, [sp, #[[GPRFROMSP]]]
+; CHECK-NOFP-NOT: str q0, [sp]
+
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
; CHECK: movn [[VR_OFFS:w[0-9]+]], #127
; CHECK: str [[VR_OFFS]], [x[[VA_LIST]], #28]
; CHECK: movn [[GR_OFFS:w[0-9]+]], #55
@@ -33,22 +43,38 @@ define void @test_simple(i32 %n, ...) {
; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
+; CHECK-NOFP: movn [[GR_OFFS:w[0-9]+]], #55
+; CHECK-NOFP: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+; CHECK-NOFP: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #56
+; CHECK-NOFP: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
ret void
}
define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
-; CHECK: test_fewargs:
+; CHECK-LABEL: test_fewargs:
; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
; CHECK: mov x[[FPRBASE:[0-9]+]], sp
; CHECK: str q7, [x[[FPRBASE]], #96]
; CHECK: add x[[GPRBASE:[0-9]+]], sp, #[[GPRFROMSP:[0-9]+]]
; CHECK: str x7, [x[[GPRBASE]], #32]
+; CHECK-NOFP: sub sp, sp, #[[STACKSIZE:[0-9]+]]
+; CHECK-NOFP-NOT: str q7,
+; CHECK-NOFP: mov x[[GPRBASE:[0-9]+]], sp
+; CHECK-NOFP: str x7, [x[[GPRBASE]], #24]
+
; Omit the middle ones
; CHECK: str q1, [sp]
; CHECK: str x3, [sp, #[[GPRFROMSP]]]
+; CHECK-NOFP-NOT: str q1, [sp]
+; CHECK-NOFP: str x4, [sp]
+
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
@@ -63,11 +89,20 @@ define void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
; CHECK: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
+; CHECK-NOFP: movn [[GR_OFFS:w[0-9]+]], #31
+; CHECK-NOFP: str [[GR_OFFS]], [x[[VA_LIST]], #24]
+; CHECK-NOFP: add [[GR_TOP:x[0-9]+]], x[[GPRBASE]], #32
+; CHECK-NOFP: str [[GR_TOP]], [x[[VA_LIST]], #8]
+; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+
ret void
}
define void @test_nospare([8 x i64], [8 x float], ...) {
-; CHECK: test_nospare:
+; CHECK-LABEL: test_nospare:
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_start(i8* %addr)
@@ -75,18 +110,25 @@ define void @test_nospare([8 x i64], [8 x float], ...) {
; CHECK: mov [[STACK:x[0-9]+]], sp
; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP-NOT: sub sp, sp
+; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #64
+; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
ret void
}
; If there are non-variadic arguments on the stack (here two i64s) then the
; __stack field should point just past them.
define void @test_offsetstack([10 x i64], [3 x float], ...) {
-; CHECK: test_offsetstack:
+; CHECK-LABEL: test_offsetstack:
; CHECK: sub sp, sp, #80
; CHECK: mov x[[FPRBASE:[0-9]+]], sp
; CHECK: str q7, [x[[FPRBASE]], #64]
; CHECK-NOT: str x{{[0-9]+}},
+
+; CHECK-NOFP-NOT: str q7,
+; CHECK-NOT: str x7,
+
; Omit the middle ones
; CHECK: str q3, [sp]
@@ -102,20 +144,27 @@ define void @test_offsetstack([10 x i64], [3 x float], ...) {
; CHECK: add [[STACK:x[0-9]+]], sp, #96
; CHECK: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK-NOFP: add [[STACK:x[0-9]+]], sp, #40
+; CHECK-NOFP: str [[STACK]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP: str wzr, [x[[VA_LIST]], #28]
+; CHECK-NOFP: str wzr, [x[[VA_LIST]], #24]
ret void
}
declare void @llvm.va_end(i8*)
define void @test_va_end() nounwind {
-; CHECK: test_va_end:
+; CHECK-LABEL: test_va_end:
; CHECK-NEXT: BB#0
+; CHECK-NOFP: BB#0
%addr = bitcast %va_list* @var to i8*
call void @llvm.va_end(i8* %addr)
ret void
; CHECK-NEXT: ret
+; CHECK-NOFP-NEXT: ret
}
declare void @llvm.va_copy(i8* %dest, i8* %src)
@@ -123,7 +172,7 @@ declare void @llvm.va_copy(i8* %dest, i8* %src)
@second_list = global %va_list zeroinitializer
define void @test_va_copy() {
-; CHECK: test_va_copy:
+; CHECK-LABEL: test_va_copy:
%srcaddr = bitcast %va_list* @var to i8*
%dstaddr = bitcast %va_list* @second_list to i8*
call void @llvm.va_copy(i8* %dstaddr, i8* %srcaddr)
@@ -131,14 +180,25 @@ define void @test_va_copy() {
; Check beginning and end again:
; CHECK: ldr [[BLOCK:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+; CHECK-NOFP: ldr [[BLOCK:x[0-9]+]], [{{x[0-9]+}}, #:lo12:var]
+; CHECK-NOFP: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
+
; CHECK: str [[BLOCK]], [{{x[0-9]+}}, #:lo12:second_list]
+; CHECK: ldr [[BLOCK:x[0-9]+]], [x[[SRC_LIST]], #24]
; CHECK: add x[[DEST_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:second_list
-; CHECK: add x[[SRC_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:var
-; CHECK: ldr [[BLOCK:x[0-9]+]], [x[[SRC_LIST]], #24]
; CHECK: str [[BLOCK]], [x[[DEST_LIST]], #24]
+; CHECK-NOFP: str [[BLOCK]], [{{x[0-9]+}}, #:lo12:second_list]
+
+; CHECK-NOFP: ldr [[BLOCK:x[0-9]+]], [x[[SRC_LIST]], #24]
+; CHECK-NOFP: add x[[DEST_LIST:[0-9]+]], {{x[0-9]+}}, #:lo12:second_list
+
+; CHECK-NOFP: str [[BLOCK]], [x[[DEST_LIST]], #24]
+
ret void
; CHECK: ret
+; CHECK-NOFP: ret
}
diff --git a/test/CodeGen/AArch64/zero-reg.ll b/test/CodeGen/AArch64/zero-reg.ll
index fef0437ae7f3..9b1e52770ce4 100644
--- a/test/CodeGen/AArch64/zero-reg.ll
+++ b/test/CodeGen/AArch64/zero-reg.ll
@@ -4,7 +4,7 @@
@var64 = global i64 0
define void @test_zr() {
-; CHECK: test_zr:
+; CHECK-LABEL: test_zr:
store i32 0, i32* @var32
; CHECK: str wzr, [{{x[0-9]+}}, #:lo12:var32]
@@ -16,7 +16,7 @@ define void @test_zr() {
}
define void @test_sp(i32 %val) {
-; CHECK: test_sp:
+; CHECK-LABEL: test_sp:
; Important correctness point here is that LLVM doesn't try to use xzr
; as an addressing register: "str w0, [xzr]" is not a valid A64
@@ -28,4 +28,4 @@ define void @test_sp(i32 %val) {
ret void
; CHECK: ret
-} \ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index 0bfe33175196..e7c0129a7752 100644
--- a/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -4,7 +4,7 @@
@dequant_coef = external global [6 x [4 x [4 x i32]]] ; <[6 x [4 x [4 x i32]]]*> [#uses=1]
@A = external global [4 x [4 x i32]] ; <[4 x [4 x i32]]*> [#uses=1]
-; CHECK: dct_luma_sp:
+; CHECK-LABEL: dct_luma_sp:
define fastcc i32 @dct_luma_sp(i32 %block_x, i32 %block_y, i32* %coeff_cost) {
entry:
; Make sure to use base-updating stores for saving callee-saved registers.
diff --git a/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll b/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
index e1e60e6317a6..ee99c70ff0e6 100644
--- a/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
+++ b/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; pr4843
define <4 x i16> @v2regbug(<4 x i16>* %B) nounwind {
-;CHECK: v2regbug:
+;CHECK-LABEL: v2regbug:
;CHECK: vzip.16
%tmp1 = load <4 x i16>* %B
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32><i32 0, i32 0, i32 1, i32 1>
diff --git a/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll b/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
index 0fe3b39a622d..e2ff164502ce 100644
--- a/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
+++ b/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
@@ -4,7 +4,7 @@
%0 = type { double, double }
define void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: bl __aeabi_dadd
; CHECK-NOT: strd
; CHECK: mov
diff --git a/test/CodeGen/ARM/2009-10-16-Scope.ll b/test/CodeGen/ARM/2009-10-16-Scope.ll
index a2e7ff718b4a..570fcf96e641 100644
--- a/test/CodeGen/ARM/2009-10-16-Scope.ll
+++ b/test/CodeGen/ARM/2009-10-16-Scope.ll
@@ -23,10 +23,12 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
declare i32 @foo(i32) ssp
!0 = metadata !{i32 5, i32 2, metadata !1, null}
-!1 = metadata !{i32 458763, metadata !2, i32 1, i32 1}; [DW_TAG_lexical_block ]
-!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", metadata !3, i32 4, null, i1 false, i1 true}; [DW_TAG_subprogram ]
-!3 = metadata !{i32 458769, i32 0, i32 12, metadata !"genmodes.i", metadata !"/Users/yash/Downloads", metadata !"clang 1.1", i1 true, i1 false, metadata !"", i32 0}; [DW_TAG_compile_unit ]
+!1 = metadata !{i32 458763, null, metadata !2, i32 1, i32 1, i32 0}; [DW_TAG_lexical_block ]
+!2 = metadata !{i32 458798, i32 0, metadata !3, metadata !"bar", metadata !"bar", metadata !"bar", i32 4, null, i1 false, i1 true, i32 0, i32 0, null, i32 0, i32 0, null, null, null, null, i32 0}; [DW_TAG_subprogram ]
+!3 = metadata !{i32 458769, metadata !8, i32 12, metadata !"clang 1.1", i1 true, metadata !"", i32 0, null, metadata !9, null, null, null, metadata !""}; [DW_TAG_compile_unit ]
!4 = metadata !{i32 459008, metadata !5, metadata !"count_", metadata !3, i32 5, metadata !6}; [ DW_TAG_auto_variable ]
-!5 = metadata !{i32 458763, metadata !1, i32 1, i32 1}; [DW_TAG_lexical_block ]
-!6 = metadata !{i32 458788, metadata !3, metadata !"int", metadata !3, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ]
+!5 = metadata !{i32 458763, null, metadata !1, i32 1, i32 1, i32 0}; [DW_TAG_lexical_block ]
+!6 = metadata !{i32 458788, null, metadata !3, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}; [DW_TAG_base_type ]
!7 = metadata !{i32 6, i32 1, metadata !2, null}
+!8 = metadata !{metadata !"genmodes.i", metadata !"/Users/yash/Downloads"}
+!9 = metadata !{i32 0}
diff --git a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
index a8afc20bc130..4fb2be02ce9a 100644
--- a/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
+++ b/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
@@ -12,7 +12,7 @@ entry:
%3 = fmul float %0, %1 ; <float> [#uses=1]
%4 = fadd float 0.000000e+00, %3 ; <float> [#uses=1]
%5 = fsub float 1.000000e+00, %4 ; <float> [#uses=1]
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: vmov.f32 s{{[0-9]+}}, #1.000000e+00
%6 = fsub float 1.000000e+00, undef ; <float> [#uses=2]
%7 = fsub float %2, undef ; <float> [#uses=1]
diff --git a/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll b/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
index 05581c3f16cf..35739d76eae0 100644
--- a/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
+++ b/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
@@ -12,15 +12,21 @@ entry:
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-!0 = metadata !{i32 524545, metadata !1, metadata !"b", metadata !2, i32 93, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"__addvsi3", metadata !"__addvsi3", metadata !"__addvsi3", metadata !2, i32 94, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"libgcc2.c", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 524305, i32 0, i32 1, metadata !"libgcc2.c", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!llvm.dbg.cu = !{!3}
+!llvm.module.flags = !{!15}
+!0 = metadata !{i32 524545, metadata !1, metadata !"b", metadata !2, i32 93, metadata !6, i32 0, null} ; [ DW_TAG_arg_variable ]
+!1 = metadata !{i32 524334, metadata !12, null, metadata !"__addvsi3", metadata !"__addvsi3", metadata !"__addvsi3", i32 94, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i32 0, i32 0, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 524329, metadata !12} ; [ DW_TAG_file_type ]
+!12 = metadata !{metadata !"libgcc2.c", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc"}
+!3 = metadata !{i32 524305, metadata !12, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 00)", i1 true, metadata !"", i32 0, metadata !13, metadata !13, metadata !14, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!4 = metadata !{i32 524309, metadata !12, metadata !2, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!5 = metadata !{metadata !6, metadata !6, metadata !6}
-!6 = metadata !{i32 524310, metadata !2, metadata !"SItype", metadata !7, i32 152, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_typedef ]
+!6 = metadata !{i32 524310, metadata !12, null, metadata !"SItype", i32 152, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_typedef ]
!7 = metadata !{i32 524329, metadata !"libgcc2.h", metadata !"/Users/bwilson/local/nightly/test-2010-04-14/build/llvmgcc.roots/llvmgcc~obj/src/gcc", metadata !3} ; [ DW_TAG_file_type ]
-!8 = metadata !{i32 524324, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!8 = metadata !{i32 524324, metadata !12, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!9 = metadata !{i32 95, i32 0, metadata !10, null}
-!10 = metadata !{i32 524299, metadata !1, i32 94, i32 0} ; [ DW_TAG_lexical_block ]
+!10 = metadata !{i32 524299, metadata !12, metadata !1, i32 94, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
!11 = metadata !{i32 100, i32 0, metadata !10, null}
+!13 = metadata !{i32 0}
+!14 = metadata !{metadata !1}
+!15 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
index 0ae7f84f3ef3..35995b77c5bc 100644
--- a/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
+++ b/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
@@ -6,10 +6,10 @@
define zeroext i8 @t(%struct.foo* %this) noreturn optsize {
entry:
-; ARM: t:
+; ARM-LABEL: t:
; ARM: str r2, [r1], r0
-; THUMB: t:
+; THUMB-LABEL: t:
; THUMB-NOT: str r0, [r1], r0
; THUMB: str r1, [r0]
%0 = getelementptr inbounds %struct.foo* %this, i32 0, i32 1 ; <i64*> [#uses=1]
diff --git a/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll b/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
index cdb11c71fc0e..a53200e72c3f 100644
--- a/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
+++ b/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
@@ -48,19 +48,19 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.gv = !{!14}
!0 = metadata !{i32 524545, metadata !1, metadata !"buf", metadata !2, i32 4, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"x0", metadata !"x0", metadata !"x0", metadata !2, i32 5, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, null} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"t.c", metadata !"/private/tmp", metadata !3} ; [ DW_TAG_file_type ]
+!1 = metadata !{i32 524334, metadata !26, null, metadata !"x0", metadata !"x0", metadata !"x0", i32 5, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!2 = metadata !{i32 524329, metadata !26} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 524305, i32 0, i32 12, metadata !"t.c", metadata !".", metadata !"clang 2.0", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!4 = metadata !{i32 524309, metadata !26, metadata !2, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
!5 = metadata !{null}
-!6 = metadata !{i32 524303, metadata !2, metadata !"", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 524324, metadata !2, metadata !"unsigned char", metadata !2, i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 524303, metadata !26, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ]
+!7 = metadata !{i32 524324, metadata !26, metadata !2, metadata !"unsigned char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
!8 = metadata !{i32 524545, metadata !1, metadata !"nbytes", metadata !2, i32 4, metadata !9} ; [ DW_TAG_arg_variable ]
-!9 = metadata !{i32 524324, metadata !2, metadata !"unsigned long", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!9 = metadata !{i32 524324, metadata !26, metadata !2, metadata !"unsigned long", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
!10 = metadata !{i32 524544, metadata !11, metadata !"nread", metadata !2, i32 6, metadata !9} ; [ DW_TAG_auto_variable ]
-!11 = metadata !{i32 524299, metadata !1, i32 5, i32 1} ; [ DW_TAG_lexical_block ]
+!11 = metadata !{i32 524299, metadata !26, metadata !1, i32 5, i32 1, i32 0} ; [ DW_TAG_lexical_block ]
!12 = metadata !{i32 524544, metadata !11, metadata !"c", metadata !2, i32 7, metadata !13} ; [ DW_TAG_auto_variable ]
-!13 = metadata !{i32 524324, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!13 = metadata !{i32 524324, metadata !26, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!14 = metadata !{i32 524340, i32 0, metadata !2, metadata !"length", metadata !"length", metadata !"length", metadata !2, i32 1, metadata !13, i1 false, i1 true, i32* @length} ; [ DW_TAG_variable ]
!15 = metadata !{i32 4, i32 24, metadata !1, null}
!16 = metadata !{i32 4, i32 43, metadata !1, null}
@@ -69,7 +69,8 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!19 = metadata !{i32 10, i32 2, metadata !11, null}
!20 = metadata !{i32 11, i32 2, metadata !11, null}
!21 = metadata !{i32 12, i32 3, metadata !22, null}
-!22 = metadata !{i32 524299, metadata !11, i32 11, i32 45} ; [ DW_TAG_lexical_block ]
+!22 = metadata !{i32 524299, metadata !26, metadata !11, i32 11, i32 45, i32 0} ; [ DW_TAG_lexical_block ]
!23 = metadata !{i32 13, i32 3, metadata !22, null}
!24 = metadata !{i32 14, i32 2, metadata !22, null}
!25 = metadata !{i32 15, i32 1, metadata !11, null}
+!26 = metadata !{metadata !"t.c", metadata !"/private/tmp"}
diff --git a/test/CodeGen/ARM/2010-08-04-StackVariable.ll b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
index 112512ff59a5..7aacd1aa70ca 100644
--- a/test/CodeGen/ARM/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
@@ -75,48 +75,49 @@ return: ; preds = %entry
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!3}
+!llvm.module.flags = !{!49}
-!0 = metadata !{i32 786478, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"", metadata !2, i32 11, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null} ; [ DW_TAG_subprogram ]
-!1 = metadata !{i32 786451, metadata !2, metadata !"SVal", metadata !2, i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_structure_type ]
+!0 = metadata !{i32 786478, metadata !48, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"", i32 11, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!1 = metadata !{i32 786451, metadata !48, null, metadata !"SVal", i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [SVal] [line 1, size 128, align 64, offset 0] [def] [from ]
!2 = metadata !{i32 786473, metadata !48} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 4, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, metadata !47, metadata !47, metadata !46, metadata !47, metadata !47, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, metadata !48, i32 4, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, metadata !47, metadata !47, metadata !46, metadata !47, metadata !47, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{metadata !5, metadata !7, metadata !0, metadata !9}
-!5 = metadata !{i32 786445, metadata !1, metadata !"Data", metadata !2, i32 7, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
-!6 = metadata !{i32 786447, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 786445, metadata !1, metadata !"Kind", metadata !2, i32 8, i64 32, i64 32, i64 64, i32 0, metadata !8} ; [ DW_TAG_member ]
-!8 = metadata !{i32 786468, metadata !2, metadata !"unsigned int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
-!9 = metadata !{i32 786478, metadata !1, metadata !"~SVal", metadata !"~SVal", metadata !"", metadata !2, i32 12, metadata !10, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null} ; [ DW_TAG_subprogram ]
-!10 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!5 = metadata !{i32 786445, metadata !48, metadata !1, metadata !"Data", i32 7, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
+!6 = metadata !{i32 786447, metadata !48, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
+!7 = metadata !{i32 786445, metadata !48, metadata !1, metadata !"Kind", i32 8, i64 32, i64 32, i64 64, i32 0, metadata !8} ; [ DW_TAG_member ]
+!8 = metadata !{i32 786468, metadata !48, null, metadata !"unsigned int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
+!9 = metadata !{i32 786478, metadata !48, metadata !1, metadata !"~SVal", metadata !"~SVal", metadata !"", i32 12, metadata !10, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!10 = metadata !{i32 786453, metadata !48, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!11 = metadata !{null, metadata !12, metadata !13}
-!12 = metadata !{i32 786447, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !1} ; [ DW_TAG_pointer_type ]
-!13 = metadata !{i32 786468, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!14 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!12 = metadata !{i32 786447, metadata !48, null, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 64, metadata !1} ; [ DW_TAG_pointer_type ]
+!13 = metadata !{i32 786468, metadata !48, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!14 = metadata !{i32 786453, metadata !48, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !15, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!15 = metadata !{null, metadata !12}
-!16 = metadata !{i32 786478, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"_ZN4SValC1Ev", metadata !2, i32 11, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, void (%struct.SVal*)* @_ZN4SValC1Ev} ; [ DW_TAG_subprogram ]
-!17 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"_Z3fooi4SVal", metadata !2, i32 16, metadata !18, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal} ; [ DW_TAG_subprogram ]
-!18 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !19, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!16 = metadata !{i32 786478, metadata !48, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"_ZN4SValC1Ev", i32 11, metadata !14, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, void (%struct.SVal*)* @_ZN4SValC1Ev, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!17 = metadata !{i32 786478, metadata !48, metadata !2, metadata !"foo", metadata !"foo", metadata !"_Z3fooi4SVal", i32 16, metadata !18, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, i32 (i32, %struct.SVal*)* @_Z3fooi4SVal, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!18 = metadata !{i32 786453, metadata !48, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !19, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!19 = metadata !{metadata !13, metadata !13, metadata !1}
-!20 = metadata !{i32 786478, metadata !2, metadata !"main", metadata !"main", metadata !"main", metadata !2, i32 23, metadata !21, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, i32 ()* @main} ; [ DW_TAG_subprogram ]
-!21 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !22, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!20 = metadata !{i32 786478, metadata !48, metadata !2, metadata !"main", metadata !"main", metadata !"main", i32 23, metadata !21, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 false, i32 ()* @main, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
+!21 = metadata !{i32 786453, metadata !48, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !22, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!22 = metadata !{metadata !13}
!23 = metadata !{i32 786689, metadata !17, metadata !"i", metadata !2, i32 16, metadata !13, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
!24 = metadata !{i32 16, i32 0, metadata !17, null}
!25 = metadata !{i32 786689, metadata !17, metadata !"location", metadata !2, i32 16, metadata !26, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
-!26 = metadata !{i32 786448, metadata !2, metadata !"SVal", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !1} ; [ DW_TAG_reference_type ]
+!26 = metadata !{i32 786448, metadata !48, metadata !2, metadata !"SVal", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !1} ; [ DW_TAG_reference_type ]
!27 = metadata !{i32 17, i32 0, metadata !28, null}
!28 = metadata !{i32 786443, metadata !2, metadata !17, i32 16, i32 0, i32 2} ; [ DW_TAG_lexical_block ]
!29 = metadata !{i32 18, i32 0, metadata !28, null}
!30 = metadata !{i32 20, i32 0, metadata !28, null}
!31 = metadata !{i32 786689, metadata !16, metadata !"this", metadata !2, i32 11, metadata !32, i32 0, i32 0} ; [ DW_TAG_arg_variable ]
-!32 = metadata !{i32 786470, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !33} ; [ DW_TAG_const_type ]
-!33 = metadata !{i32 786447, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !1} ; [ DW_TAG_pointer_type ]
+!32 = metadata !{i32 786470, metadata !48, metadata !2, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 64, metadata !33} ; [ DW_TAG_const_type ]
+!33 = metadata !{i32 786447, metadata !48, metadata !2, metadata !"", i32 0, i64 64, i64 64, i64 0, i32 0, metadata !1} ; [ DW_TAG_pointer_type ]
!34 = metadata !{i32 11, i32 0, metadata !16, null}
!35 = metadata !{i32 11, i32 0, metadata !36, null}
-!36 = metadata !{i32 786443, metadata !2, metadata !37, i32 11, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
-!37 = metadata !{i32 786443, metadata !2, metadata !16, i32 11, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
+!36 = metadata !{i32 786443, metadata !48, metadata !37, i32 11, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
+!37 = metadata !{i32 786443, metadata !48, metadata !16, i32 11, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
!38 = metadata !{i32 786688, metadata !39, metadata !"v", metadata !2, i32 24, metadata !1, i32 0, i32 0} ; [ DW_TAG_auto_variable ]
-!39 = metadata !{i32 786443, metadata !2, metadata !40, i32 23, i32 0, i32 4} ; [ DW_TAG_lexical_block ]
-!40 = metadata !{i32 786443, metadata !2, metadata !20, i32 23, i32 0, i32 3} ; [ DW_TAG_lexical_block ]
+!39 = metadata !{i32 786443, metadata !48, metadata !40, i32 23, i32 0, i32 4} ; [ DW_TAG_lexical_block ]
+!40 = metadata !{i32 786443, metadata !48, metadata !20, i32 23, i32 0, i32 3} ; [ DW_TAG_lexical_block ]
!41 = metadata !{i32 24, i32 0, metadata !39, null}
!42 = metadata !{i32 25, i32 0, metadata !39, null}
!43 = metadata !{i32 26, i32 0, metadata !39, null}
@@ -125,3 +126,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!46 = metadata !{metadata !0, metadata !9, metadata !16, metadata !17, metadata !20}
!47 = metadata !{i32 0}
!48 = metadata !{metadata !"small.cc", metadata !"/Users/manav/R8248330"}
+!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll b/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
index bda14bcb1520..305369435138 100644
--- a/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
+++ b/test/CodeGen/ARM/2010-09-29-mc-asm-header-test.ll
@@ -1,12 +1,284 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
-; This tests that MC/asm header conversion is smooth
-;
-; CHECK: .syntax unified
-; CHECK: .eabi_attribute 20, 1
-; CHECK: .eabi_attribute 21, 1
-; CHECK: .eabi_attribute 23, 3
-; CHECK: .eabi_attribute 24, 1
-; CHECK: .eabi_attribute 25, 1
+; This tests that MC/asm header conversion is smooth and that the
+; build attributes are correct.
+
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi | FileCheck %s --check-prefix=V6
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi | FileCheck %s --check-prefix=V6M
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s | FileCheck %s --check-prefix=ARM1156T2F-S
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-neon,-crypto | FileCheck %s --check-prefix=V8-FPARMv8
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-fp-armv8,-crypto | FileCheck %s --check-prefix=V8-NEON
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mattr=-crypto | FileCheck %s --check-prefix=V8-FPARMv8-NEON
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8-FPARMv8-NEON-CRYPTO
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9-mp | FileCheck %s --check-prefix=CORTEX-A9-MP
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
+
+; V6: .eabi_attribute 6, 6
+; V6: .eabi_attribute 8, 1
+; V6: .eabi_attribute 24, 1
+; V6: .eabi_attribute 25, 1
+; V6-NOT: .eabi_attribute 27
+; V6-NOT: .eabi_attribute 28
+; V6-NOT: .eabi_attribute 36
+; V6-NOT: .eabi_attribute 42
+; V6-NOT: .eabi_attribute 68
+
+; V6M: .eabi_attribute 6, 12
+; V6M: .eabi_attribute 7, 77
+; V6M: .eabi_attribute 8, 0
+; V6M: .eabi_attribute 9, 1
+; V6M: .eabi_attribute 24, 1
+; V6M: .eabi_attribute 25, 1
+; V6M-NOT: .eabi_attribute 27
+; V6M-NOT: .eabi_attribute 28
+; V6M-NOT: .eabi_attribute 36
+; V6M-NOT: .eabi_attribute 42
+; V6M-NOT: .eabi_attribute 68
+
+; ARM1156T2F-S: .cpu arm1156t2f-s
+; ARM1156T2F-S: .eabi_attribute 6, 8
+; ARM1156T2F-S: .eabi_attribute 8, 1
+; ARM1156T2F-S: .eabi_attribute 9, 2
+; ARM1156T2F-S: .fpu vfpv2
+; ARM1156T2F-S: .eabi_attribute 20, 1
+; ARM1156T2F-S: .eabi_attribute 21, 1
+; ARM1156T2F-S: .eabi_attribute 23, 3
+; ARM1156T2F-S: .eabi_attribute 24, 1
+; ARM1156T2F-S: .eabi_attribute 25, 1
+; ARM1156T2F-S-NOT: .eabi_attribute 27
+; ARM1156T2F-S-NOT: .eabi_attribute 28
+; ARM1156T2F-S-NOT: .eabi_attribute 36
+; ARM1156T2F-S-NOT: .eabi_attribute 42
+; ARM1156T2F-S-NOT: .eabi_attribute 68
+
+; V7M: .eabi_attribute 6, 10
+; V7M: .eabi_attribute 7, 77
+; V7M: .eabi_attribute 8, 0
+; V7M: .eabi_attribute 9, 2
+; V7M: .eabi_attribute 24, 1
+; V7M: .eabi_attribute 25, 1
+; V7M-NOT: .eabi_attribute 27
+; V7M-NOT: .eabi_attribute 28
+; V7M-NOT: .eabi_attribute 36
+; V7M-NOT: .eabi_attribute 42
+; V7M: .eabi_attribute 44, 0
+; V7M-NOT: .eabi_attribute 68
+
+; V7: .syntax unified
+; V7: .eabi_attribute 6, 10
+; V7: .eabi_attribute 20, 1
+; V7: .eabi_attribute 21, 1
+; V7: .eabi_attribute 23, 3
+; V7: .eabi_attribute 24, 1
+; V7: .eabi_attribute 25, 1
+; V7-NOT: .eabi_attribute 27
+; V7-NOT: .eabi_attribute 28
+; V7-NOT: .eabi_attribute 36
+; V7-NOT: .eabi_attribute 42
+; V7-NOT: .eabi_attribute 68
+
+; V8: .syntax unified
+; V8: .eabi_attribute 6, 14
+
+; Vt8: .syntax unified
+; Vt8: .eabi_attribute 6, 14
+
+; V8-FPARMv8: .syntax unified
+; V8-FPARMv8: .eabi_attribute 6, 14
+; V8-FPARMv8: .fpu fp-armv8
+
+; V8-NEON: .syntax unified
+; V8-NEON: .eabi_attribute 6, 14
+; V8-NEON: .fpu neon
+; V8-NEON: .eabi_attribute 12, 3
+
+; V8-FPARMv8-NEON: .syntax unified
+; V8-FPARMv8-NEON: .eabi_attribute 6, 14
+; V8-FPARMv8-NEON: .fpu neon-fp-armv8
+; V8-FPARMv8-NEON: .eabi_attribute 12, 3
+
+; V8-FPARMv8-NEON-CRYPTO: .syntax unified
+; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 6, 14
+; V8-FPARMv8-NEON-CRYPTO: .fpu crypto-neon-fp-armv8
+; V8-FPARMv8-NEON-CRYPTO: .eabi_attribute 12, 3
+
+; CORTEX-A9-SOFT: .cpu cortex-a9
+; CORTEX-A9-SOFT: .eabi_attribute 6, 10
+; CORTEX-A9-SOFT: .eabi_attribute 7, 65
+; CORTEX-A9-SOFT: .eabi_attribute 8, 1
+; CORTEX-A9-SOFT: .eabi_attribute 9, 2
+; CORTEX-A9-SOFT: .fpu neon
+; CORTEX-A9-SOFT: .eabi_attribute 20, 1
+; CORTEX-A9-SOFT: .eabi_attribute 21, 1
+; CORTEX-A9-SOFT: .eabi_attribute 23, 3
+; CORTEX-A9-SOFT: .eabi_attribute 24, 1
+; CORTEX-A9-SOFT: .eabi_attribute 25, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 27
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 28
+; CORTEX-A9-SOFT: .eabi_attribute 36, 1
+; CORTEX-A9-SOFT-NOT: .eabi_attribute 42
+; CORTEX-A9-SOFT: .eabi_attribute 68, 1
+
+; CORTEX-A9-HARD: .cpu cortex-a9
+; CORTEX-A9-HARD: .eabi_attribute 6, 10
+; CORTEX-A9-HARD: .eabi_attribute 7, 65
+; CORTEX-A9-HARD: .eabi_attribute 8, 1
+; CORTEX-A9-HARD: .eabi_attribute 9, 2
+; CORTEX-A9-HARD: .fpu neon
+; CORTEX-A9-HARD: .eabi_attribute 20, 1
+; CORTEX-A9-HARD: .eabi_attribute 21, 1
+; CORTEX-A9-HARD: .eabi_attribute 23, 3
+; CORTEX-A9-HARD: .eabi_attribute 24, 1
+; CORTEX-A9-HARD: .eabi_attribute 25, 1
+; CORTEX-A9-HARD-NOT: .eabi_attribute 27
+; CORTEX-A9-HARD: .eabi_attribute 28, 1
+; CORTEX-A9-HARD: .eabi_attribute 36, 1
+; CORTEX-A9-HARD-NOT: .eabi_attribute 42
+; CORTEX-A9-HARD: .eabi_attribute 68, 1
+
+; CORTEX-A9-MP: .cpu cortex-a9-mp
+; CORTEX-A9-MP: .eabi_attribute 6, 10
+; CORTEX-A9-MP: .eabi_attribute 7, 65
+; CORTEX-A9-MP: .eabi_attribute 8, 1
+; CORTEX-A9-MP: .eabi_attribute 9, 2
+; CORTEX-A9-MP: .fpu neon
+; CORTEX-A9-MP: .eabi_attribute 20, 1
+; CORTEX-A9-MP: .eabi_attribute 21, 1
+; CORTEX-A9-MP: .eabi_attribute 23, 3
+; CORTEX-A9-MP: .eabi_attribute 24, 1
+; CORTEX-A9-MP: .eabi_attribute 25, 1
+; CORTEX-A9-MP-NOT: .eabi_attribute 27
+; CORTEX-A9-MP-NOT: .eabi_attribute 28
+; CORTEX-A9-MP: .eabi_attribute 36, 1
+; CORTEX-A9-MP: .eabi_attribute 42, 1
+; CORTEX-A9-MP: .eabi_attribute 68, 1
+
+; CORTEX-A15: .cpu cortex-a15
+; CORTEX-A15: .eabi_attribute 6, 10
+; CORTEX-A15: .eabi_attribute 7, 65
+; CORTEX-A15: .eabi_attribute 8, 1
+; CORTEX-A15: .eabi_attribute 9, 2
+; CORTEX-A15: .fpu neon-vfpv4
+; CORTEX-A15: .eabi_attribute 20, 1
+; CORTEX-A15: .eabi_attribute 21, 1
+; CORTEX-A15: .eabi_attribute 23, 3
+; CORTEX-A15: .eabi_attribute 24, 1
+; CORTEX-A15: .eabi_attribute 25, 1
+; CORTEX-A15-NOT: .eabi_attribute 27
+; CORTEX-A15-NOT: .eabi_attribute 28
+; CORTEX-A15: .eabi_attribute 36, 1
+; CORTEX-A15: .eabi_attribute 42, 1
+; CORTEX-A15: .eabi_attribute 44, 2
+; CORTEX-A15: .eabi_attribute 68, 3
+
+; CORTEX-M0: .cpu cortex-m0
+; CORTEX-M0: .eabi_attribute 6, 12
+; CORTEX-M0: .eabi_attribute 7, 77
+; CORTEX-M0: .eabi_attribute 8, 0
+; CORTEX-M0: .eabi_attribute 9, 1
+; CORTEX-M0: .eabi_attribute 24, 1
+; CORTEX-M0: .eabi_attribute 25, 1
+; CORTEX-M0-NOT: .eabi_attribute 27
+; CORTEX-M0-NOT: .eabi_attribute 28
+; CORTEX-M0-NOT: .eabi_attribute 36
+; CORTEX-M0-NOT: .eabi_attribute 42
+; CORTEX-M0-NOT: .eabi_attribute 68
+
+; CORTEX-M4-SOFT: .cpu cortex-m4
+; CORTEX-M4-SOFT: .eabi_attribute 6, 13
+; CORTEX-M4-SOFT: .eabi_attribute 7, 77
+; CORTEX-M4-SOFT: .eabi_attribute 8, 0
+; CORTEX-M4-SOFT: .eabi_attribute 9, 2
+; CORTEX-M4-SOFT: .fpu vfpv4-d16
+; CORTEX-M4-SOFT: .eabi_attribute 20, 1
+; CORTEX-M4-SOFT: .eabi_attribute 21, 1
+; CORTEX-M4-SOFT: .eabi_attribute 23, 3
+; CORTEX-M4-SOFT: .eabi_attribute 24, 1
+; CORTEX-M4-SOFT: .eabi_attribute 25, 1
+; CORTEX-M4-SOFT: .eabi_attribute 27, 1
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 28
+; CORTEX-M4-SOFT: .eabi_attribute 36, 1
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 42
+; CORTEX-M4-SOFT: .eabi_attribute 44, 0
+; CORTEX-M4-SOFT-NOT: .eabi_attribute 68
+
+; CORTEX-M4-HARD: .cpu cortex-m4
+; CORTEX-M4-HARD: .eabi_attribute 6, 13
+; CORTEX-M4-HARD: .eabi_attribute 7, 77
+; CORTEX-M4-HARD: .eabi_attribute 8, 0
+; CORTEX-M4-HARD: .eabi_attribute 9, 2
+; CORTEX-M4-HARD: .fpu vfpv4-d16
+; CORTEX-M4-HARD: .eabi_attribute 20, 1
+; CORTEX-M4-HARD: .eabi_attribute 21, 1
+; CORTEX-M4-HARD: .eabi_attribute 23, 3
+; CORTEX-M4-HARD: .eabi_attribute 24, 1
+; CORTEX-M4-HARD: .eabi_attribute 25, 1
+; CORTEX-M4-HARD: .eabi_attribute 27, 1
+; CORTEX-M4-HARD: .eabi_attribute 28, 1
+; CORTEX-M4-HARD: .eabi_attribute 36, 1
+; CORTEX-M4-HARD-NOT: .eabi_attribute 42
+; CORTEX-M4-HARD: .eabi_attribute 44, 0
+; CORTEX-M4-HARD-NOT: .eabi_attribute 68
+
+; CORTEX-R5: .cpu cortex-r5
+; CORTEX-R5: .eabi_attribute 6, 10
+; CORTEX-R5: .eabi_attribute 7, 82
+; CORTEX-R5: .eabi_attribute 8, 1
+; CORTEX-R5: .eabi_attribute 9, 2
+; CORTEX-R5: .fpu vfpv3-d16
+; CORTEX-R5: .eabi_attribute 20, 1
+; CORTEX-R5: .eabi_attribute 21, 1
+; CORTEX-R5: .eabi_attribute 23, 3
+; CORTEX-R5: .eabi_attribute 24, 1
+; CORTEX-R5: .eabi_attribute 25, 1
+; CORTEX-R5: .eabi_attribute 27, 1
+; CORTEX-R5-NOT: .eabi_attribute 28
+; CORTEX-R5-NOT: .eabi_attribute 36
+; CORTEX-R5-NOT: .eabi_attribute 42
+; CORTEX-R5: .eabi_attribute 44, 2
+; CORTEX-R5-NOT: .eabi_attribute 68
+
+; CORTEX-A53: .cpu cortex-a53
+; CORTEX-A53: .eabi_attribute 6, 14
+; CORTEX-A53: .eabi_attribute 7, 65
+; CORTEX-A53: .eabi_attribute 8, 1
+; CORTEX-A53: .eabi_attribute 9, 2
+; CORTEX-A53: .fpu crypto-neon-fp-armv8
+; CORTEX-A53: .eabi_attribute 12, 3
+; CORTEX-A53: .eabi_attribute 24, 1
+; CORTEX-A53: .eabi_attribute 25, 1
+; CORTEX-A53-NOT: .eabi_attribute 27
+; CORTEX-A53-NOT: .eabi_attribute 28
+; CORTEX-A53: .eabi_attribute 36, 1
+; CORTEX-A53: .eabi_attribute 42, 1
+; CORTEX-A53: .eabi_attribute 44, 2
+; CORTEX-A53: .eabi_attribute 68, 3
+
+; CORTEX-A57: .cpu cortex-a57
+; CORTEX-A57: .eabi_attribute 6, 14
+; CORTEX-A57: .eabi_attribute 7, 65
+; CORTEX-A57: .eabi_attribute 8, 1
+; CORTEX-A57: .eabi_attribute 9, 2
+; CORTEX-A57: .fpu crypto-neon-fp-armv8
+; CORTEX-A57: .eabi_attribute 12, 3
+; CORTEX-A57: .eabi_attribute 24, 1
+; CORTEX-A57: .eabi_attribute 25, 1
+; CORTEX-A57-NOT: .eabi_attribute 27
+; CORTEX-A57-NOT: .eabi_attribute 28
+; CORTEX-A57: .eabi_attribute 36, 1
+; CORTEX-A57: .eabi_attribute 42, 1
+; CORTEX-A57: .eabi_attribute 44, 2
+; CORTEX-A57: .eabi_attribute 68, 3
define i32 @f(i64 %z) {
ret i32 0
diff --git a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
deleted file mode 100644
index b253fefe87c4..000000000000
--- a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc %s -mtriple=arm-linux-gnueabi -filetype=obj -o - | \
-; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=BASIC %s
-; RUN: llc %s -mtriple=armv7-linux-gnueabi -march=arm -mcpu=cortex-a8 \
-; RUN: -mattr=-neon,-vfp3,+vfp2 \
-; RUN: -arm-reserve-r9 -filetype=obj -o - | \
-; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=CORTEXA8 %s
-
-
-; This tests that the extpected ARM attributes are emitted.
-;
-; BASIC: Section {
-; BASIC: Name: .ARM.attributes
-; BASIC-NEXT: Type: SHT_ARM_ATTRIBUTES
-; BASIC-NEXT: Flags [ (0x0)
-; BASIC-NEXT: ]
-; BASIC-NEXT: Address: 0x0
-; BASIC-NEXT: Offset: 0x3C
-; BASIC-NEXT: Size: 34
-; BASIC-NEXT: Link: 0
-; BASIC-NEXT: Info: 0
-; BASIC-NEXT: AddressAlignment: 1
-; BASIC-NEXT: EntrySize: 0
-; BASIC-NEXT: SectionData (
-; BASIC-NEXT: 0000: 41210000 00616561 62690001 17000000
-; BASIC-NEXT: 0010: 060A0741 08010902 14011501 17031801
-; BASIC-NEXT: 0020: 1901
-; BASIC-NEXT: )
-
-; CORTEXA8: Name: .ARM.attributes
-; CORTEXA8-NEXT: Type: SHT_ARM_ATTRIBUTES
-; CORTEXA8-NEXT: Flags [ (0x0)
-; CORTEXA8-NEXT: ]
-; CORTEXA8-NEXT: Address: 0x0
-; CORTEXA8-NEXT: Offset: 0x3C
-; CORTEXA8-NEXT: Size: 47
-; CORTEXA8-NEXT: Link: 0
-; CORTEXA8-NEXT: Info: 0
-; CORTEXA8-NEXT: AddressAlignment: 1
-; CORTEXA8-NEXT: EntrySize: 0
-; CORTEXA8-NEXT: SectionData (
-; CORTEXA8-NEXT: 0000: 412E0000 00616561 62690001 24000000
-; CORTEXA8-NEXT: 0010: 05434F52 5445582D 41380006 0A074108
-; CORTEXA8-NEXT: 0020: 0109020A 02140115 01170318 011901
-; CORTEXA8-NEXT: )
-
-define i32 @f(i64 %z) {
- ret i32 0
-}
diff --git a/test/CodeGen/ARM/2010-11-29-PrologueBug.ll b/test/CodeGen/ARM/2010-11-29-PrologueBug.ll
index da4d15771f48..4179d8c99d6a 100644
--- a/test/CodeGen/ARM/2010-11-29-PrologueBug.ll
+++ b/test/CodeGen/ARM/2010-11-29-PrologueBug.ll
@@ -4,7 +4,7 @@
define i32* @t(i32* %x) nounwind {
entry:
-; ARM: t:
+; ARM-LABEL: t:
; ARM: push
; ARM: mov r7, sp
; ARM: bl _foo
@@ -12,7 +12,7 @@ entry:
; ARM: bl _foo
; ARM: pop {r7, pc}
-; THUMB2: t:
+; THUMB2-LABEL: t:
; THUMB2: push
; THUMB2: mov r7, sp
; THUMB2: blx _foo
diff --git a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
deleted file mode 100644
index 9eecd045bfa0..000000000000
--- a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
-; RUN: llvm-readobj -s -sr -sd | FileCheck -check-prefix=OBJ %s
-
-target triple = "armv7-none-linux-gnueabi"
-
-@a = external global i8
-
-define arm_aapcs_vfpcc i32 @barf() nounwind {
-entry:
- %0 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
- ret i32 %0
-; OBJ: Section {
-; OBJ: Name: .text
-; OBJ: Relocations [
-; OBJ-NEXT: 0x4 R_ARM_MOVW_ABS_NC a
-; OBJ-NEXT: 0x8 R_ARM_MOVT_ABS
-; OBJ-NEXT: 0xC R_ARM_CALL foo
-; OBJ-NEXT: ]
-; OBJ-NEXT: SectionData (
-; OBJ-NEXT: 0000: 00482DE9 000000E3 000040E3 FEFFFFEB
-; OBJ-NEXT: 0010: 0088BDE8
-; OBJ-NEXT: )
-
-}
-
-declare arm_aapcs_vfpcc i32 @foo(i8*)
-
diff --git a/test/CodeGen/ARM/2010-12-07-PEIBug.ll b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
index 4879f4e10bac..eef6abd96451 100644
--- a/test/CodeGen/ARM/2010-12-07-PEIBug.ll
+++ b/test/CodeGen/ARM/2010-12-07-PEIBug.ll
@@ -3,7 +3,7 @@
define hidden void @foo() nounwind ssp {
entry:
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: mov r7, sp
; CHECK-NEXT: vpush {d8}
; CHECK-NEXT: vpush {d10, d11}
diff --git a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
index 98c0af35ef9a..f57411bb2c56 100644
--- a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
@@ -76,20 +76,21 @@ entry:
}
!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!49}
-!0 = metadata !{i32 786478, metadata !1, metadata !"get1", metadata !"get1", metadata !"get1", metadata !1, i32 4, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get1, null, null, metadata !42, i32 4} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get1", metadata !"get1", metadata !"get1", i32 4, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get1, null, null, metadata !42, i32 4} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, metadata !41, metadata !""} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !1, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !41, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786453, metadata !47, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5, metadata !5}
-!5 = metadata !{i32 786468, metadata !1, metadata !1, metadata !"_Bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 786478, metadata !1, metadata !"get2", metadata !"get2", metadata !"get2", metadata !1, i32 7, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get2, null, null, metadata !43, i32 7} ; [ DW_TAG_subprogram ]
-!7 = metadata !{i32 786478, metadata !1, metadata !"get3", metadata !"get3", metadata !"get3", metadata !1, i32 10, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get3, null, null, metadata !44, i32 10} ; [ DW_TAG_subprogram ]
-!8 = metadata !{i32 786478, metadata !1, metadata !"get4", metadata !"get4", metadata !"get4", metadata !1, i32 13, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get4, null, null, metadata !45, i32 13} ; [ DW_TAG_subprogram ]
-!9 = metadata !{i32 786478, metadata !1, metadata !"get5", metadata !"get5", metadata !"get5", metadata !1, i32 16, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get5, null, null, metadata !46, i32 16} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786468, metadata !47, metadata !1, metadata !"_Bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get2", metadata !"get2", metadata !"get2", i32 7, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get2, null, null, metadata !43, i32 7} ; [ DW_TAG_subprogram ]
+!7 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get3", metadata !"get3", metadata !"get3", i32 10, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get3, null, null, metadata !44, i32 10} ; [ DW_TAG_subprogram ]
+!8 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get4", metadata !"get4", metadata !"get4", i32 13, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get4, null, null, metadata !45, i32 13} ; [ DW_TAG_subprogram ]
+!9 = metadata !{i32 786478, metadata !47, metadata !1, metadata !"get5", metadata !"get5", metadata !"get5", i32 16, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get5, null, null, metadata !46, i32 16} ; [ DW_TAG_subprogram ]
!10 = metadata !{i32 786689, metadata !0, metadata !"a", metadata !1, i32 4, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!11 = metadata !{i32 786688, metadata !12, metadata !"b", metadata !1, i32 4, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!12 = metadata !{i32 786443, metadata !0, i32 4, i32 0, metadata !1, i32 0} ; [ DW_TAG_lexical_block ]
+!12 = metadata !{i32 786443, metadata !47, metadata !0, i32 4, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
!13 = metadata !{i32 786484, i32 0, metadata !1, metadata !"x1", metadata !"x1", metadata !"", metadata !1, i32 3, metadata !5, i1 true, i1 true, i8* @x1, null} ; [ DW_TAG_variable ]
!14 = metadata !{i32 786484, i32 0, metadata !1, metadata !"x2", metadata !"x2", metadata !"", metadata !1, i32 6, metadata !5, i1 true, i1 true, i8* @x2, null} ; [ DW_TAG_variable ]
!15 = metadata !{i32 786484, i32 0, metadata !1, metadata !"x3", metadata !"x3", metadata !"", metadata !1, i32 9, metadata !5, i1 true, i1 true, i8* @x3, null} ; [ DW_TAG_variable ]
@@ -97,16 +98,16 @@ entry:
!17 = metadata !{i32 786484, i32 0, metadata !1, metadata !"x5", metadata !"x5", metadata !"", metadata !1, i32 15, metadata !5, i1 false, i1 true, i8* @x5, null} ; [ DW_TAG_variable ]
!18 = metadata !{i32 786689, metadata !6, metadata !"a", metadata !1, i32 7, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!19 = metadata !{i32 786688, metadata !20, metadata !"b", metadata !1, i32 7, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!20 = metadata !{i32 786443, metadata !6, i32 7, i32 0, metadata !1, i32 1} ; [ DW_TAG_lexical_block ]
+!20 = metadata !{i32 786443, metadata !47, metadata !6, i32 7, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
!21 = metadata !{i32 786689, metadata !7, metadata !"a", metadata !1, i32 10, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!22 = metadata !{i32 786688, metadata !23, metadata !"b", metadata !1, i32 10, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!23 = metadata !{i32 786443, metadata !7, i32 10, i32 0, metadata !1, i32 2} ; [ DW_TAG_lexical_block ]
+!23 = metadata !{i32 786443, metadata !47, metadata !7, i32 10, i32 0, i32 2} ; [ DW_TAG_lexical_block ]
!24 = metadata !{i32 786689, metadata !8, metadata !"a", metadata !1, i32 13, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!25 = metadata !{i32 786688, metadata !26, metadata !"b", metadata !1, i32 13, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!26 = metadata !{i32 786443, metadata !8, i32 13, i32 0, metadata !1, i32 3} ; [ DW_TAG_lexical_block ]
+!26 = metadata !{i32 786443, metadata !47, metadata !8, i32 13, i32 0, i32 3} ; [ DW_TAG_lexical_block ]
!27 = metadata !{i32 786689, metadata !9, metadata !"a", metadata !1, i32 16, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!28 = metadata !{i32 786688, metadata !29, metadata !"b", metadata !1, i32 16, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{i32 786443, metadata !9, i32 16, i32 0, metadata !1, i32 4} ; [ DW_TAG_lexical_block ]
+!29 = metadata !{i32 786443, metadata !47, metadata !9, i32 16, i32 0, i32 4} ; [ DW_TAG_lexical_block ]
!30 = metadata !{i32 4, i32 0, metadata !0, null}
!31 = metadata !{i32 4, i32 0, metadata !12, null}
!32 = metadata !{i32 7, i32 0, metadata !6, null}
@@ -125,3 +126,5 @@ entry:
!45 = metadata !{metadata !24, metadata !25}
!46 = metadata !{metadata !27, metadata !28}
!47 = metadata !{metadata !"foo.c", metadata !"/tmp/"}
+!48 = metadata !{i32 0}
+!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index e84ce0e2394d..bc72e126b407 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -9,7 +9,7 @@
@oStruct = external global %struct.Outer, align 4
define void @main() nounwind {
-; CHECK: main:
+; CHECK-LABEL: main:
; CHECK-NOT: ldrd
; CHECK: mul
for.body.lr.ph:
@@ -21,8 +21,8 @@ for.body: ; preds = %_Z14printIsNotZeroi
%x = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 0
%y = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 1
%inc = add i32 %i.022, 1
- %tmp8 = load i32* %x, align 4, !tbaa !0
- %tmp11 = load i32* %y, align 4, !tbaa !0
+ %tmp8 = load i32* %x, align 4
+ %tmp11 = load i32* %y, align 4
%mul = mul nsw i32 %tmp11, %tmp8
%tobool.i14 = icmp eq i32 %mul, 0
br i1 %tobool.i14, label %_Z14printIsNotZeroi.exit17, label %if.then.i16
@@ -35,15 +35,10 @@ _Z14printIsNotZeroi.exit17: ; preds = %_Z14printIsNotZeroi
_Z14printIsNotZeroi.exit17.for.body_crit_edge: ; preds = %_Z14printIsNotZeroi.exit17
%b.phi.trans.insert = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
- %tmp3.pre = load i8* %b.phi.trans.insert, align 1, !tbaa !3
+ %tmp3.pre = load i8* %b.phi.trans.insert, align 1
%phitmp27 = icmp eq i8 undef, 0
br label %for.body
for.end: ; preds = %_Z14printIsNotZeroi.exit17
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"bool", metadata !1}
diff --git a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
index 0fe88bd0ed7e..caa0be56578c 100644
--- a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
+++ b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
@@ -8,7 +8,7 @@
; rdar://9172742
define i32 @t() nounwind {
-; CHECK: t:
+; CHECK-LABEL: t:
entry:
br label %bb2
diff --git a/test/CodeGen/ARM/2011-04-07-schediv.ll b/test/CodeGen/ARM/2011-04-07-schediv.ll
index 19f756f51364..f3dd3dd5811e 100644
--- a/test/CodeGen/ARM/2011-04-07-schediv.ll
+++ b/test/CodeGen/ARM/2011-04-07-schediv.ll
@@ -12,7 +12,7 @@ entry:
; Make sure the scheduler schedules all uses of the preincrement
; induction variable before defining the postincrement value.
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: %bb
; CHECK-NOT: mov
bb: ; preds = %entry, %bb
diff --git a/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll b/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll
index 568718c91127..e30c9c615053 100644
--- a/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll
+++ b/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll
@@ -4,7 +4,7 @@
; rdar://9266679
define zeroext i1 @t(i32* nocapture %A, i32 %size, i32 %value) nounwind readonly ssp {
-; CHECK: t:
+; CHECK-LABEL: t:
entry:
br label %for.cond
@@ -15,15 +15,14 @@ for.cond:
for.body:
; CHECK: %for.
-; CHECK: movs r{{[0-9]+}}, #{{[01]}}
+; CHECK: mov{{.*}} r{{[0-9]+}}, #{{[01]}}
+; CHECK: mov{{.*}} r{{[0-9]+}}, #{{[01]}}
+; CHECK-NOT: mov r{{[0-9]+}}, #{{[01]}}
%arrayidx = getelementptr i32* %A, i32 %0
%tmp4 = load i32* %arrayidx, align 4
%cmp6 = icmp eq i32 %tmp4, %value
br i1 %cmp6, label %return, label %for.inc
-; CHECK: %for.
-; CHECK: movs r{{[0-9]+}}, #{{[01]}}
-
for.inc:
%inc = add i32 %0, 1
br label %for.cond
diff --git a/test/CodeGen/ARM/2011-04-26-SchedTweak.ll b/test/CodeGen/ARM/2011-04-26-SchedTweak.ll
index ed7dd0332046..057c19948c35 100644
--- a/test/CodeGen/ARM/2011-04-26-SchedTweak.ll
+++ b/test/CodeGen/ARM/2011-04-26-SchedTweak.ll
@@ -15,7 +15,7 @@
define i32 @test() nounwind optsize ssp {
entry:
-; CHECK: test:
+; CHECK-LABEL: test:
; CHECK: push
; CHECK-NOT: push
%block_size = alloca i32, align 4
diff --git a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
index 7a7ca8e0d8d9..bb7870764c50 100644
--- a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
@@ -73,29 +73,30 @@ define i32 @get5(i32 %a) nounwind optsize ssp {
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!49}
-!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, metadata !41, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 786478, metadata !2, metadata !"get1", metadata !"get1", metadata !"", metadata !2, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get1, null, null, metadata !42, i32 5} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, metadata !48, metadata !48, metadata !40, metadata !41, metadata !41, null} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get1", metadata !"get1", metadata !"", i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get1, null, null, metadata !42, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [get1]
!2 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786453, metadata !2, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!3 = metadata !{i32 786453, metadata !47, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, null, metadata !0, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 786478, metadata !2, metadata !"get2", metadata !"get2", metadata !"", metadata !2, i32 8, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get2, null, null, metadata !43, i32 8} ; [ DW_TAG_subprogram ]
-!7 = metadata !{i32 786478, metadata !2, metadata !"get3", metadata !"get3", metadata !"", metadata !2, i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get3, null, null, metadata !44, i32 11} ; [ DW_TAG_subprogram ]
-!8 = metadata !{i32 786478, metadata !2, metadata !"get4", metadata !"get4", metadata !"", metadata !2, i32 14, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get4, null, null, metadata !45, i32 14} ; [ DW_TAG_subprogram ]
-!9 = metadata !{i32 786478, metadata !2, metadata !"get5", metadata !"get5", metadata !"", metadata !2, i32 17, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get5, null, null, metadata !46, i32 17} ; [ DW_TAG_subprogram ]
+!6 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get2", metadata !"get2", metadata !"", i32 8, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get2, null, null, metadata !43, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [get2]
+!7 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get3", metadata !"get3", metadata !"", i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get3, null, null, metadata !44, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [get3]
+!8 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get4", metadata !"get4", metadata !"", i32 14, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get4, null, null, metadata !45, i32 14} ; [ DW_TAG_subprogram ] [line 14] [def] [get4]
+!9 = metadata !{i32 786478, metadata !47, metadata !2, metadata !"get5", metadata !"get5", metadata !"", i32 17, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32)* @get5, null, null, metadata !46, i32 17} ; [ DW_TAG_subprogram ] [line 17] [def] [get5]
!10 = metadata !{i32 786689, metadata !1, metadata !"a", metadata !2, i32 16777221, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!11 = metadata !{i32 786688, metadata !12, metadata !"b", metadata !2, i32 5, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!12 = metadata !{i32 786443, metadata !1, i32 5, i32 19, metadata !2, i32 0} ; [ DW_TAG_lexical_block ]
+!12 = metadata !{i32 786443, metadata !47, metadata !1, i32 5, i32 19, i32 0} ; [ DW_TAG_lexical_block ]
!13 = metadata !{i32 786689, metadata !6, metadata !"a", metadata !2, i32 16777224, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!14 = metadata !{i32 786688, metadata !15, metadata !"b", metadata !2, i32 8, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!15 = metadata !{i32 786443, metadata !6, i32 8, i32 17, metadata !2, i32 1} ; [ DW_TAG_lexical_block ]
+!15 = metadata !{i32 786443, metadata !47, metadata !6, i32 8, i32 17, i32 1} ; [ DW_TAG_lexical_block ]
!16 = metadata !{i32 786689, metadata !7, metadata !"a", metadata !2, i32 16777227, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!17 = metadata !{i32 786688, metadata !18, metadata !"b", metadata !2, i32 11, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!18 = metadata !{i32 786443, metadata !7, i32 11, i32 19, metadata !2, i32 2} ; [ DW_TAG_lexical_block ]
+!18 = metadata !{i32 786443, metadata !47, metadata !7, i32 11, i32 19, i32 2} ; [ DW_TAG_lexical_block ]
!19 = metadata !{i32 786689, metadata !8, metadata !"a", metadata !2, i32 16777230, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!20 = metadata !{i32 786688, metadata !21, metadata !"b", metadata !2, i32 14, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!21 = metadata !{i32 786443, metadata !8, i32 14, i32 19, metadata !2, i32 3} ; [ DW_TAG_lexical_block ]
+!21 = metadata !{i32 786443, metadata !47, metadata !8, i32 14, i32 19, i32 3} ; [ DW_TAG_lexical_block ]
!22 = metadata !{i32 786484, i32 0, metadata !0, metadata !"x5", metadata !"x5", metadata !"", metadata !2, i32 16, metadata !5, i32 0, i32 1, i32* @x5, null} ; [ DW_TAG_variable ]
!23 = metadata !{i32 786484, i32 0, metadata !0, metadata !"x4", metadata !"x4", metadata !"", metadata !2, i32 13, metadata !5, i32 1, i32 1, i32* @x4, null} ; [ DW_TAG_variable ]
!24 = metadata !{i32 786484, i32 0, metadata !0, metadata !"x3", metadata !"x3", metadata !"", metadata !2, i32 10, metadata !5, i32 1, i32 1, i32* @x3, null} ; [ DW_TAG_variable ]
@@ -103,7 +104,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!26 = metadata !{i32 786484, i32 0, metadata !0, metadata !"x1", metadata !"x1", metadata !"", metadata !2, i32 4, metadata !5, i32 1, i32 1, i32* @x1, null} ; [ DW_TAG_variable ]
!27 = metadata !{i32 786689, metadata !9, metadata !"a", metadata !2, i32 16777233, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!28 = metadata !{i32 786688, metadata !29, metadata !"b", metadata !2, i32 17, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{i32 786443, metadata !9, i32 17, i32 19, metadata !2, i32 4} ; [ DW_TAG_lexical_block ]
+!29 = metadata !{i32 786443, metadata !47, metadata !9, i32 17, i32 19, i32 4} ; [ DW_TAG_lexical_block ]
!30 = metadata !{i32 5, i32 16, metadata !1, null}
!31 = metadata !{i32 5, i32 32, metadata !12, null}
!32 = metadata !{i32 8, i32 14, metadata !6, null}
@@ -122,3 +123,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!45 = metadata !{metadata !19, metadata !20}
!46 = metadata !{metadata !27, metadata !28}
!47 = metadata !{metadata !"ss3.c", metadata !"/private/tmp"}
+!48 = metadata !{i32 0}
+!49 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll b/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll
index 216057a31385..9163166177c1 100644
--- a/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll
+++ b/test/CodeGen/ARM/2011-08-25-ldmia_ret.ll
@@ -42,7 +42,7 @@ if.then: ; preds = %land.lhs.true
; If-convert the return
; CHECK: it ne
; Fold the CSR+return into a pop
-; CHECK: popne {r4, r5, r7, pc}
+; CHECK: pop {r4, r5, r7, pc}
sw.bb18:
%call20 = tail call i32 @bar(i32 %in2) nounwind
switch i32 %call20, label %sw.default56 [
diff --git a/test/CodeGen/ARM/2011-10-26-memset-inline.ll b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
index ff049c89860d..03614eddbf70 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-inline.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-inline.ll
@@ -10,8 +10,8 @@ target triple = "thumbv7-apple-ios5.0.0"
; CHECK-GENERIT-NEXT: strb
; CHECK-GENERIT-NEXT: strb
; CHECK-GENERIT-NEXT: strb
-; CHECK-UNALIGNED: strb
-; CHECK-UNALIGNED-NEXT: str
+; CHECK-UNALIGNED: strb
+; CHECK-UNALIGNED: str
define void @foo(i8* nocapture %c) nounwind optsize {
entry:
call void @llvm.memset.p0i8.i64(i8* %c, i8 -1, i64 5, i32 1, i1 false)
diff --git a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
index f563eeef0180..850c51133f3e 100644
--- a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
+++ b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=arm -mcpu=cortex-a8 < %s | FileCheck %s
; Trigger multiple NEON stores.
-; CHECK: vst1.64
-; CHECK-NEXT: vst1.64
+; CHECK: vst1.64
+; CHECK: vst1.64
define void @f_0_40(i8* nocapture %c) nounwind optsize {
entry:
call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 40, i32 16, i1 false)
diff --git a/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll b/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
index 113cbfe39620..8a65f2e82b75 100644
--- a/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
+++ b/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
@@ -6,7 +6,7 @@
@i8_src2 = global <2 x i8> <i8 2, i8 1>
define void @test_neon_vector_add_2xi8() nounwind {
-; CHECK: test_neon_vector_add_2xi8:
+; CHECK-LABEL: test_neon_vector_add_2xi8:
%1 = load <2 x i8>* @i8_src1
%2 = load <2 x i8>* @i8_src2
%3 = add <2 x i8> %1, %2
@@ -15,7 +15,7 @@ define void @test_neon_vector_add_2xi8() nounwind {
}
define void @test_neon_ld_st_volatile_with_ashr_2xi8() {
-; CHECK: test_neon_ld_st_volatile_with_ashr_2xi8:
+; CHECK-LABEL: test_neon_ld_st_volatile_with_ashr_2xi8:
%1 = load volatile <2 x i8>* @i8_src1
%2 = load volatile <2 x i8>* @i8_src2
%3 = ashr <2 x i8> %1, %2
diff --git a/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll b/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
index 2ab6a4fcc9b4..42eb32d14c74 100644
--- a/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
+++ b/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
@@ -7,7 +7,7 @@
declare <2 x i16> @foo_v2i16(<2 x i16>) nounwind
define void @test_neon_call_return_v2i16() {
-; CHECK: test_neon_call_return_v2i16:
+; CHECK-LABEL: test_neon_call_return_v2i16:
%1 = load <2 x i16>* @src1_v2i16
%2 = call <2 x i16> @foo_v2i16(<2 x i16> %1) nounwind
store <2 x i16> %2, <2 x i16>* @res_v2i16
diff --git a/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll b/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll
index 5409f8c60887..bc496b99f4a6 100644
--- a/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll
+++ b/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll
@@ -10,7 +10,7 @@
@infoBlock = external global %struct.InformationBlock
define hidden void @foo() {
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: ldr.w
; CHECK: ldr.w
; CHECK-NOT: ldm
diff --git a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
index 0d0d03b23e86..a263c9c8d678 100644
--- a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
+++ b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
@@ -4,7 +4,7 @@
define void @test_sqrt(<4 x float>* %X) nounwind {
-; CHECK: test_sqrt:
+; CHECK-LABEL: test_sqrt:
; CHECK: movw r1, :lower16:{{.*}}
; CHECK: movt r1, :upper16:{{.*}}
@@ -27,7 +27,7 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind readonly
define void @test_cos(<4 x float>* %X) nounwind {
-; CHECK: test_cos:
+; CHECK-LABEL: test_cos:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -58,7 +58,7 @@ declare <4 x float> @llvm.cos.v4f32(<4 x float>) nounwind readonly
define void @test_exp(<4 x float>* %X) nounwind {
-; CHECK: test_exp:
+; CHECK-LABEL: test_exp:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -89,7 +89,7 @@ declare <4 x float> @llvm.exp.v4f32(<4 x float>) nounwind readonly
define void @test_exp2(<4 x float>* %X) nounwind {
-; CHECK: test_exp2:
+; CHECK-LABEL: test_exp2:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -120,7 +120,7 @@ declare <4 x float> @llvm.exp2.v4f32(<4 x float>) nounwind readonly
define void @test_log10(<4 x float>* %X) nounwind {
-; CHECK: test_log10:
+; CHECK-LABEL: test_log10:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -151,7 +151,7 @@ declare <4 x float> @llvm.log10.v4f32(<4 x float>) nounwind readonly
define void @test_log(<4 x float>* %X) nounwind {
-; CHECK: test_log:
+; CHECK-LABEL: test_log:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -182,7 +182,7 @@ declare <4 x float> @llvm.log.v4f32(<4 x float>) nounwind readonly
define void @test_log2(<4 x float>* %X) nounwind {
-; CHECK: test_log2:
+; CHECK-LABEL: test_log2:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -214,7 +214,7 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float>) nounwind readonly
define void @test_pow(<4 x float>* %X) nounwind {
-; CHECK: test_pow:
+; CHECK-LABEL: test_pow:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -248,7 +248,7 @@ declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>) nounwind readonly
define void @test_powi(<4 x float>* %X) nounwind {
-; CHECK: test_powi:
+; CHECK-LABEL: test_powi:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -271,7 +271,7 @@ declare <4 x float> @llvm.powi.v4f32(<4 x float>, i32) nounwind readonly
define void @test_sin(<4 x float>* %X) nounwind {
-; CHECK: test_sin:
+; CHECK-LABEL: test_sin:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
@@ -302,7 +302,7 @@ declare <4 x float> @llvm.sin.v4f32(<4 x float>) nounwind readonly
define void @test_floor(<4 x float>* %X) nounwind {
-; CHECK: test_floor:
+; CHECK-LABEL: test_floor:
; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}}
; CHECK: movt [[reg0]], :upper16:{{.*}}
diff --git a/test/CodeGen/ARM/2012-03-26-FoldImmBug.ll b/test/CodeGen/ARM/2012-03-26-FoldImmBug.ll
index 0ff4f510eb3e..e795ec55fe5b 100644
--- a/test/CodeGen/ARM/2012-03-26-FoldImmBug.ll
+++ b/test/CodeGen/ARM/2012-03-26-FoldImmBug.ll
@@ -23,7 +23,7 @@
;
; rdar://11116189
define i64 @t(i64 %aInput) nounwind {
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: movs [[REG:(r[0-9]+)]], #0
; CHECK: movt [[REG]], #46540
; CHECK: adds r{{[0-9]+}}, r{{[0-9]+}}, [[REG]]
diff --git a/test/CodeGen/ARM/2012-05-04-vmov.ll b/test/CodeGen/ARM/2012-05-04-vmov.ll
index d52ef2cc5a1c..14dbf7ff4ac9 100644
--- a/test/CodeGen/ARM/2012-05-04-vmov.ll
+++ b/test/CodeGen/ARM/2012-05-04-vmov.ll
@@ -7,5 +7,8 @@ entry:
%div = udiv <2 x i32> %A, %B
ret <2 x i32> %div
; A9-CHECK: vmov.32
-; SWIFT-CHECK-NOT: vmov.32
+; vmov.32 should not be used to get a lane:
+; vmov.32 <dst>, <src>[<lane>].
+; but vmov.32 <dst>[<lane>], <src> is fine.
+; SWIFT-CHECK-NOT: vmov.32 {{r[0-9]+}}, {{d[0-9]\[[0-9]+\]}}
}
diff --git a/test/CodeGen/ARM/2012-08-09-neon-extload.ll b/test/CodeGen/ARM/2012-08-09-neon-extload.ll
index 764c58f2e159..a7108253cb62 100644
--- a/test/CodeGen/ARM/2012-08-09-neon-extload.ll
+++ b/test/CodeGen/ARM/2012-08-09-neon-extload.ll
@@ -12,7 +12,7 @@
@var_v2i64 = global <2 x i64> zeroinitializer
define void @test_v2i8tov2i32() {
-; CHECK: test_v2i8tov2i32:
+; CHECK-LABEL: test_v2i8tov2i32:
%i8val = load <2 x i8>* @var_v2i8
@@ -26,7 +26,7 @@ define void @test_v2i8tov2i32() {
}
define void @test_v2i8tov2i64() {
-; CHECK: test_v2i8tov2i64:
+; CHECK-LABEL: test_v2i8tov2i64:
%i8val = load <2 x i8>* @var_v2i8
@@ -44,7 +44,7 @@ define void @test_v2i8tov2i64() {
}
define void @test_v4i8tov4i16() {
-; CHECK: test_v4i8tov4i16:
+; CHECK-LABEL: test_v4i8tov4i16:
%i8val = load <4 x i8>* @var_v4i8
@@ -59,7 +59,7 @@ define void @test_v4i8tov4i16() {
}
define void @test_v4i8tov4i32() {
-; CHECK: test_v4i8tov4i32:
+; CHECK-LABEL: test_v4i8tov4i32:
%i8val = load <4 x i8>* @var_v4i8
@@ -73,7 +73,7 @@ define void @test_v4i8tov4i32() {
}
define void @test_v2i16tov2i32() {
-; CHECK: test_v2i16tov2i32:
+; CHECK-LABEL: test_v2i16tov2i32:
%i16val = load <2 x i16>* @var_v2i16
@@ -88,7 +88,7 @@ define void @test_v2i16tov2i32() {
}
define void @test_v2i16tov2i64() {
-; CHECK: test_v2i16tov2i64:
+; CHECK-LABEL: test_v2i16tov2i64:
%i16val = load <2 x i16>* @var_v2i16
diff --git a/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll b/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
index 2f55204aa407..647ebd6bdfd4 100644
--- a/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
+++ b/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
@@ -13,7 +13,7 @@
; v4i8
;
define void @sextload_v4i8_c(<4 x i8>* %v) nounwind {
-;CHECK: sextload_v4i8_c:
+;CHECK-LABEL: sextload_v4i8_c:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
@@ -26,7 +26,7 @@ entry:
; v2i8
;
define void @sextload_v2i8_c(<2 x i8>* %v) nounwind {
-;CHECK: sextload_v2i8_c:
+;CHECK-LABEL: sextload_v2i8_c:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
@@ -39,7 +39,7 @@ entry:
; v2i16
;
define void @sextload_v2i16_c(<2 x i16>* %v) nounwind {
-;CHECK: sextload_v2i16_c:
+;CHECK-LABEL: sextload_v2i16_c:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
@@ -54,7 +54,7 @@ entry:
; v4i8
;
define void @sextload_v4i8_v(<4 x i8>* %v, <4 x i8>* %p) nounwind {
-;CHECK: sextload_v4i8_v:
+;CHECK-LABEL: sextload_v4i8_v:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
@@ -70,7 +70,7 @@ entry:
; v2i8
;
define void @sextload_v2i8_v(<2 x i8>* %v, <2 x i8>* %p) nounwind {
-;CHECK: sextload_v2i8_v:
+;CHECK-LABEL: sextload_v2i8_v:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
@@ -86,7 +86,7 @@ entry:
; v2i16
;
define void @sextload_v2i16_v(<2 x i16>* %v, <2 x i16>* %p) nounwind {
-;CHECK: sextload_v2i16_v:
+;CHECK-LABEL: sextload_v2i16_v:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
@@ -104,7 +104,7 @@ entry:
; v4i8 x v4i16
;
define void @sextload_v4i8_vs(<4 x i8>* %v, <4 x i16>* %p) nounwind {
-;CHECK: sextload_v4i8_vs:
+;CHECK-LABEL: sextload_v4i8_vs:
entry:
%0 = load <4 x i8>* %v, align 8
%v0 = sext <4 x i8> %0 to <4 x i32>
@@ -120,7 +120,7 @@ entry:
; v2i8
; v2i8 x v2i16
define void @sextload_v2i8_vs(<2 x i8>* %v, <2 x i16>* %p) nounwind {
-;CHECK: sextload_v2i8_vs:
+;CHECK-LABEL: sextload_v2i8_vs:
entry:
%0 = load <2 x i8>* %v, align 8
%v0 = sext <2 x i8> %0 to <2 x i64>
@@ -136,7 +136,7 @@ entry:
; v2i16
; v2i16 x v2i32
define void @sextload_v2i16_vs(<2 x i16>* %v, <2 x i32>* %p) nounwind {
-;CHECK: sextload_v2i16_vs:
+;CHECK-LABEL: sextload_v2i16_vs:
entry:
%0 = load <2 x i16>* %v, align 8
%v0 = sext <2 x i16> %0 to <2 x i64>
diff --git a/test/CodeGen/ARM/2012-08-30-select.ll b/test/CodeGen/ARM/2012-08-30-select.ll
index 8471be5330b8..e78bbdea01f2 100644
--- a/test/CodeGen/ARM/2012-08-30-select.ll
+++ b/test/CodeGen/ARM/2012-08-30-select.ll
@@ -1,18 +1,15 @@
; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
; rdar://12201387
-;CHECK: select_s_v_v
+;CHECK-LABEL: select_s_v_v:
;CHECK: it ne
;CHECK-NEXT: vmovne.i32
;CHECK: bx
-define <16 x i8> @select_s_v_v(i32 %avail, i8* %bar) {
+define <16 x i8> @select_s_v_v(<16 x i8> %vec, i32 %avail) {
entry:
- %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
%and = and i32 %avail, 1
%tobool = icmp eq i32 %and, 0
- %vld1. = select i1 %tobool, <16 x i8> %vld1, <16 x i8> zeroinitializer
- ret <16 x i8> %vld1.
+ %ret = select i1 %tobool, <16 x i8> %vec, <16 x i8> zeroinitializer
+ ret <16 x i8> %ret
}
-declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
-
diff --git a/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
index e761ffe72c13..3bdbb3cf5801 100644
--- a/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
+++ b/test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll
@@ -4,7 +4,7 @@
; rdar://12300648
define i32 @t(i32 %x) {
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK-NOT: movw
%tmp = add i32 %x, -65535
ret i32 %tmp
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
index 75766099a220..38624e0641f2 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
; Check for error message:
; CHECK: non-trivial scalar-to-vector conversion, possible invalid constraint for vector type
diff --git a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
index 6fa1391474bb..7ba693d6df4a 100644
--- a/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
+++ b/test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
; Check for error message:
; CHECK: scalar-to-vector conversion failed, possible invalid constraint for vector type
diff --git a/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
index b0644d17431d..f864c8cbfcb5 100644
--- a/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
+++ b/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
@@ -7,7 +7,7 @@
declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_end(i8*) nounwind
-; CHECK: test_byval_8_bytes_alignment:
+; CHECK-LABEL: test_byval_8_bytes_alignment:
define void @test_byval_8_bytes_alignment(i32 %i, ...) {
entry:
; CHECK: stm r0, {r1, r2, r3}
@@ -23,8 +23,12 @@ entry:
ret void
}
-; CHECK: main:
-; CHECK: ldm r0, {r2, r3}
+; CHECK-LABEL: main:
+; CHECK: movw [[BASE:r[0-9]+]], :lower16:static_val
+; CHECK: movt [[BASE]], :upper16:static_val
+; ldm is not formed when the coalescer fails to coalesce everything.
+; CHECK: ldrd r2, [[TMP:r[0-9]+]], {{\[}}[[BASE]]{{\]}}
+; CHECK: movw r0, #555
define i32 @main() {
entry:
call void (i32, ...)* @test_byval_8_bytes_alignment(i32 555, %struct_t* byval @static_val)
@@ -33,7 +37,7 @@ entry:
declare void @f(double);
-; CHECK: test_byval_8_bytes_alignment_fixed_arg:
+; CHECK-LABEL: test_byval_8_bytes_alignment_fixed_arg:
; CHECK-NOT: str r1
; CHECK: str r3, [sp, #12]
; CHECK: str r2, [sp, #8]
@@ -46,11 +50,14 @@ entry:
ret void
}
-; CHECK: main_fixed_arg:
-; CHECK: ldm r0, {r2, r3}
+; CHECK-LABEL: main_fixed_arg:
+; CHECK: movw [[BASE:r[0-9]+]], :lower16:static_val
+; CHECK: movt [[BASE]], :upper16:static_val
+; ldm is not formed when the coalescer fails to coalesce everything.
+; CHECK: ldrd r2, [[TMP:r[0-9]+]], {{\[}}[[BASE]]{{\]}}
+; CHECK: movw r0, #555
define i32 @main_fixed_arg() {
entry:
call void (i32, %struct_t*)* @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval @static_val)
ret i32 0
}
-
diff --git a/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll b/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll
index 478048d09600..c9ccc103e2fa 100644
--- a/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll
+++ b/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll
@@ -6,7 +6,7 @@
declare i32 @printf(i8*, ...)
-; CHECK: test_byval_usage_scheduling:
+; CHECK-LABEL: test_byval_usage_scheduling:
; CHECK: str r3, [sp, #12]
; CHECK: str r2, [sp, #8]
; CHECK: vldr d16, [sp, #8]
diff --git a/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll b/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll
index f2395107d426..a59533c4a85d 100644
--- a/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll
+++ b/test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll
@@ -6,7 +6,7 @@
declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval %val);
-; CHECK: main:
+; CHECK-LABEL: main:
define i32 @main() nounwind {
entry:
; CHECK: ldrb {{(r[0-9]+)}}, {{(\[r[0-9]+\])}}, #1
diff --git a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
index fcc6a7f7e96f..0028eec80f44 100644
--- a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
+++ b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
@@ -5,7 +5,7 @@
declare void @f(%struct.s* %p);
-; CHECK: t:
+; CHECK-LABEL: t:
define void @t(i32 %a, %struct.s* byval %s) nounwind {
entry:
@@ -20,7 +20,7 @@ entry:
ret void
}
-; CHECK: caller:
+; CHECK-LABEL: caller:
define void @caller() {
; CHECK: ldm r0, {r1, r2, r3}
diff --git a/test/CodeGen/ARM/2012-11-14-subs_carry.ll b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
index 38700f3a8d10..8df295a2f658 100644
--- a/test/CodeGen/ARM/2012-11-14-subs_carry.ll
+++ b/test/CodeGen/ARM/2012-11-14-subs_carry.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
-;CHECK: foo
+;CHECK-LABEL: foo:
;CHECK: adds
;CHECK-NEXT: adc
;CHECK-NEXT: bx
diff --git a/test/CodeGen/ARM/2013-01-21-PR14992.ll b/test/CodeGen/ARM/2013-01-21-PR14992.ll
index 05abdeda0f19..014686feee0e 100644
--- a/test/CodeGen/ARM/2013-01-21-PR14992.ll
+++ b/test/CodeGen/ARM/2013-01-21-PR14992.ll
@@ -2,8 +2,8 @@
;RUN: llc -mtriple=thumbv7 < %s | FileCheck -check-prefix=EXPECTED %s
;RUN: llc -mtriple=thumbv7 < %s | FileCheck %s
-;EXPECTED: foo:
-;CHECK: foo:
+;EXPECTED-LABEL: foo:
+;CHECK-LABEL: foo:
define i32 @foo(i32* %a) nounwind optsize {
entry:
%0 = load i32* %a, align 4
diff --git a/test/CodeGen/ARM/2013-02-27-expand-vfma.ll b/test/CodeGen/ARM/2013-02-27-expand-vfma.ll
index 0e3bf2371061..f81211811860 100644
--- a/test/CodeGen/ARM/2013-02-27-expand-vfma.ll
+++ b/test/CodeGen/ARM/2013-02-27-expand-vfma.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=armv7s-apple-darwin | FileCheck %s -check-prefix=VFP4
+; RUN: llc < %s -mtriple=armv7s-apple-darwin | FileCheck %s -check-prefix=CHECK-VFP4
define <4 x float> @muladd(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind {
-; CHECK: muladd:
+; CHECK-LABEL: muladd:
; CHECK: fmaf
; CHECK: fmaf
; CHECK: fmaf
@@ -17,7 +17,7 @@ define <4 x float> @muladd(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounw
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1
define <2 x float> @muladd2(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind {
-; CHECK: muladd2:
+; CHECK-LABEL: muladd2:
; CHECK: fmaf
; CHECK: fmaf
; CHECK-NOT: fmaf
diff --git a/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
index 4a5ca9db0e50..127429bc31e3 100644
--- a/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
+++ b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
@@ -1,30 +1,30 @@
;PR15293: ARM codegen ice - expected larger existing stack allocation
;RUN: llc -mtriple=arm-linux-gnueabihf < %s | FileCheck %s
-;CHECK: foo:
+;CHECK-LABEL: foo:
;CHECK: sub sp, sp, #8
;CHECK: push {r11, lr}
-;CHECK: str r0, [sp, #12]
-;CHECK: add r0, sp, #12
+;CHECK: str r0, [sp, #8]
+;CHECK: add r0, sp, #8
;CHECK: bl fooUseParam
;CHECK: pop {r11, lr}
;CHECK: add sp, sp, #8
;CHECK: mov pc, lr
-;CHECK: foo2:
-;CHECK: sub sp, sp, #16
+;CHECK-LABEL: foo2:
+;CHECK: sub sp, sp, #8
;CHECK: push {r11, lr}
-;CHECK: str r0, [sp, #12]
-;CHECK: add r0, sp, #12
-;CHECK: str r2, [sp, #16]
+;CHECK: str r0, [sp, #8]
+;CHECK: add r0, sp, #8
+;CHECK: str r2, [sp, #12]
;CHECK: bl fooUseParam
-;CHECK: add r0, sp, #16
+;CHECK: add r0, sp, #12
;CHECK: bl fooUseParam
;CHECK: pop {r11, lr}
-;CHECK: add sp, sp, #16
+;CHECK: add sp, sp, #8
;CHECK: mov pc, lr
-;CHECK: doFoo:
+;CHECK-LABEL: doFoo:
;CHECK: push {r11, lr}
;CHECK: ldr r0,
;CHECK: ldr r0, [r0]
@@ -33,7 +33,7 @@
;CHECK: mov pc, lr
-;CHECK: doFoo2:
+;CHECK-LABEL: doFoo2:
;CHECK: push {r11, lr}
;CHECK: ldr r0,
;CHECK: mov r1, #0
diff --git a/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
index 38d515f9227f..08bf99b31f54 100644
--- a/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
+++ b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
@@ -53,11 +53,11 @@
;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
;
-;CHECK: foo:
+;CHECK-LABEL: foo:
;CHECK-NOT: mov r0
;CHECK-NOT: ldr r0
;CHECK: bl fooUseI32
-;CHECK: doFoo:
+;CHECK-LABEL: doFoo:
;CHECK: movs r0, #43
;CHECK: bl foo
diff --git a/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll b/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll
index de5fd31e2f2d..0e0537ec5bfc 100644
--- a/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll
+++ b/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll
@@ -9,7 +9,7 @@
@.str = private unnamed_addr constant [13 x i8] c"%d %d %f %i\0A\00", align 1
-;CHECK: printfn:
+;CHECK-LABEL: printfn:
define void @printfn(i32 %a, i16 signext %b, double %C, i8 signext %E) {
entry:
%conv = sext i16 %b to i32
diff --git a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
index abc6e0d11144..c4f5f54c3af0 100644
--- a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
+++ b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -1,8 +1,10 @@
; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 | FileCheck -check-prefix=CHECK-V8 %s
+; RUN: llc < %s -mtriple=thumbv7 -arm-restrict-it | FileCheck -check-prefix=CHECK-V8 %s
; rdar://13782395
define i32 @t1(i32 %a, i32 %b, i8** %retaddr) {
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: Block address taken
; CHECK-NOT: Address of block that was removed by CodeGen
store i8* blockaddress(@t1, %cond_true), i8** %retaddr
@@ -19,7 +21,7 @@ cond_false:
}
define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d, i8** %retaddr) {
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: Block address taken
; CHECK: %cond_true
; CHECK: add
@@ -41,7 +43,7 @@ UnifiedReturnBlock:
}
define hidden fastcc void @t3(i8** %retaddr) {
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: Block address taken
; CHECK-NOT: Address of block that was removed by CodeGen
bb:
@@ -69,3 +71,83 @@ bb6.i350: ; preds = %bb2.i
KBBlockZero.exit: ; preds = %bb2.i
indirectbr i8* undef, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
}
+
+
+; The if-converter was checking for the wrong predicate-subsumption pattern when
+; handling nested predicates.
+; E.g., let A be a basic block that flows conditionally into B, and let B be a
+; predicated block.
+; B can be predicated on A.BrToBPredicate and merged into A iff B.Predicate is
+; less "permissive" than A.BrToBPredicate, i.e., iff A.BrToBPredicate subsumes
+; B.Predicate.
+; <rdar://problem/14379453>
+
+; Hard-coded registers come from the ABI.
+; CHECK-LABEL: wrapDistance:
+; CHECK: cmp r1, #59
+; CHECK-NEXT: itt le
+; CHECK-NEXT: suble r0, r2, #1
+; CHECK-NEXT: bxle lr
+; CHECK-NEXT: subs [[REG:r[0-9]+]], #120
+; CHECK-NEXT: cmp [[REG]], r1
+; CHECK-NOT: it lt
+; CHECK-NEXT: bge [[LABEL:.+]]
+; Next BB
+; CHECK-NOT: cmplt
+; CHECK: cmp r0, #119
+; CHECK-NEXT: itt le
+; CHECK-NEXT: addle r0, r1, #1
+; CHECK-NEXT: bxle lr
+; Next BB
+; CHECK: [[LABEL]]:
+; CHECK-NEXT: subs r0, r1, r0
+; CHECK-NEXT: bx lr
+
+; CHECK-V8-LABEL: wrapDistance:
+; CHECK-V8: cmp r1, #59
+; CHECK-V8-NEXT: bgt
+; CHECK-V8-NEXT: %if.then
+; CHECK-V8-NEXT: subs r0, r2, #1
+; CHECK-V8-NEXT: bx lr
+; CHECK-V8-NEXT: %if.else
+; CHECK-V8-NEXT: subs [[REG:r[0-9]+]], #120
+; CHECK-V8-NEXT: cmp [[REG]], r1
+; CHECK-V8-NEXT: bge
+; CHECK-V8-NEXT: %if.else
+; CHECK-V8-NEXT: cmp r0, #119
+; CHECK-V8-NEXT: bgt
+; CHECK-V8-NEXT: %if.then4
+; CHECK-V8-NEXT: adds r0, r1, #1
+; CHECK-V8-NEXT: bx lr
+; CHECK-V8-NEXT: %if.end5
+; CHECK-V8-NEXT: subs r0, r1, r0
+; CHECK-V8-NEXT: bx lr
+
+define i32 @wrapDistance(i32 %tx, i32 %sx, i32 %w) {
+entry:
+ %cmp = icmp slt i32 %sx, 60
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %sub = add nsw i32 %w, -1
+ br label %return
+
+if.else: ; preds = %entry
+ %sub1 = add nsw i32 %w, -120
+ %cmp2 = icmp slt i32 %sub1, %sx
+ %cmp3 = icmp slt i32 %tx, 120
+ %or.cond = and i1 %cmp2, %cmp3
+ br i1 %or.cond, label %if.then4, label %if.end5
+
+if.then4: ; preds = %if.else
+ %add = add nsw i32 %sx, 1
+ br label %return
+
+if.end5: ; preds = %if.else
+ %sub6 = sub nsw i32 %sx, %tx
+ br label %return
+
+return: ; preds = %if.end5, %if.then4, %if.then
+ %retval.0 = phi i32 [ %sub, %if.then ], [ %add, %if.then4 ], [ %sub6, %if.end5 ]
+ ret i32 %retval.0
+}
diff --git a/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll b/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
new file mode 100644
index 000000000000..defb94601141
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -march=thumb -mattr=+v7,+thumb2 | FileCheck %s
+
+define i8 @f1(i8* %call1, i8* %call3, i32 %h, i32 %w, i32 %Width) {
+; CHECK: f1:
+entry:
+ %mul17 = mul nsw i32 %Width, %h
+ %add = add nsw i32 %mul17, %w
+ %sub19 = sub i32 %add, %Width
+ %sub20 = add i32 %sub19, -1
+ %arrayidx21 = getelementptr inbounds i8* %call1, i32 %sub20
+ %0 = load i8* %arrayidx21, align 1
+ %conv22 = zext i8 %0 to i32
+ %arrayidx25 = getelementptr inbounds i8* %call1, i32 %sub19
+ %1 = load i8* %arrayidx25, align 1
+ %conv26 = zext i8 %1 to i32
+ %mul23189 = add i32 %conv26, %conv22
+ %add30 = add i32 %sub19, 1
+ %arrayidx31 = getelementptr inbounds i8* %call1, i32 %add30
+ %2 = load i8* %arrayidx31, align 1
+ %conv32 = zext i8 %2 to i32
+; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
+; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
+ %add28190 = add i32 %mul23189, %conv32
+ %sub35 = add i32 %add, -1
+ %arrayidx36 = getelementptr inbounds i8* %call1, i32 %sub35
+ %3 = load i8* %arrayidx36, align 1
+ %conv37 = zext i8 %3 to i32
+ %add34191 = add i32 %add28190, %conv37
+ %arrayidx40 = getelementptr inbounds i8* %call1, i32 %add
+ %4 = load i8* %arrayidx40, align 1
+ %conv41 = zext i8 %4 to i32
+ %mul42 = mul nsw i32 %conv41, 255
+ %add44 = add i32 %add, 1
+ %arrayidx45 = getelementptr inbounds i8* %call1, i32 %add44
+ %5 = load i8* %arrayidx45, align 1
+ %conv46 = zext i8 %5 to i32
+; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
+; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
+ %add49 = add i32 %add, %Width
+ %sub50 = add i32 %add49, -1
+ %arrayidx51 = getelementptr inbounds i8* %call1, i32 %sub50
+ %6 = load i8* %arrayidx51, align 1
+ %conv52 = zext i8 %6 to i32
+ %arrayidx56 = getelementptr inbounds i8* %call1, i32 %add49
+ %7 = load i8* %arrayidx56, align 1
+ %conv57 = zext i8 %7 to i32
+ %add61 = add i32 %add49, 1
+ %arrayidx62 = getelementptr inbounds i8* %call1, i32 %add61
+ %8 = load i8* %arrayidx62, align 1
+ %conv63 = zext i8 %8 to i32
+; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
+; CHECK-NEXT: ldrb{{[.w]*}} r{{[0-9]*}}, [r{{[0-9]*}}, #1]
+ %tmp = add i32 %add34191, %conv46
+ %tmp193 = add i32 %tmp, %conv52
+ %tmp194 = add i32 %tmp193, %conv57
+ %tmp195 = add i32 %tmp194, %conv63
+ %tmp196 = mul i32 %tmp195, -28
+ %add65 = add i32 %tmp196, %mul42
+ %9 = lshr i32 %add65, 8
+ %conv68 = trunc i32 %9 to i8
+ %arrayidx69 = getelementptr inbounds i8* %call3, i32 %add
+ store i8 %conv68, i8* %arrayidx69, align 1
+ ret i8 %conv68
+}
diff --git a/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll b/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll
new file mode 100644
index 000000000000..7bf03a16c6fb
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding.ll
@@ -0,0 +1,31 @@
+;PR15293: ARM codegen ICE - expected larger existing stack allocation
+;RUN: llc -mtriple=arm-linux-gnueabihf < %s | FileCheck %s
+
+%struct.S227 = type { [49 x i32], i32 }
+
+define void @check227(
+ i32 %b,
+ %struct.S227* byval nocapture %arg0,
+ %struct.S227* %arg1) {
+; b --> R0
+; arg0 --> [R1, R2, R3, SP+0 .. SP+188)
+; arg1 --> SP+188
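+;
+; Roughly equivalent C, reconstructed only for illustration (an assumption,
+; not part of the original test):
+;   struct S227 { int a[49]; int b; };
+;   void check227(int b, struct S227 arg0, struct S227 *arg1) {
+;     useInt((int)arg1);
+;   }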
+
+entry:
+
+;CHECK: sub sp, sp, #16
+;CHECK: push {r11, lr}
+;CHECK: add r0, sp, #12
+;CHECK: stm r0, {r1, r2, r3}
+;CHECK: ldr r0, [sp, #212]
+;CHECK: bl useInt
+;CHECK: pop {r11, lr}
+;CHECK: add sp, sp, #16
+
+ %0 = ptrtoint %struct.S227* %arg1 to i32
+ tail call void @useInt(i32 %0)
+ ret void
+}
+
+declare void @useInt(i32)
+
diff --git a/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll b/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll
new file mode 100644
index 000000000000..438b021a040b
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll
@@ -0,0 +1,25 @@
+;PR15293: ARM codegen ICE - expected larger existing stack allocation
+;RUN: llc -mtriple=arm-linux-gnueabihf < %s | FileCheck %s
+
+%struct4bytes = type { i32 }
+%struct20bytes = type { i32, i32, i32, i32, i32 }
+
+define void @foo(%struct4bytes* byval %p0, ; --> R0
+ %struct20bytes* byval %p1 ; --> R1,R2,R3, [SP+0 .. SP+8)
+) {
+;CHECK: sub sp, sp, #16
+;CHECK: push {r11, lr}
+;CHECK: add r11, sp, #8
+;CHECK: stm r11, {r0, r1, r2, r3}
+;CHECK: add r0, sp, #12
+;CHECK: bl useInt
+;CHECK: pop {r11, lr}
+;CHECK: add sp, sp, #16
+
+ %1 = ptrtoint %struct20bytes* %p1 to i32
+ tail call void @useInt(i32 %1)
+ ret void
+}
+
+declare void @useInt(i32)
+
diff --git a/test/CodeGen/ARM/2013-05-13-DAGCombiner-undef-mask.ll b/test/CodeGen/ARM/2013-05-13-DAGCombiner-undef-mask.ll
new file mode 100644
index 000000000000..8f6709ec5e7b
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-13-DAGCombiner-undef-mask.ll
@@ -0,0 +1,10 @@
+; RUN: llc < %s
+target triple = "armv7-none-linux-gnueabi"
+
+define <3 x i64> @shuffle(i1 %dec1, i1 %dec0, <3 x i64> %b) {
+entry:
+ %.sink = select i1 %dec1, <3 x i64> %b, <3 x i64> zeroinitializer
+ %.sink15 = select i1 %dec0, <3 x i64> %b, <3 x i64> zeroinitializer
+ %vecinit7 = shufflevector <3 x i64> %.sink, <3 x i64> %.sink15, <3 x i32> <i32 0, i32 4, i32 undef>
+ ret <3 x i64> %vecinit7
+}
diff --git a/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll b/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
new file mode 100644
index 000000000000..0130f7ab68f5
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -O0 -mtriple=armv4t--linux-eabi-android
+; RUN: llc < %s -O0 -mtriple=armv4t-unknown-linux
+; RUN: llc < %s -O0 -mtriple=armv5-unknown-linux
+
+; See http://llvm.org/bugs/show_bug.cgi?id=16178
+; ARMFastISel used to fail emitting sext/zext in pre-ARMv6.
+
+; Function Attrs: nounwind
+define arm_aapcscc void @f2(i8 signext %a) #0 {
+entry:
+ %a.addr = alloca i8, align 1
+ store i8 %a, i8* %a.addr, align 1
+ %0 = load i8* %a.addr, align 1
+ %conv = sext i8 %0 to i32
+ %shr = ashr i32 %conv, 56
+ %conv1 = trunc i32 %shr to i8
+ call arm_aapcscc void @f1(i8 signext %conv1)
+ ret void
+}
+
+declare arm_aapcscc void @f1(i8 signext) #1
diff --git a/test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll b/test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll
new file mode 100644
index 000000000000..1c1380070219
--- /dev/null
+++ b/test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -mcpu=cortex-a15 | FileCheck %s
+; ModuleID = 'attri_16.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv4t--linux-gnueabihf"
+
+%big_struct0 = type { [517 x i32] }
+%big_struct1 = type { [516 x i32] }
+
+;CHECK-LABEL: f:
+define void @f(%big_struct0* %p0, %big_struct1* %p1) {
+
+;CHECK: sub sp, sp, #8
+;CHECK: sub sp, sp, #2048
+;CHECK: bl callme0
+ call void @callme0(%big_struct0* byval %p0)
+
+;CHECK: add sp, sp, #8
+;CHECK: add sp, sp, #2048
+;CHECK: sub sp, sp, #2048
+;CHECK: bl callme1
+ call void @callme1(%big_struct1* byval %p1)
+
+;CHECK: add sp, sp, #2048
+
+ ret void
+}
+
+declare void @callme0(%big_struct0* byval)
+declare void @callme1(%big_struct1* byval)
+
diff --git a/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
new file mode 100644
index 000000000000..a438c1f4556a
--- /dev/null
+++ b/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7--linux-gnueabi"
+
+; CHECK-LABEL: function
+define void @function() {
+; CHECK: cmp r0, #0
+; CHECK: bxne lr
+; CHECK: vmov.i32 q8, #0xff0000
+entry:
+ br i1 undef, label %vector.body, label %for.end
+
+; CHECK: vld1.32 {d18, d19}, [r0]
+; CHECK: vand q10, q9, q8
+; CHECK: vbic.i16 q9, #0xff
+; CHECK: vorr q9, q9, q10
+; CHECK: vst1.32 {d18, d19}, [r0]
+vector.body:
+ %wide.load = load <4 x i32>* undef, align 4
+ %0 = and <4 x i32> %wide.load, <i32 -16711936, i32 -16711936, i32 -16711936, i32 -16711936>
+ %1 = sub <4 x i32> %wide.load, zeroinitializer
+ %2 = and <4 x i32> %1, <i32 16711680, i32 16711680, i32 16711680, i32 16711680>
+ %3 = or <4 x i32> undef, %0
+ %4 = or <4 x i32> %3, %2
+ store <4 x i32> %4, <4 x i32>* undef, align 4
+ br label %vector.body
+
+for.end:
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-10-11-select-stalls.ll b/test/CodeGen/ARM/2013-10-11-select-stalls.ll
new file mode 100644
index 000000000000..33c0587226a8
--- /dev/null
+++ b/test/CodeGen/ARM/2013-10-11-select-stalls.ll
@@ -0,0 +1,16 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -stats 2>&1 | not grep "Number of pipeline stalls"
+; Evaluate the two vld1.8 instructions in separate MBBs,
+; instead of stalling on one and conditionally overwriting its result.
+
+define <16 x i8> @multiselect(i32 %avail, i8* %foo, i8* %bar) {
+entry:
+ %vld1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %foo, i32 1)
+ %vld2 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %bar, i32 1)
+ %and = and i32 %avail, 1
+ %tobool = icmp eq i32 %and, 0
+ %retv = select i1 %tobool, <16 x i8> %vld1, <16 x i8> %vld2
+ ret <16 x i8> %retv
+}
+
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* , i32 )
diff --git a/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll b/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
new file mode 100644
index 000000000000..5a864772faef
--- /dev/null
+++ b/test/CodeGen/ARM/2013-11-08-inline-asm-neon-array.ll
@@ -0,0 +1,16 @@
+;RUN: not llc -mtriple=arm-linux-gnueabihf < %s 2>&1 | FileCheck %s
+
+; ModuleID = 'bug.c'
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv7--"
+
+%struct.uint8x8x4_t = type { [4 x <8 x i8>] }
+
+define void @foo() #0 {
+ %vsrc = alloca %struct.uint8x8x4_t, align 8
+ %ptr = alloca i8;
+ %1 = call i8* asm sideeffect "vld4.u8 ${0:h}, [$1], $2", "=*w,=r,r,1"(%struct.uint8x8x4_t* %vsrc, i32 0, i8* %ptr)
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint 'w'
diff --git a/test/CodeGen/ARM/a15-SD-dep.ll b/test/CodeGen/ARM/a15-SD-dep.ll
index a52468e5be9e..019ff6129b00 100644
--- a/test/CodeGen/ARM/a15-SD-dep.ll
+++ b/test/CodeGen/ARM/a15-SD-dep.ll
@@ -1,8 +1,8 @@
-; RUN: llc -O1 -mcpu=cortex-a15 -mtriple=armv7-linux-gnueabi -disable-a15-sd-optimization -verify-machineinstrs < %s | FileCheck -check-prefix=DISABLED %s
-; RUN: llc -O1 -mcpu=cortex-a15 -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck -check-prefix=ENABLED %s
+; RUN: llc -O1 -mcpu=cortex-a15 -mtriple=armv7-linux-gnueabi -disable-a15-sd-optimization -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-DISABLED %s
+; RUN: llc -O1 -mcpu=cortex-a15 -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-ENABLED %s
-; CHECK-ENABLED: t1:
-; CHECK-DISABLED: t1:
+; CHECK-ENABLED-LABEL: t1:
+; CHECK-DISABLED-LABEL: t1:
define <2 x float> @t1(float %f) {
; CHECK-ENABLED: vdup.32 d{{[0-9]*}}, d0[0]
; CHECK-DISABLED-NOT: vdup.32 d{{[0-9]*}}, d0[0]
@@ -11,8 +11,8 @@ define <2 x float> @t1(float %f) {
ret <2 x float> %i2
}
-; CHECK-ENABLED: t2:
-; CHECK-DISABLED: t2:
+; CHECK-ENABLED-LABEL: t2:
+; CHECK-DISABLED-LABEL: t2:
define <4 x float> @t2(float %g, float %f) {
; CHECK-ENABLED: vdup.32 q{{[0-9]*}}, d0[0]
; CHECK-DISABLED-NOT: vdup.32 d{{[0-9]*}}, d0[0]
@@ -21,8 +21,8 @@ define <4 x float> @t2(float %g, float %f) {
ret <4 x float> %i2
}
-; CHECK-ENABLED: t3:
-; CHECK-DISABLED: t3:
+; CHECK-ENABLED-LABEL: t3:
+; CHECK-DISABLED-LABEL: t3:
define arm_aapcs_vfpcc <2 x float> @t3(float %f) {
; CHECK-ENABLED: vdup.32 d{{[0-9]*}}, d0[0]
; CHECK-DISABLED-NOT: vdup.32 d{{[0-9]*}}, d0[0]
@@ -31,8 +31,8 @@ define arm_aapcs_vfpcc <2 x float> @t3(float %f) {
ret <2 x float> %i2
}
-; CHECK-ENABLED: t4:
-; CHECK-DISABLED: t4:
+; CHECK-ENABLED-LABEL: t4:
+; CHECK-DISABLED-LABEL: t4:
define <2 x float> @t4(float %f) {
; CHECK-ENABLED: vdup.32 d{{[0-9]*}}, d0[0]
; CHECK-DISABLED-NOT: vdup
@@ -45,8 +45,8 @@ b:
ret <2 x float> %i2
}
-; CHECK-ENABLED: t5:
-; CHECK-DISABLED: t5:
+; CHECK-ENABLED-LABEL: t5:
+; CHECK-DISABLED-LABEL: t5:
define arm_aapcs_vfpcc <4 x float> @t5(<4 x float> %q, float %f) {
; CHECK-ENABLED: vdup.32 d{{[0-9]*}}, d{{[0-9]*}}[0]
; CHECK-ENABLED: vadd.f32
diff --git a/test/CodeGen/ARM/a15-mla.ll b/test/CodeGen/ARM/a15-mla.ll
index 25f6de4762d5..b233cc27c4ba 100644
--- a/test/CodeGen/ARM/a15-mla.ll
+++ b/test/CodeGen/ARM/a15-mla.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -float-abi=hard -mcpu=cortex-a15 -mattr=+neon,+neonfp | FileCheck %s
; This test checks that the VMLxForwarding feature is disabled for A15.
-; CHECK: fun_a
+; CHECK: fun_a:
define <4 x i32> @fun_a(<4 x i32> %x, <4 x i32> %y) nounwind{
%1 = add <4 x i32> %x, %y
; CHECK-NOT: vmul
@@ -10,3 +10,27 @@ define <4 x i32> @fun_a(<4 x i32> %x, <4 x i32> %y) nounwind{
%3 = add <4 x i32> %y, %2
ret <4 x i32> %3
}
+
+; This test checks that VMLA FP patterns can be matched in instruction selection when targeting
+; Cortex-A15.
+; CHECK: fun_b:
+define <4 x float> @fun_b(<4 x float> %x, <4 x float> %y, <4 x float> %z) nounwind{
+; CHECK: vmla.f32
+ %t = fmul <4 x float> %x, %y
+ %r = fadd <4 x float> %t, %z
+ ret <4 x float> %r
+}
+
+; This test checks that FP VMLA instructions are not expanded into separate multiply/addition
+; operations when targeting Cortex-A15.
+; CHECK: fun_c:
+define <4 x float> @fun_c(<4 x float> %x, <4 x float> %y, <4 x float> %z, <4 x float> %u, <4 x float> %v) nounwind{
+; CHECK: vmla.f32
+ %t1 = fmul <4 x float> %x, %y
+ %r1 = fadd <4 x float> %t1, %z
+; CHECK: vmla.f32
+ %t2 = fmul <4 x float> %u, %v
+ %r2 = fadd <4 x float> %t2, %r1
+ ret <4 x float> %r2
+}
+
diff --git a/test/CodeGen/ARM/a15-partial-update.ll b/test/CodeGen/ARM/a15-partial-update.ll
index 6306790d15f0..5747253d56b7 100644
--- a/test/CodeGen/ARM/a15-partial-update.ll
+++ b/test/CodeGen/ARM/a15-partial-update.ll
@@ -1,6 +1,6 @@
; RUN: llc -O1 -mcpu=cortex-a15 -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s
-; CHECK: t1:
+; CHECK-LABEL: t1:
define <2 x float> @t1(float* %A, <2 x float> %B) {
; The generated code for this test uses a vld1.32 instruction
; to write the lane 1 of a D register containing the value of
@@ -15,7 +15,7 @@ define <2 x float> @t1(float* %A, <2 x float> %B) {
ret <2 x float> %tmp3
}
-; CHECK: t2:
+; CHECK-LABEL: t2:
define void @t2(<4 x i8> *%in, <4 x i8> *%out, i32 %n) {
entry:
br label %loop
diff --git a/test/CodeGen/ARM/addrspacecast.ll b/test/CodeGen/ARM/addrspacecast.ll
new file mode 100644
index 000000000000..2e98ba53c67a
--- /dev/null
+++ b/test/CodeGen/ARM/addrspacecast.ll
@@ -0,0 +1,7 @@
+; RUN: llc < %s -march=arm
+
+; Check that codegen for an addrspace cast succeeds without error.
+define <4 x i32 addrspace(1)*> @f (<4 x i32*> %x) {
+ %1 = addrspacecast <4 x i32*> %x to <4 x i32 addrspace(1)*>
+ ret <4 x i32 addrspace(1)*> %1
+}
diff --git a/test/CodeGen/ARM/aliases.ll b/test/CodeGen/ARM/aliases.ll
index d668334f8d6a..f55ae10b247d 100644
--- a/test/CodeGen/ARM/aliases.ll
+++ b/test/CodeGen/ARM/aliases.ll
@@ -1,15 +1,30 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -o %t
-; RUN: grep " = " %t | count 5
-; RUN: grep globl %t | count 4
-; RUN: grep weak %t | count 1
+; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
-@bar = external global i32
+; CHECK: .globl test
+
+; CHECK: .globl foo1
+; CHECK: foo1 = bar
+
+; CHECK: .globl foo2
+; CHECK: foo2 = bar
+
+; CHECK: .weak bar_f
+; CHECK: bar_f = foo_f
+
+; CHECK: bar_i = bar
+
+; CHECK: .globl A
+; CHECK: A = bar
+
+@bar = global i32 42
@foo1 = alias i32* @bar
@foo2 = alias i32* @bar
%FunTy = type i32()
-declare i32 @foo_f()
+define i32 @foo_f() {
+ ret i32 0
+}
@bar_f = alias weak %FunTy* @foo_f
@bar_i = alias internal i32* @bar
diff --git a/test/CodeGen/ARM/alloc-no-stack-realign.ll b/test/CodeGen/ARM/alloc-no-stack-realign.ll
index 273041dee34e..6e6311d4d34f 100644
--- a/test/CodeGen/ARM/alloc-no-stack-realign.ll
+++ b/test/CodeGen/ARM/alloc-no-stack-realign.ll
@@ -1,30 +1,14 @@
-; RUN: llc < %s -mtriple=armv7-apple-ios -O0 -realign-stack=0 | FileCheck %s -check-prefix=NO-REALIGN
-; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=NO-REALIGN
+; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=REALIGN
; rdar://12713765
; When realign-stack is set to false, make sure we are not creating stack
; objects that are assumed to be 64-byte aligned.
@T3_retval = common global <16 x float> zeroinitializer, align 16
-define void @test(<16 x float>* noalias sret %agg.result) nounwind ssp {
+define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
-; CHECK: test
-; CHECK: bic sp, sp, #63
-; CHECK: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; CHECK: vst1.64
-; CHECK: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; CHECK: vst1.64
-; CHECK: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; CHECK: vst1.64
-; CHECK: vst1.64
-; CHECK: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
-; CHECK: vst1.64
-; CHECK: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
-; CHECK: vst1.64
-; CHECK: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
-; CHECK: vst1.64
-; CHECK: vst1.64
-; NO-REALIGN: test
+; NO-REALIGN: test1
; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
; NO-REALIGN: vst1.64
; NO-REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
@@ -46,3 +30,29 @@ entry:
store <16 x float> %1, <16 x float>* %agg.result, align 16
ret void
}
+
+define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
+entry:
+; REALIGN: test2
+; REALIGN: bic sp, sp, #63
+; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
+; REALIGN: vst1.64
+; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
+; REALIGN: vst1.64
+; REALIGN: orr [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
+; REALIGN: vst1.64
+; REALIGN: vst1.64
+; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #48
+; REALIGN: vst1.64
+; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #32
+; REALIGN: vst1.64
+; REALIGN: add [[R2:r[0-9]+]], [[R1:r[0-9]+]], #16
+; REALIGN: vst1.64
+; REALIGN: vst1.64
+ %retval = alloca <16 x float>, align 16
+ %0 = load <16 x float>* @T3_retval, align 16
+ store <16 x float> %0, <16 x float>* %retval
+ %1 = load <16 x float>* %retval
+ store <16 x float> %1, <16 x float>* %agg.result, align 16
+ ret void
+}
diff --git a/test/CodeGen/ARM/arguments.ll b/test/CodeGen/ARM/arguments.ll
index a8b42e63b71f..e7fbf9f28eff 100644
--- a/test/CodeGen/ARM/arguments.ll
+++ b/test/CodeGen/ARM/arguments.ll
@@ -2,9 +2,9 @@
; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+vfp2 | FileCheck %s -check-prefix=DARWIN
define i32 @f1(i32 %a, i64 %b) {
-; ELF: f1:
+; ELF-LABEL: f1:
; ELF: mov r0, r2
-; DARWIN: f1:
+; DARWIN-LABEL: f1:
; DARWIN: mov r0, r1
%tmp = call i32 @g1(i64 %b)
ret i32 %tmp
@@ -12,10 +12,10 @@ define i32 @f1(i32 %a, i64 %b) {
; test that allocating the double to r2/r3 makes r1 unavailable on gnueabi.
define i32 @f2() nounwind optsize {
-; ELF: f2:
+; ELF-LABEL: f2:
; ELF: mov [[REGISTER:(r[0-9]+)]], #128
; ELF: str [[REGISTER]], [
-; DARWIN: f2:
+; DARWIN-LABEL: f2:
; DARWIN: mov r3, #128
entry:
%0 = tail call i32 (i32, ...)* @g2(i32 5, double 1.600000e+01, i32 128) nounwind optsize ; <i32> [#uses=1]
@@ -26,10 +26,10 @@ entry:
; test that on gnueabi a 64 bit value at this position will cause r3 to go
; unused and the value stored in [sp]
-; ELF: f3:
+; ELF-LABEL: f3:
; ELF: ldr r0, [sp]
; ELF-NEXT: mov pc, lr
-; DARWIN: f3:
+; DARWIN-LABEL: f3:
; DARWIN: mov r0, r3
; DARWIN-NEXT: mov pc, lr
define i32 @f3(i32 %i, i32 %j, i32 %k, i64 %l, ...) {
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 07620700aedb..88d797e83648 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -1,6 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck -check-prefix=ARM %s
; RUN: llc < %s -march=thumb | FileCheck -check-prefix=THUMB %s
; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck -check-prefix=T2 %s
+; RUN: llc < %s -mtriple=thumbv8 | FileCheck -check-prefix=V8 %s
; FIXME: The -march=thumb test doesn't change if -disable-peephole is specified.
@@ -39,6 +40,17 @@ tailrecurse: ; preds = %sw.bb, %entry
br i1 %tst, label %sw.bb, label %tailrecurse.switch
tailrecurse.switch: ; preds = %tailrecurse
+; V8-LABEL: %tailrecurse.switch
+; V8: cmp
+; V8-NEXT: beq
+; V8-NEXT: %tailrecurse.switch
+; V8: cmp
+; V8-NEXT: beq
+; V8-NEXT: %tailrecurse.switch
+; V8: cmp
+; V8-NEXT: beq
+; V8-NEXT: b
+; The trailing space in the last line checks that the branch is unconditional
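+; (For example, "b " with the trailing space matches "b .LBB0_3" but not
+; "bne .LBB0_3"; the block label here is hypothetical.)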
switch i32 %and, label %sw.epilog [
i32 1, label %sw.bb
i32 3, label %sw.bb6
@@ -73,6 +85,7 @@ sw.epilog: ; preds = %tailrecurse.switch
; ARM: bar
; THUMB: bar
; T2: bar
+; V8-LABEL: bar:
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
entry:
%0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
@@ -81,22 +94,32 @@ entry:
; ARM: ands
; THUMB: ands
; T2: ands
+; V8: ands
+; V8-NEXT: beq
%3 = and i32 %2, 112
%4 = icmp eq i32 %3, 0
br i1 %4, label %return, label %bb
bb: ; preds = %entry
+; V8-NEXT: %bb
%5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
%6 = load i8* %5, align 1
%7 = zext i8 %6 to i32
; ARM: andsne
; THUMB: ands
; T2: andsne
+; V8: ands
+; V8-NEXT: beq
%8 = and i32 %7, 112
%9 = icmp eq i32 %8, 0
br i1 %9, label %return, label %bb2
bb2: ; preds = %bb
+; V8-NEXT: %bb2
+; V8-NEXT: cmp
+; V8-NEXT: it ne
+; V8-NEXT: cmpne
+; V8-NEXT: bne
%10 = icmp eq i32 %3, 16
%11 = icmp eq i32 %8, 16
%or.cond = or i1 %10, %11
diff --git a/test/CodeGen/ARM/arm-frameaddr.ll b/test/CodeGen/ARM/arm-frameaddr.ll
index 2cf1422c66a9..9c4173ef0ce2 100644
--- a/test/CodeGen/ARM/arm-frameaddr.ll
+++ b/test/CodeGen/ARM/arm-frameaddr.ll
@@ -5,10 +5,10 @@
define i8* @t() nounwind {
entry:
-; DARWIN: t:
+; DARWIN-LABEL: t:
; DARWIN: mov r0, r7
-; LINUX: t:
+; LINUX-LABEL: t:
; LINUX: mov r0, r11
%0 = call i8* @llvm.frameaddress(i32 0)
ret i8* %0
diff --git a/test/CodeGen/ARM/arm-modifier.ll b/test/CodeGen/ARM/arm-modifier.ll
index c74701663459..854864277720 100644
--- a/test/CodeGen/ARM/arm-modifier.ll
+++ b/test/CodeGen/ARM/arm-modifier.ll
@@ -60,8 +60,14 @@ ret void
define i64 @f4(i64* %val) nounwind {
entry:
- ;CHECK: f4
+ ;CHECK-LABEL: f4:
;CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
%0 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r,*Qo"(i64* %val, i64* %val) nounwind
ret i64 %0
}
+
+; PR16490
+define void @f5(i64 %__pu_val) {
+ call void asm sideeffect "$1", "r,i"(i64 %__pu_val, i32 -14)
+ ret void
+}
diff --git a/test/CodeGen/ARM/arm-returnaddr.ll b/test/CodeGen/ARM/arm-returnaddr.ll
index 1272e8efc26b..4266572b077f 100644
--- a/test/CodeGen/ARM/arm-returnaddr.ll
+++ b/test/CodeGen/ARM/arm-returnaddr.ll
@@ -7,7 +7,7 @@
define i8* @rt0(i32 %x) nounwind readnone {
entry:
-; CHECK: rt0:
+; CHECK-LABEL: rt0:
; CHECK: {r7, lr}
; CHECK: mov r0, lr
%0 = tail call i8* @llvm.returnaddress(i32 0)
@@ -16,7 +16,7 @@ entry:
define i8* @rt2() nounwind readnone {
entry:
-; CHECK: rt2:
+; CHECK-LABEL: rt2:
; CHECK: {r7, lr}
; CHECK: ldr r[[R0:[0-9]+]], [r7]
; CHECK: ldr r0, [r0]
diff --git a/test/CodeGen/ARM/atomic-64bit.ll b/test/CodeGen/ARM/atomic-64bit.ll
index f2c7305ff33a..0477d4f40160 100644
--- a/test/CodeGen/ARM/atomic-64bit.ll
+++ b/test/CodeGen/ARM/atomic-64bit.ll
@@ -1,155 +1,155 @@
; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabihf | FileCheck %s --check-prefix=CHECK-THUMB
+; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabihf -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-THUMB
define i64 @test1(i64* %ptr, i64 %val) {
-; CHECK: test1:
-; CHECK: dmb ish
+; CHECK-LABEL: test1:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: adds [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: adc [[REG4:(r[0-9]?[13579])]], [[REG2]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test1:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test1:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: adds.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: adc.w [[REG4:[a-z0-9]+]], [[REG2]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw add i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test2(i64* %ptr, i64 %val) {
-; CHECK: test2:
-; CHECK: dmb ish
+; CHECK-LABEL: test2:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: subs [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: sbc [[REG4:(r[0-9]?[13579])]], [[REG2]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test2:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test2:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: subs.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: sbc.w [[REG4:[a-z0-9]+]], [[REG2]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw sub i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test3(i64* %ptr, i64 %val) {
-; CHECK: test3:
-; CHECK: dmb ish
+; CHECK-LABEL: test3:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: and [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: and [[REG4:(r[0-9]?[13579])]], [[REG2]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test3:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test3:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: and.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: and.w [[REG4:[a-z0-9]+]], [[REG2]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw and i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test4(i64* %ptr, i64 %val) {
-; CHECK: test4:
-; CHECK: dmb ish
+; CHECK-LABEL: test4:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: orr [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: orr [[REG4:(r[0-9]?[13579])]], [[REG2]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test4:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test4:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: orr.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: orr.w [[REG4:[a-z0-9]+]], [[REG2]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw or i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test5(i64* %ptr, i64 %val) {
-; CHECK: test5:
-; CHECK: dmb ish
+; CHECK-LABEL: test5:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: eor [[REG3:(r[0-9]?[02468])]], [[REG1]]
; CHECK: eor [[REG4:(r[0-9]?[13579])]], [[REG2]]
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test5:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test5:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: eor.w [[REG3:[a-z0-9]+]], [[REG1]]
; CHECK-THUMB: eor.w [[REG4:[a-z0-9]+]], [[REG2]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw xor i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test6(i64* %ptr, i64 %val) {
-; CHECK: test6:
-; CHECK: dmb ish
+; CHECK-LABEL: test6:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: strexd {{[a-z0-9]+}}, {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test6:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test6:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw xchg i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
-; CHECK: test7:
-; CHECK: dmb ish
+; CHECK-LABEL: test7:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: cmp [[REG1]]
; CHECK: cmpeq [[REG2]]
@@ -157,10 +157,10 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK: strexd {{[a-z0-9]+}}, {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test7:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test7:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: cmp [[REG1]]
; CHECK-THUMB: it eq
@@ -169,35 +169,21 @@ define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
ret i64 %r
}
-; Compiles down to cmpxchg
-; FIXME: Should compile to a single ldrexd
+; Compiles down to a single ldrexd
define i64 @test8(i64* %ptr) {
-; CHECK: test8:
+; CHECK-LABEL: test8:
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
-; CHECK: cmp [[REG1]]
-; CHECK: cmpeq [[REG2]]
-; CHECK: bne
-; CHECK: strexd {{[a-z0-9]+}}, {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
-; CHECK: cmp
-; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test8:
+; CHECK-THUMB-LABEL: test8:
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
-; CHECK-THUMB: cmp [[REG1]]
-; CHECK-THUMB: it eq
-; CHECK-THUMB: cmpeq [[REG2]]
-; CHECK-THUMB: bne
-; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
-; CHECK-THUMB: cmp
-; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = load atomic i64* %ptr seq_cst, align 8
ret i64 %r
@@ -206,29 +192,29 @@ define i64 @test8(i64* %ptr) {
; Compiles down to atomicrmw xchg; there really isn't any more efficient
; way to write it.
define void @test9(i64* %ptr, i64 %val) {
-; CHECK: test9:
-; CHECK: dmb ish
+; CHECK-LABEL: test9:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: strexd {{[a-z0-9]+}}, {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test9:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test9:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: strexd {{[a-z0-9]+}}, {{[a-z0-9]+}}, {{[a-z0-9]+}}
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
store atomic i64 %val, i64* %ptr seq_cst, align 8
ret void
}
define i64 @test10(i64* %ptr, i64 %val) {
-; CHECK: test10:
-; CHECK: dmb ish
+; CHECK-LABEL: test10:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
@@ -236,10 +222,10 @@ define i64 @test10(i64* %ptr, i64 %val) {
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test10:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test10:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
@@ -247,15 +233,15 @@ define i64 @test10(i64* %ptr, i64 %val) {
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw min i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test11(i64* %ptr, i64 %val) {
-; CHECK: test11:
-; CHECK: dmb ish
+; CHECK-LABEL: test11:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
@@ -263,11 +249,11 @@ define i64 @test11(i64* %ptr, i64 %val) {
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test11:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test11:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
@@ -275,15 +261,15 @@ define i64 @test11(i64* %ptr, i64 %val) {
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw umin i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test12(i64* %ptr, i64 %val) {
-; CHECK: test12:
-; CHECK: dmb ish
+; CHECK-LABEL: test12:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
@@ -291,10 +277,10 @@ define i64 @test12(i64* %ptr, i64 %val) {
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test12:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test12:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
@@ -302,15 +288,15 @@ define i64 @test12(i64* %ptr, i64 %val) {
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw max i64* %ptr, i64 %val seq_cst
ret i64 %r
}
define i64 @test13(i64* %ptr, i64 %val) {
-; CHECK: test13:
-; CHECK: dmb ish
+; CHECK-LABEL: test13:
+; CHECK: dmb {{ish$}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], [[REG2:(r[0-9]?[13579])]]
; CHECK: subs {{[a-z0-9]+}}, [[REG1]], [[REG3:(r[0-9]?[02468])]]
; CHECK: sbcs {{[a-z0-9]+}}, [[REG2]], [[REG4:(r[0-9]?[13579])]]
@@ -318,10 +304,10 @@ define i64 @test13(i64* %ptr, i64 %val) {
; CHECK: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK: cmp
; CHECK: bne
-; CHECK: dmb ish
+; CHECK: dmb {{ish$}}
-; CHECK-THUMB: test13:
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB-LABEL: test13:
+; CHECK-THUMB: dmb {{ish$}}
; CHECK-THUMB: ldrexd [[REG1:[a-z0-9]+]], [[REG2:[a-z0-9]+]]
; CHECK-THUMB: subs.w {{[a-z0-9]+}}, [[REG1]], [[REG3:[a-z0-9]+]]
; CHECK-THUMB: sbcs.w {{[a-z0-9]+}}, [[REG2]], [[REG4:[a-z0-9]+]]
@@ -329,7 +315,7 @@ define i64 @test13(i64* %ptr, i64 %val) {
; CHECK-THUMB: strexd {{[a-z0-9]+}}, [[REG3]], [[REG4]]
; CHECK-THUMB: cmp
; CHECK-THUMB: bne
-; CHECK-THUMB: dmb ish
+; CHECK-THUMB: dmb {{ish$}}
%r = atomicrmw umax i64* %ptr, i64 %val seq_cst
ret i64 %r
}
diff --git a/test/CodeGen/ARM/atomic-cmp.ll b/test/CodeGen/ARM/atomic-cmp.ll
index 82726daebca3..51ada693d0b8 100644
--- a/test/CodeGen/ARM/atomic-cmp.ll
+++ b/test/CodeGen/ARM/atomic-cmp.ll
@@ -3,11 +3,11 @@
; rdar://8964854
define i8 @t(i8* %a, i8 %b, i8 %c) nounwind {
-; ARM: t:
+; ARM-LABEL: t:
; ARM: ldrexb
; ARM: strexb
-; T2: t:
+; T2-LABEL: t:
; T2: ldrexb
; T2: strexb
%tmp0 = cmpxchg i8* %a, i8 %b, i8 %c monotonic
diff --git a/test/CodeGen/ARM/atomic-load-store.ll b/test/CodeGen/ARM/atomic-load-store.ll
index 12a8fe4cd884..53c7184d2a84 100644
--- a/test/CodeGen/ARM/atomic-load-store.ll
+++ b/test/CodeGen/ARM/atomic-load-store.ll
@@ -1,19 +1,20 @@
; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s -check-prefix=ARM
; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s -check-prefix=THUMBTWO
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s -check-prefix=THUMBTWO
; RUN: llc < %s -mtriple=thumbv6-apple-ios | FileCheck %s -check-prefix=THUMBONE
+; RUN: llc < %s -mtriple=armv4-apple-ios | FileCheck %s -check-prefix=ARMV4
define void @test1(i32* %ptr, i32 %val1) {
; ARM: test1
-; ARM: dmb ish
+; ARM: dmb {{ish$}}
; ARM-NEXT: str
-; ARM-NEXT: dmb ish
+; ARM-NEXT: dmb {{ish$}}
; THUMBONE: test1
; THUMBONE: __sync_lock_test_and_set_4
; THUMBTWO: test1
-; THUMBTWO: dmb ish
+; THUMBTWO: dmb {{ish$}}
; THUMBTWO-NEXT: str
-; THUMBTWO-NEXT: dmb ish
+; THUMBTWO-NEXT: dmb {{ish$}}
store atomic i32 %val1, i32* %ptr seq_cst, align 4
ret void
}
@@ -21,12 +22,12 @@ define void @test1(i32* %ptr, i32 %val1) {
define i32 @test2(i32* %ptr) {
; ARM: test2
; ARM: ldr
-; ARM-NEXT: dmb ish
+; ARM-NEXT: dmb {{ish$}}
; THUMBONE: test2
; THUMBONE: __sync_val_compare_and_swap_4
; THUMBTWO: test2
; THUMBTWO: ldr
-; THUMBTWO-NEXT: dmb ish
+; THUMBTWO-NEXT: dmb {{ish$}}
%val = load atomic i32* %ptr seq_cst, align 4
ret i32 %val
}
@@ -54,3 +55,17 @@ define void @test4(i8* %ptr1, i8* %ptr2) {
store atomic i8 %val, i8* %ptr2 seq_cst, align 1
ret void
}
+
+define i64 @test_old_load_64bit(i64* %p) {
+; ARMV4: test_old_load_64bit
+; ARMV4: ___sync_val_compare_and_swap_8
+ %1 = load atomic i64* %p seq_cst, align 8
+ ret i64 %1
+}
+
+define void @test_old_store_64bit(i64* %p, i64 %v) {
+; ARMV4: test_old_store_64bit
+; ARMV4: ___sync_lock_test_and_set_8
+ store atomic i64 %v, i64* %p seq_cst, align 8
+ ret void
+}
diff --git a/test/CodeGen/ARM/atomic-op.ll b/test/CodeGen/ARM/atomic-op.ll
index 6e6b36377fde..9a79c9fd7b1b 100644
--- a/test/CodeGen/ARM/atomic-op.ll
+++ b/test/CodeGen/ARM/atomic-op.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-T1
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
@@ -24,78 +26,93 @@ entry:
; CHECK: ldrex
; CHECK: add
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_add_4
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
store i32 %0, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_sub_4
%1 = atomicrmw sub i32* %val2, i32 30 monotonic
store i32 %1, i32* %old
; CHECK: ldrex
; CHECK: add
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_add_4
%2 = atomicrmw add i32* %val2, i32 1 monotonic
store i32 %2, i32* %old
; CHECK: ldrex
; CHECK: sub
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_sub_4
%3 = atomicrmw sub i32* %val2, i32 1 monotonic
store i32 %3, i32* %old
; CHECK: ldrex
; CHECK: and
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_and_4
%4 = atomicrmw and i32* %andt, i32 4080 monotonic
store i32 %4, i32* %old
; CHECK: ldrex
; CHECK: or
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_or_4
%5 = atomicrmw or i32* %ort, i32 4080 monotonic
store i32 %5, i32* %old
; CHECK: ldrex
; CHECK: eor
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_xor_4
%6 = atomicrmw xor i32* %xort, i32 4080 monotonic
store i32 %6, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_min_4
%7 = atomicrmw min i32* %val2, i32 16 monotonic
store i32 %7, i32* %old
%neg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_min_4
%8 = atomicrmw min i32* %val2, i32 %neg monotonic
store i32 %8, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_max_4
%9 = atomicrmw max i32* %val2, i32 1 monotonic
store i32 %9, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_max_4
%10 = atomicrmw max i32* %val2, i32 0 monotonic
store i32 %10, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_4
%11 = atomicrmw umin i32* %val2, i32 16 monotonic
store i32 %11, i32* %old
%uneg = sub i32 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_4
%12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
store i32 %12, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_4
%13 = atomicrmw umax i32* %val2, i32 1 monotonic
store i32 %13, i32* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_4
%14 = atomicrmw umax i32* %val2, i32 0 monotonic
store i32 %14, i32* %old
@@ -110,22 +127,26 @@ entry:
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_2
%0 = atomicrmw umin i16* %val, i16 16 monotonic
store i16 %0, i16* %old
%uneg = sub i16 0, 1
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_2
%1 = atomicrmw umin i16* %val, i16 %uneg monotonic
store i16 %1, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_2
%2 = atomicrmw umax i16* %val, i16 1 monotonic
store i16 %2, i16* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_2
%3 = atomicrmw umax i16* %val, i16 0 monotonic
store i16 %3, i16* %old
ret void
@@ -139,22 +160,26 @@ entry:
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_1
%0 = atomicrmw umin i8* %val, i8 16 monotonic
store i8 %0, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umin_1
%uneg = sub i8 0, 1
%1 = atomicrmw umin i8* %val, i8 %uneg monotonic
store i8 %1, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_1
%2 = atomicrmw umax i8* %val, i8 1 monotonic
store i8 %2, i8* %old
; CHECK: ldrex
; CHECK: cmp
; CHECK: strex
+ ; CHECK-T1: blx ___sync_fetch_and_umax_1
%3 = atomicrmw umax i8* %val, i8 0 monotonic
store i8 %3, i8* %old
ret void
diff --git a/test/CodeGen/ARM/atomic-ops-v8.ll b/test/CodeGen/ARM/atomic-ops-v8.ll
new file mode 100644
index 000000000000..3f93929fd19d
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-ops-v8.ll
@@ -0,0 +1,1344 @@
+; RUN: llc -mtriple=armv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i8:
+ %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i16:
+ %old = atomicrmw add i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i32:
+ %old = atomicrmw add i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i64:
+ %old = atomicrmw add i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below are a reasonable guess but could change: they certainly come into the
+ ; function there.
+; CHECK-NEXT: adds [[NEW1:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: adc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i8:
+ %old = atomicrmw sub i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i16:
+ %old = atomicrmw sub i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i32:
+ %old = atomicrmw sub i32* @var32, i32 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_sub_i64:
+ %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below are a reasonable guess but could change: they certainly come into the
+ ; function there.
+; CHECK-NEXT: subs [[NEW1:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: sbc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i8:
+ %old = atomicrmw and i8* @var8, i8 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i16:
+ %old = atomicrmw and i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i32:
+ %old = atomicrmw and i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_and_i64:
+ %old = atomicrmw and i64* @var64, i64 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below are a reasonable guess but could change: they certainly come into the
+ ; function there.
+; CHECK-NEXT: and{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: and{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_or_i8:
+ %old = atomicrmw or i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_or_i16:
+ %old = atomicrmw or i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_or_i32:
+ %old = atomicrmw or i32* @var32, i32 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_or_i64:
+ %old = atomicrmw or i64* @var64, i64 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: orr{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: orr{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xor_i8:
+ %old = atomicrmw xor i8* @var8, i8 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xor_i16:
+ %old = atomicrmw xor i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xor_i32:
+ %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xor_i64:
+ %old = atomicrmw xor i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: eor{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: eor{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xchg_i8:
+ %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xchg_i16:
+ %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xchg_i32:
+ %old = atomicrmw xchg i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_xchg_i64:
+ %old = atomicrmw xchg i64* @var64, i64 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_min_i8:
+ %old = atomicrmw min i8* @var8, i8 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLDX]], r0
+; Thumb mode: it ge
+; CHECK: movge r[[OLDX]], r0
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_min_i16:
+ %old = atomicrmw min i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLDX]], r0
+; Thumb mode: it ge
+; CHECK: movge r[[OLDX]], r0
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_min_i32:
+ %old = atomicrmw min i32* @var32, i32 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it lt
+; CHECK: movlt r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_min_i64:
+ %old = atomicrmw min i64* @var64, i64 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
+; CHECK-NEXT: blt .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_max_i8:
+ %old = atomicrmw max i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLDX]], r0
+; Thumb mode: it le
+; CHECK: movle r[[OLDX]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_max_i16:
+ %old = atomicrmw max i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLDX]], r0
+; Thumb mode: it le
+; CHECK: movle r[[OLDX]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_max_i32:
+ %old = atomicrmw max i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it gt
+; CHECK: movgt r[[NEW]], r[[OLD]]
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_max_i64:
+ %old = atomicrmw max i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
+; CHECK-NEXT: bge .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umin_i8:
+ %old = atomicrmw umin i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it lo
+; CHECK: movlo r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umin_i16:
+ %old = atomicrmw umin i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it lo
+; CHECK: movlo r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umin_i32:
+ %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it lo
+; CHECK: movlo r[[NEW]], r[[OLD]]
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umin_i64:
+ %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
+; CHECK-NEXT: blo .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umax_i8:
+ %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it hi
+; CHECK: movhi r[[NEW]], r[[OLD]]
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umax_i16:
+ %old = atomicrmw umax i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it hi
+; CHECK: movhi r[[NEW]], r[[OLD]]
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umax_i32:
+ %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
+; CHECK-NEXT: cmp r[[OLD]], r0
+; Thumb mode: it hi
+; CHECK: movhi r[[NEW]], r[[OLD]]
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_umax_i64:
+ %old = atomicrmw umax i64* @var64, i64 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
+; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
+; CHECK-NEXT: bhs .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD1]]
+; CHECK-NEXT: mov r1, r[[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
+; CHECK-LABEL: test_atomic_cmpxchg_i8:
+ %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLD]], r0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+ ; As above, r1 is a reasonable guess.
+; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
+; CHECK-LABEL: test_atomic_cmpxchg_i16:
+ %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLD]], r0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+ ; As above, r1 is a reasonable guess.
+; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
+; CHECK-LABEL: test_atomic_cmpxchg_i32:
+ %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp r[[OLD]], r0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+ ; As above, r1 is a reasonable guess.
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
+; CHECK-LABEL: test_atomic_cmpxchg_i64:
+ %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK-NEXT: ldrexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: cmp [[OLD1]], r0
+; Thumb mode: it eq
+; CHECK: cmpeq [[OLD2]], r1
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
+; CHECK-NEXT: BB#2:
+ ; As above, r2, r3 is a reasonable guess.
+; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, [[OLD1]]
+; CHECK-NEXT: mov r1, [[OLD2]]
+ ret i64 %old
+}
+
+define i8 @test_atomic_load_monotonic_i8() nounwind {
+; CHECK-LABEL: test_atomic_load_monotonic_i8:
+ %val = load atomic i8* @var8 monotonic, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK: ldrb r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
+; CHECK-LABEL: test_atomic_load_monotonic_regoff_i8:
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i8*
+
+ %val = load atomic i8* %addr monotonic, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldrb r0, [r0, r2]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_acquire_i8() nounwind {
+; CHECK-LABEL: test_atomic_load_acquire_i8:
+ %val = load atomic i8* @var8 acquire, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldab r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret i8 %val
+}
+
+define i8 @test_atomic_load_seq_cst_i8() nounwind {
+; CHECK-LABEL: test_atomic_load_seq_cst_i8:
+ %val = load atomic i8* @var8 seq_cst, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldab r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret i8 %val
+}
+
+define i16 @test_atomic_load_monotonic_i16() nounwind {
+; CHECK-LABEL: test_atomic_load_monotonic_i16:
+ %val = load atomic i16* @var16 monotonic, align 2
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var16
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldrh r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret i16 %val
+}
+
+define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
+; CHECK-LABEL: test_atomic_load_monotonic_regoff_i32:
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i32*
+
+ %val = load atomic i32* %addr monotonic, align 4
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldr r0, [r0, r2]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret i32 %val
+}
+
+define i64 @test_atomic_load_seq_cst_i64() nounwind {
+; CHECK-LABEL: test_atomic_load_seq_cst_i64:
+ %val = load atomic i64* @var64 seq_cst, align 8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var64
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldaexd r0, r1, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret i64 %val
+}
+
+define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_monotonic_i8:
+ store atomic i8 %val, i8* @var8 monotonic, align 1
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK: strb r0, [r[[ADDR]]]
+
+ ret void
+}
+
+define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_monotonic_regoff_i8:
+
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i8*
+
+ store atomic i8 %val, i8* %addr monotonic, align 1
+; CHECK: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
+; CHECK: strb [[VAL]], [r0, r2]
+
+ ret void
+}
+
+define void @test_atomic_store_release_i8(i8 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_release_i8:
+ store atomic i8 %val, i8* @var8 release, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: stlb r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret void
+}
+
+define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_seq_cst_i8:
+ store atomic i8 %val, i8* @var8 seq_cst, align 1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: stlb r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret void
+}
+
+define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_monotonic_i16:
+ store atomic i16 %val, i16* @var16 monotonic, align 2
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movt r[[ADDR]], :upper16:var16
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: strh r0, [r[[ADDR]]]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+ ret void
+}
+
+define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_monotonic_regoff_i32:
+
+ %addr_int = add i64 %base, %off
+ %addr = inttoptr i64 %addr_int to i32*
+
+ store atomic i32 %val, i32* %addr monotonic, align 4
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: ldr [[VAL:r[0-9]+]], [sp]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: str [[VAL]], [r0, r2]
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret void
+}
+
+define void @test_atomic_store_release_i64(i64 %val) nounwind {
+; CHECK-LABEL: test_atomic_store_release_i64:
+ store atomic i64 %val, i64* @var64 release, align 8
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
+; CHECK: movt r[[ADDR]], :upper16:var64
+
+; CHECK: .LBB{{[0-9]+}}_1:
+ ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+ ret void
+}
+
+define i32 @not.barriers(i32* %var, i1 %cond) {
+; CHECK-LABEL: not.barriers:
+ br i1 %cond, label %atomic_ver, label %simple_ver
+simple_ver:
+ %oldval = load i32* %var
+ %newval = add nsw i32 %oldval, -1
+ store i32 %newval, i32* %var
+ br label %somewhere
+atomic_ver:
+ fence seq_cst
+ %val = atomicrmw add i32* %var, i32 -1 monotonic
+ fence seq_cst
+ br label %somewhere
+; CHECK: dmb
+; CHECK: ldrex
+; CHECK: dmb
+ ; The key point here is that the second dmb isn't immediately followed by the
+ ; simple_ver basic block, which LLVM attempted to do when DMB had been marked
+ ; with isBarrier. For now, look for something that looks like "somewhere".
+; CHECK-NEXT: mov
+somewhere:
+ %combined = phi i32 [ %val, %atomic_ver ], [ %newval, %simple_ver]
+ ret i32 %combined
+}
diff --git a/test/CodeGen/ARM/atomicrmw_minmax.ll b/test/CodeGen/ARM/atomicrmw_minmax.ll
index 69f1384e125c..5befc228e03c 100644
--- a/test/CodeGen/ARM/atomicrmw_minmax.ll
+++ b/test/CodeGen/ARM/atomicrmw_minmax.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=arm -mcpu=cortex-a9 < %s | FileCheck %s
-; CHECK: max:
+; CHECK-LABEL: max:
define i32 @max(i8 %ctx, i32* %ptr, i32 %val)
{
; CHECK: ldrex
@@ -10,7 +10,7 @@ define i32 @max(i8 %ctx, i32* %ptr, i32 %val)
ret i32 %old
}
-; CHECK: min:
+; CHECK-LABEL: min:
define i32 @min(i8 %ctx, i32* %ptr, i32 %val)
{
; CHECK: ldrex
diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
index c14f5302d311..0217a4a8fb83 100644
--- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll
+++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
@@ -6,7 +6,7 @@
define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind readnone {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: muls [[REG:(r[0-9]+)]], r3, r2
; CHECK-NEXT: mul [[REG2:(r[0-9]+)]], r1, r0
; CHECK-NEXT: muls r0, [[REG]], [[REG2]]
@@ -20,7 +20,7 @@ define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind readnone {
; rdar://10357570
define void @t2(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
%tobool7 = icmp eq i32* %ptr2, null
br i1 %tobool7, label %while.end, label %while.body
@@ -54,7 +54,7 @@ while.end:
; rdar://12878928
define void @t3(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind minsize {
entry:
-; CHECK: t3:
+; CHECK-LABEL: t3:
%tobool7 = icmp eq i32* %ptr2, null
br i1 %tobool7, label %while.end, label %while.body
diff --git a/test/CodeGen/ARM/bfc.ll b/test/CodeGen/ARM/bfc.ll
index c4a44b4472d1..3a17d2b8cf99 100644
--- a/test/CodeGen/ARM/bfc.ll
+++ b/test/CodeGen/ARM/bfc.ll
@@ -2,7 +2,7 @@
; 4278190095 = 0xff00000f
define i32 @f1(i32 %a) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: bfc
%tmp = and i32 %a, 4278190095
ret i32 %tmp
@@ -10,7 +10,7 @@ define i32 @f1(i32 %a) {
; 4286578688 = 0xff800000
define i32 @f2(i32 %a) {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: bfc
%tmp = and i32 %a, 4286578688
ret i32 %tmp
@@ -18,7 +18,7 @@ define i32 @f2(i32 %a) {
; 4095 = 0x00000fff
define i32 @f3(i32 %a) {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: bfc
%tmp = and i32 %a, 4095
ret i32 %tmp
diff --git a/test/CodeGen/ARM/bfi.ll b/test/CodeGen/ARM/bfi.ll
index 84f3813975a9..72a467809978 100644
--- a/test/CodeGen/ARM/bfi.ll
+++ b/test/CodeGen/ARM/bfi.ll
@@ -52,7 +52,7 @@ define i32 @f4(i32 %a) nounwind {
; rdar://8458663
define i32 @f5(i32 %a, i32 %b) nounwind {
entry:
-; CHECK: f5:
+; CHECK-LABEL: f5:
; CHECK-NOT: bfc
; CHECK: bfi r0, r1, #20, #4
%0 = and i32 %a, -15728641
@@ -65,7 +65,7 @@ entry:
; rdar://9609030
define i32 @f6(i32 %a, i32 %b) nounwind readnone {
entry:
-; CHECK: f6:
+; CHECK-LABEL: f6:
; CHECK-NOT: bic
; CHECK: bfi r0, r1, #8, #9
%and = and i32 %a, -130817
diff --git a/test/CodeGen/ARM/bswap-inline-asm.ll b/test/CodeGen/ARM/bswap-inline-asm.ll
index 472213d5f85f..31f9d729cf6e 100644
--- a/test/CodeGen/ARM/bswap-inline-asm.ll
+++ b/test/CodeGen/ARM/bswap-inline-asm.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6 | FileCheck %s
define i32 @t1(i32 %x) nounwind {
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK-NOT: InlineAsm
; CHECK: rev
%asmtmp = tail call i32 asm "rev $0, $1\0A", "=l,l"(i32 %x) nounwind
diff --git a/test/CodeGen/ARM/build-attributes-encoding.s b/test/CodeGen/ARM/build-attributes-encoding.s
new file mode 100644
index 000000000000..5ad51b284113
--- /dev/null
+++ b/test/CodeGen/ARM/build-attributes-encoding.s
@@ -0,0 +1,85 @@
+// This tests that ARM attributes are properly encoded.
+
+// RUN: llvm-mc < %s -triple=arm-linux-gnueabi -filetype=obj -o - \
+// RUN: | llvm-readobj -s -sd | FileCheck %s
+
+// Tag_CPU_name (=5)
+.cpu Cortex-A8
+
+// Tag_CPU_arch (=6)
+.eabi_attribute 6, 10
+
+// Tag_arch_profile (=7)
+.eabi_attribute 7, 'A'
+
+// Tag_ARM_ISA_use (=8)
+.eabi_attribute 8, 1
+
+// Tag_THUMB_ISA_use (=9)
+.eabi_attribute 9, 2
+
+// Tag_FP_arch (=10)
+.fpu vfpv3
+
+// Tag_Advanced_SIMD_arch (=12)
+.eabi_attribute 12, 2
+
+// Tag_ABI_FP_denormal (=20)
+.eabi_attribute 20, 1
+
+// Tag_ABI_FP_exceptions (=21)
+.eabi_attribute 21, 1
+
+// Tag_ABI_FP_number_model (=23)
+.eabi_attribute 23, 1
+
+// Tag_ABI_align_needed (=24)
+.eabi_attribute 24, 1
+
+// Tag_ABI_align_preserved (=25)
+.eabi_attribute 25, 1
+
+// Tag_ABI_HardFP_use (=27)
+.eabi_attribute 27, 0
+
+// Tag_ABI_VFP_args (=28)
+.eabi_attribute 28, 1
+
+// Tag_FP_HP_extension (=36)
+.eabi_attribute 36, 1
+
+// Tag_MPextension_use (=42)
+.eabi_attribute 42, 1
+
+// Tag_DIV_use (=44)
+.eabi_attribute 44, 2
+
+// Tag_Virtualization_use (=68)
+.eabi_attribute 68, 3
+
+// Check that values > 128 are encoded properly
+.eabi_attribute 110, 160
+
+// Check that tags > 128 are encoded properly
+.eabi_attribute 129, 1
+.eabi_attribute 250, 1
+
+// CHECK: Section {
+// CHECK: Name: .ARM.attributes
+// CHECK-NEXT: Type: SHT_ARM_ATTRIBUTES
+// CHECK-NEXT: Flags [ (0x0)
+// CHECK-NEXT: ]
+// CHECK-NEXT: Address: 0x0
+// CHECK-NEXT: Offset: 0x34
+// CHECK-NEXT: Size: 70
+// CHECK-NEXT: Link: 0
+// CHECK-NEXT: Info: 0
+// CHECK-NEXT: AddressAlignment: 1
+// CHECK-NEXT: EntrySize: 0
+// CHECK-NEXT: SectionData (
+// CHECK-NEXT: 0000: 41450000 00616561 62690001 3B000000
+// CHECK-NEXT: 0010: 05434F52 5445582D 41380006 0A074108
+// CHECK-NEXT: 0020: 0109020A 030C0214 01150117 01180119
+// CHECK-NEXT: 0030: 011B001C 0124012A 012C0244 036EA001
+// CHECK-NEXT: 0040: 810101FA 0101
+// CHECK-NEXT: )
diff --git a/test/CodeGen/ARM/byval_load_align.ll b/test/CodeGen/ARM/byval_load_align.ll
new file mode 100644
index 000000000000..2c0910c71d2f
--- /dev/null
+++ b/test/CodeGen/ARM/byval_load_align.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -mtriple thumbv7-apple-ios -O1 | FileCheck %s
+
+; rdar://15144402
+; Make sure we don't assume 4-byte alignment when loading from a byval argument
+; with alignment of 2.
+; CHECK: ldr r1, [r[[REG:[0-9]+]]]
+; CHECK: ldr r2, [r[[REG]], #4]
+; CHECK: ldr r3, [r[[REG]], #8]
+; CHECK-NOT: ldm
+; CHECK: .align 1 @ @sID
+
+%struct.ModuleID = type { [32 x i8], [32 x i8], i16 }
+
+@sID = internal constant %struct.ModuleID { [32 x i8] c"TEST\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", [32 x i8] c"1.0\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", i16 23 }, align 2
+
+; Function Attrs: nounwind ssp
+define void @Client() #0 {
+entry:
+ tail call void @Logger(i8 signext 97, %struct.ModuleID* byval @sID) #2
+ ret void
+}
+
+declare void @Logger(i8 signext, %struct.ModuleID* byval) #1
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/ARM/call-noret-minsize.ll b/test/CodeGen/ARM/call-noret-minsize.ll
index df3c19eca6a0..e610d29d77fc 100644
--- a/test/CodeGen/ARM/call-noret-minsize.ll
+++ b/test/CodeGen/ARM/call-noret-minsize.ll
@@ -4,10 +4,10 @@
define void @t1() noreturn minsize nounwind ssp {
entry:
-; ARM: t1:
+; ARM-LABEL: t1:
; ARM: bl _bar
-; SWIFT: t1:
+; SWIFT-LABEL: t1:
; SWIFT: bl _bar
tail call void @bar() noreturn nounwind
unreachable
@@ -15,10 +15,10 @@ entry:
define void @t2() noreturn minsize nounwind ssp {
entry:
-; ARM: t2:
+; ARM-LABEL: t2:
; ARM: bl _t1
-; SWIFT: t2:
+; SWIFT-LABEL: t2:
; SWIFT: bl _t1
tail call void @t1() noreturn nounwind
unreachable
diff --git a/test/CodeGen/ARM/call-noret.ll b/test/CodeGen/ARM/call-noret.ll
index 27062dca38dc..bb56e8b86336 100644
--- a/test/CodeGen/ARM/call-noret.ll
+++ b/test/CodeGen/ARM/call-noret.ll
@@ -4,11 +4,11 @@
define void @t1() noreturn nounwind ssp {
entry:
-; ARM: t1:
+; ARM-LABEL: t1:
; ARM: mov lr, pc
; ARM: b _bar
-; SWIFT: t1:
+; SWIFT-LABEL: t1:
; SWIFT: mov lr, pc
; SWIFT: b _bar
tail call void @bar() noreturn nounwind
@@ -17,11 +17,11 @@ entry:
define void @t2() noreturn nounwind ssp {
entry:
-; ARM: t2:
+; ARM-LABEL: t2:
; ARM: mov lr, pc
; ARM: b _t1
-; SWIFT: t2:
+; SWIFT-LABEL: t2:
; SWIFT: mov lr, pc
; SWIFT: b _t1
tail call void @t1() noreturn nounwind
diff --git a/test/CodeGen/ARM/call-tc.ll b/test/CodeGen/ARM/call-tc.ll
index 58fbbda0f6bd..d4636021b599 100644
--- a/test/CodeGen/ARM/call-tc.ll
+++ b/test/CodeGen/ARM/call-tc.ll
@@ -11,16 +11,16 @@
declare void @g(i32, i32, i32, i32)
define void @t1() {
-; CHECKELF: t1:
+; CHECKELF-LABEL: t1:
; CHECKELF: bl g(PLT)
call void @g( i32 1, i32 2, i32 3, i32 4 )
ret void
}
define void @t2() {
-; CHECKV6: t2:
+; CHECKV6-LABEL: t2:
; CHECKV6: bx r0
-; CHECKT2D: t2:
+; CHECKT2D-LABEL: t2:
; CHECKT2D: ldr
; CHECKT2D-NEXT: ldr
; CHECKT2D-NEXT: bx r0
@@ -30,11 +30,11 @@ define void @t2() {
}
define void @t3() {
-; CHECKV6: t3:
+; CHECKV6-LABEL: t3:
; CHECKV6: b _t2
-; CHECKELF: t3:
+; CHECKELF-LABEL: t3:
; CHECKELF: b t2(PLT)
-; CHECKT2D: t3:
+; CHECKT2D-LABEL: t3:
; CHECKT2D: b.w _t2
tail call void @t2( ) ; <i32> [#uses=0]
@@ -44,9 +44,9 @@ define void @t3() {
; Sibcall optimization of expanded libcalls. rdar://8707777
define double @t4(double %a) nounwind readonly ssp {
entry:
-; CHECKV6: t4:
+; CHECKV6-LABEL: t4:
; CHECKV6: b _sin
-; CHECKELF: t4:
+; CHECKELF-LABEL: t4:
; CHECKELF: b sin(PLT)
%0 = tail call double @sin(double %a) nounwind readonly ; <double> [#uses=1]
ret double %0
@@ -54,9 +54,9 @@ entry:
define float @t5(float %a) nounwind readonly ssp {
entry:
-; CHECKV6: t5:
+; CHECKV6-LABEL: t5:
; CHECKV6: b _sinf
-; CHECKELF: t5:
+; CHECKELF-LABEL: t5:
; CHECKELF: b sinf(PLT)
%0 = tail call float @sinf(float %a) nounwind readonly ; <float> [#uses=1]
ret float %0
@@ -68,9 +68,9 @@ declare double @sin(double) nounwind readonly
define i32 @t6(i32 %a, i32 %b) nounwind readnone {
entry:
-; CHECKV6: t6:
+; CHECKV6-LABEL: t6:
; CHECKV6: b ___divsi3
-; CHECKELF: t6:
+; CHECKELF-LABEL: t6:
; CHECKELF: b __aeabi_idiv(PLT)
%0 = sdiv i32 %a, %b
ret i32 %0
@@ -82,7 +82,7 @@ declare void @foo() nounwind
define void @t7() nounwind {
entry:
-; CHECKT2D: t7:
+; CHECKT2D-LABEL: t7:
; CHECKT2D: blxeq _foo
; CHECKT2D-NEXT: pop.w
; CHECKT2D-NEXT: b.w _foo
@@ -101,7 +101,7 @@ bb:
; rdar://11140249
define i32 @t8(i32 %x) nounwind ssp {
entry:
-; CHECKT2D: t8:
+; CHECKT2D-LABEL: t8:
; CHECKT2D-NOT: push
%and = and i32 %x, 1
%tobool = icmp eq i32 %and, 0
@@ -147,7 +147,7 @@ declare i32 @c(i32)
@x = external global i32, align 4
define i32 @t9() nounwind {
-; CHECKT2D: t9:
+; CHECKT2D-LABEL: t9:
; CHECKT2D: blx __ZN9MutexLockC1Ev
; CHECKT2D: blx __ZN9MutexLockD1Ev
; CHECKT2D: b.w ___divsi3
@@ -162,3 +162,20 @@ define i32 @t9() nounwind {
declare %class.MutexLock* @_ZN9MutexLockC1Ev(%class.MutexLock*) unnamed_addr nounwind align 2
declare %class.MutexLock* @_ZN9MutexLockD1Ev(%class.MutexLock*) unnamed_addr nounwind align 2
+
+; rdar://13827621
+; Correctly preserve the input chain for the tailcall node in the bitcast case,
+; otherwise the call to floorf is lost.
+define float @libcall_tc_test2(float* nocapture %a, float %b) {
+; CHECKT2D-LABEL: libcall_tc_test2:
+; CHECKT2D: blx _floorf
+; CHECKT2D: b.w _truncf
+ %1 = load float* %a, align 4
+ %call = tail call float @floorf(float %1)
+ store float %call, float* %a, align 4
+ %call1 = tail call float @truncf(float %b)
+ ret float %call1
+}
+
+declare float @floorf(float) readnone
+declare float @truncf(float) readnone
diff --git a/test/CodeGen/ARM/call_nolink.ll b/test/CodeGen/ARM/call_nolink.ll
index 5ec7f74a605f..48fa3a62ffb0 100644
--- a/test/CodeGen/ARM/call_nolink.ll
+++ b/test/CodeGen/ARM/call_nolink.ll
@@ -7,7 +7,7 @@
@numi = external global i32 ; <i32*> [#uses=1]
@counter = external global [2 x i32] ; <[2 x i32]*> [#uses=1]
-; CHECK: main_bb_2E_i_bb205_2E_i_2E_i_bb115_2E_i_2E_i:
+; CHECK-LABEL: main_bb_2E_i_bb205_2E_i_2E_i_bb115_2E_i_2E_i:
; CHECK-NOT: bx lr
define void @main_bb_2E_i_bb205_2E_i_2E_i_bb115_2E_i_2E_i() {
@@ -56,7 +56,7 @@ define void @PR15520(void ()* %fn) {
call void %fn()
ret void
-; CHECK: PR15520:
+; CHECK-LABEL: PR15520:
; CHECK: mov lr, pc
; CHECK: mov pc, r0
}
diff --git a/test/CodeGen/ARM/carry.ll b/test/CodeGen/ARM/carry.ll
index bf51cd627b3c..f67987f8eb61 100644
--- a/test/CodeGen/ARM/carry.ll
+++ b/test/CodeGen/ARM/carry.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i64 @f1(i64 %a, i64 %b) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: subs r
; CHECK: sbc r
entry:
@@ -10,7 +10,7 @@ entry:
}
define i64 @f2(i64 %a, i64 %b) {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: adc r
; CHECK: subs r
; CHECK: sbc r
@@ -22,7 +22,7 @@ entry:
; add with live carry
define i64 @f3(i32 %al, i32 %bl) {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: adds r
; CHECK: adc r
entry:
@@ -39,7 +39,7 @@ entry:
; rdar://10073745
define i64 @f4(i64 %x) nounwind readnone {
entry:
-; CHECK: f4:
+; CHECK-LABEL: f4:
; CHECK: rsbs r
; CHECK: rsc r
%0 = sub nsw i64 0, %x
@@ -49,7 +49,7 @@ entry:
; rdar://12559385
define i64 @f5(i32 %vi) {
entry:
-; CHECK: f5:
+; CHECK-LABEL: f5:
; CHECK: movw [[REG:r[0-9]+]], #36102
; CHECK: sbc r{{[0-9]+}}, r{{[0-9]+}}, [[REG]]
%v0 = zext i32 %vi to i64
diff --git a/test/CodeGen/ARM/coalesce-dbgvalue.ll b/test/CodeGen/ARM/coalesce-dbgvalue.ll
new file mode 100644
index 000000000000..86106a045201
--- /dev/null
+++ b/test/CodeGen/ARM/coalesce-dbgvalue.ll
@@ -0,0 +1,111 @@
+; RUN: llc < %s -verify-machineinstrs
+; PR16110
+;
+; This test case contains a value that is split into two connected components
+; by rematerialization during coalescing. It also contains a DBG_VALUE
+; instruction which must be updated during
+; ConnectedVNInfoEqClasses::Distribute().
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "thumbv7-apple-ios3.0.0"
+
+@c = common global i32 0, align 4
+@b = common global i32 0, align 4
+@a = common global i64 0, align 8
+@d = common global i32 0, align 4
+
+; Function Attrs: nounwind ssp
+define i32 @pr16110() #0 {
+for.cond1.preheader:
+ store i32 0, i32* @c, align 4, !dbg !21
+ br label %for.cond1.outer, !dbg !26
+
+for.cond1: ; preds = %for.end9, %for.cond1.outer
+ %storemerge11 = phi i32 [ 0, %for.end9 ], [ %storemerge11.ph, %for.cond1.outer ]
+ %cmp = icmp slt i32 %storemerge11, 1, !dbg !26
+ br i1 %cmp, label %for.body2, label %for.end9, !dbg !26
+
+for.body2: ; preds = %for.cond1
+ store i32 %storemerge11, i32* @b, align 4, !dbg !26
+ tail call void @llvm.dbg.value(metadata !27, i64 0, metadata !11), !dbg !28
+ %0 = load i64* @a, align 8, !dbg !29
+ %xor = xor i64 %0, %e.1.ph, !dbg !29
+ %conv3 = trunc i64 %xor to i32, !dbg !29
+ tail call void @llvm.dbg.value(metadata !{i32 %conv3}, i64 0, metadata !10), !dbg !29
+ %tobool4 = icmp eq i32 %conv3, 0, !dbg !29
+ br i1 %tobool4, label %land.end, label %land.rhs, !dbg !29
+
+land.rhs: ; preds = %for.body2
+ %call = tail call i32 bitcast (i32 (...)* @fn3 to i32 ()*)() #3, !dbg !29
+ %tobool5 = icmp ne i32 %call, 0, !dbg !29
+ br label %land.end
+
+land.end: ; preds = %land.rhs, %for.body2
+ %1 = phi i1 [ false, %for.body2 ], [ %tobool5, %land.rhs ]
+ %land.ext = zext i1 %1 to i32
+ %call6 = tail call i32 bitcast (i32 (...)* @fn2 to i32 (i32, i32*)*)(i32 %land.ext, i32* null) #3
+ %2 = load i32* @b, align 4, !dbg !26
+ %inc8 = add nsw i32 %2, 1, !dbg !26
+ %phitmp = and i64 %xor, 4294967295, !dbg !26
+ br label %for.cond1.outer, !dbg !26
+
+for.cond1.outer: ; preds = %land.end, %for.cond1.preheader
+ %storemerge11.ph = phi i32 [ %inc8, %land.end ], [ 0, %for.cond1.preheader ]
+ %e.1.ph = phi i64 [ %phitmp, %land.end ], [ 0, %for.cond1.preheader ]
+ %3 = load i32* @d, align 4, !dbg !31
+ %tobool10 = icmp eq i32 %3, 0, !dbg !31
+ br label %for.cond1
+
+for.end9: ; preds = %for.cond1
+ br i1 %tobool10, label %if.end, label %for.cond1, !dbg !31
+
+if.end: ; preds = %for.end9
+ store i32 %storemerge11, i32* @b, align 4, !dbg !26
+ ret i32 0, !dbg !32
+}
+
+declare i32 @fn2(...) #1
+
+declare i32 @fn3(...) #1
+
+; Function Attrs: nounwind readnone
+declare void @llvm.dbg.value(metadata, i64, metadata) #2
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!33}
+
+!0 = metadata !{i32 786449, metadata !1, i32 12, metadata !"clang version 3.4 (trunk 182024) (llvm/trunk 182023)", i1 true, metadata !"", i32 0, metadata !2, metadata !2, metadata !3, metadata !15, metadata !2, metadata !""} ; [ DW_TAG_compile_unit ] [/d/b/pr16110.c] [DW_LANG_C99]
+!1 = metadata !{metadata !"pr16110.c", metadata !"/d/b"}
+!2 = metadata !{i32 0}
+!3 = metadata !{metadata !4}
+!4 = metadata !{i32 786478, metadata !1, metadata !5, metadata !"pr16110", metadata !"pr16110", metadata !"", i32 7, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 true, i32 ()* @pr16110, null, null, metadata !9, i32 7} ; [ DW_TAG_subprogram ] [line 7] [def] [pr16110]
+!5 = metadata !{i32 786473, metadata !1} ; [ DW_TAG_file_type ] [/d/b/pr16110.c]
+!6 = metadata !{i32 786453, i32 0, null, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
+!7 = metadata !{metadata !8}
+!8 = metadata !{i32 786468, null, null, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
+!9 = metadata !{metadata !10, metadata !11}
+!10 = metadata !{i32 786688, metadata !4, metadata !"e", metadata !5, i32 8, metadata !8, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [e] [line 8]
+!11 = metadata !{i32 786688, metadata !12, metadata !"f", metadata !5, i32 13, metadata !14, i32 0, i32 0} ; [ DW_TAG_auto_variable ] [f] [line 13]
+!12 = metadata !{i32 786443, metadata !1, metadata !13, i32 12, i32 0, i32 2} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!13 = metadata !{i32 786443, metadata !1, metadata !4, i32 12, i32 0, i32 1} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!14 = metadata !{i32 786447, null, null, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !8} ; [ DW_TAG_pointer_type ] [line 0, size 32, align 32, offset 0] [from int]
+!15 = metadata !{metadata !16, metadata !18, metadata !19, metadata !20}
+!16 = metadata !{i32 786484, i32 0, null, metadata !"a", metadata !"a", metadata !"", metadata !5, i32 1, metadata !17, i32 0, i32 1, i64* @a, null} ; [ DW_TAG_variable ] [a] [line 1] [def]
+!17 = metadata !{i32 786468, null, null, metadata !"long long int", i32 0, i64 64, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ] [long long int] [line 0, size 64, align 32, offset 0, enc DW_ATE_signed]
+!18 = metadata !{i32 786484, i32 0, null, metadata !"b", metadata !"b", metadata !"", metadata !5, i32 2, metadata !8, i32 0, i32 1, i32* @b, null} ; [ DW_TAG_variable ] [b] [line 2] [def]
+!19 = metadata !{i32 786484, i32 0, null, metadata !"c", metadata !"c", metadata !"", metadata !5, i32 3, metadata !8, i32 0, i32 1, i32* @c, null} ; [ DW_TAG_variable ] [c] [line 3] [def]
+!20 = metadata !{i32 786484, i32 0, null, metadata !"d", metadata !"d", metadata !"", metadata !5, i32 4, metadata !8, i32 0, i32 1, i32* @d, null} ; [ DW_TAG_variable ] [d] [line 4] [def]
+!21 = metadata !{i32 10, i32 0, metadata !22, null}
+!22 = metadata !{i32 786443, metadata !1, metadata !4, i32 10, i32 0, i32 0} ; [ DW_TAG_lexical_block ] [/d/b/pr16110.c]
+!26 = metadata !{i32 12, i32 0, metadata !13, null}
+!27 = metadata !{i32* null}
+!28 = metadata !{i32 13, i32 0, metadata !12, null}
+!29 = metadata !{i32 14, i32 0, metadata !12, null}
+!31 = metadata !{i32 16, i32 0, metadata !4, null}
+!32 = metadata !{i32 18, i32 0, metadata !4, null}
+!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/code-placement.ll b/test/CodeGen/ARM/code-placement.ll
index 487ec690ea5d..70d85c91c8ca 100644
--- a/test/CodeGen/ARM/code-placement.ll
+++ b/test/CodeGen/ARM/code-placement.ll
@@ -7,7 +7,7 @@
define arm_apcscc %struct.list_head* @t1(%struct.list_head* %list) nounwind {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
%0 = icmp eq %struct.list_head* %list, null
br i1 %0, label %bb2, label %bb
@@ -33,7 +33,7 @@ bb2:
; rdar://8117827
define i32 @t2(i32 %passes, i32* nocapture %src, i32 %size) nounwind readonly {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: beq LBB1_[[RET:.]]
%0 = icmp eq i32 %passes, 0 ; <i1> [#uses=1]
br i1 %0, label %bb5, label %bb.nph15
diff --git a/test/CodeGen/ARM/constantfp.ll b/test/CodeGen/ARM/constantfp.ll
new file mode 100644
index 000000000000..974bdd729efc
--- /dev/null
+++ b/test/CodeGen/ARM/constantfp.ll
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=armv7 -mattr=+neon -mcpu=swift %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7 -mattr=+neon -mcpu=cortex-a8 %s -o - | FileCheck --check-prefix=CHECK-NONEONFP %s
+; RUN: llc -mtriple=armv7 -mattr=-neon -mcpu=cortex-a8 %s -o - | FileCheck --check-prefix=CHECK-NONEON %s
+
+define arm_aapcs_vfpcc float @test_vmov_f32() {
+; CHECK-LABEL: test_vmov_f32:
+; CHECK: vmov.f32 d0, #1.0
+
+; CHECK-NONEONFP: vmov.f32 s0, #1.0
+ ret float 1.0
+}
+
+define arm_aapcs_vfpcc float @test_vmov_imm() {
+; CHECK-LABEL: test_vmov_imm:
+; CHECK: vmov.i32 d0, #0
+
+; CHECK-NONEON-LABEL: test_vmov_imm:
+; CHECK-NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
+ ret float 0.0
+}
+
+define arm_aapcs_vfpcc float @test_vmvn_imm() {
+; CHECK-LABEL: test_vmvn_imm:
+; CHECK: vmvn.i32 d0, #0xb0000000
+
+; CHECK-NONEON-LABEL: test_vmvn_imm:
+; CHECK-NONEON: vldr s0, {{.?LCPI[0-9]+_[0-9]+}}
+ ret float 8589934080.0
+}
+
+define arm_aapcs_vfpcc double @test_vmov_f64() {
+; CHECK-LABEL: test_vmov_f64:
+; CHECK: vmov.f64 d0, #1.0
+
+; CHECK-NONEON-LABEL: test_vmov_f64:
+; CHECK-NONEON: vmov.f64 d0, #1.0
+
+ ret double 1.0
+}
+
+define arm_aapcs_vfpcc double @test_vmov_double_imm() {
+; CHECK-LABEL: test_vmov_double_imm:
+; CHECK: vmov.i32 d0, #0
+
+; CHECK-NONEON-LABEL: test_vmov_double_imm:
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+ ret double 0.0
+}
+
+define arm_aapcs_vfpcc double @test_vmvn_double_imm() {
+; CHECK-LABEL: test_vmvn_double_imm:
+; CHECK: vmvn.i32 d0, #0xb0000000
+
+; CHECK-NONEON-LABEL: test_vmvn_double_imm:
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+ ret double 0x4fffffff4fffffff
+}
+
+; Make sure we don't ignore the high half of 64-bit values when deciding whether
+; a vmov/vmvn is possible.
+define arm_aapcs_vfpcc double @test_notvmvn_double_imm() {
+; CHECK-LABEL: test_notvmvn_double_imm:
+; CHECK: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+
+; CHECK-NONEON-LABEL: test_notvmvn_double_imm:
+; CHECK-NONEON: vldr d0, {{.?LCPI[0-9]+_[0-9]+}}
+ ret double 0x4fffffffffffffff
+}
diff --git a/test/CodeGen/ARM/copy-paired-reg.ll b/test/CodeGen/ARM/copy-paired-reg.ll
new file mode 100644
index 000000000000..17a4461c682b
--- /dev/null
+++ b/test/CodeGen/ARM/copy-paired-reg.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -verify-machineinstrs
+
+define void @f() {
+ %a = alloca i8, i32 8, align 8
+ %b = alloca i8, i32 8, align 8
+
+ %c = bitcast i8* %a to i64*
+ %d = bitcast i8* %b to i64*
+
+ store atomic i64 0, i64* %c seq_cst, align 8
+ store atomic i64 0, i64* %d seq_cst, align 8
+
+ %e = load atomic i64* %d seq_cst, align 8
+
+ ret void
+}
diff --git a/test/CodeGen/ARM/crash-greedy-v6.ll b/test/CodeGen/ARM/crash-greedy-v6.ll
index fd42254767d3..e165dbdf087a 100644
--- a/test/CodeGen/ARM/crash-greedy-v6.ll
+++ b/test/CodeGen/ARM/crash-greedy-v6.ll
@@ -1,4 +1,5 @@
; RUN: llc -disable-fp-elim -relocation-model=pic < %s
+; RUN: llc -disable-fp-elim -relocation-model=pic -O0 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
target triple = "armv6-apple-ios"
; Reduced from 177.mesa. This test causes a live range split before an LDR_POST instruction.
@@ -11,6 +12,25 @@ for.body.lr.ph: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: add
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: add
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: add
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: add
+; SOURCE-SCHED: str
+; SOURCE-SCHED: str
+; SOURCE-SCHED: str
+; SOURCE-SCHED: str
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: bl
+; SOURCE-SCHED: add
+; SOURCE-SCHED: ldr
+; SOURCE-SCHED: cmp
+; SOURCE-SCHED: bne
%i.031 = phi i32 [ 0, %for.body.lr.ph ], [ %0, %for.body ]
%arrayidx11 = getelementptr float* %t, i32 %i.031
%arrayidx15 = getelementptr float* %u, i32 %i.031
diff --git a/test/CodeGen/ARM/crash-shufflevector.ll b/test/CodeGen/ARM/crash-shufflevector.ll
index bdc0e0ea4db0..0ae866800c8c 100644
--- a/test/CodeGen/ARM/crash-shufflevector.ll
+++ b/test/CodeGen/ARM/crash-shufflevector.ll
@@ -7,4 +7,4 @@ define void @f(<4 x i8> %param1, <4 x i8> %param2) {
%z = shufflevector <16 x i8> %y1, <16 x i8> %y2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
call void @g(<16 x i8> %z)
ret void
-}
\ No newline at end of file
+}
diff --git a/test/CodeGen/ARM/ctz.ll b/test/CodeGen/ARM/ctz.ll
index 5ebca53b4692..2c7efc7c5da5 100644
--- a/test/CodeGen/ARM/ctz.ll
+++ b/test/CodeGen/ARM/ctz.ll
@@ -3,7 +3,7 @@
declare i32 @llvm.cttz.i32(i32, i1)
define i32 @f1(i32 %a) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: rbit
; CHECK: clz
%tmp = call i32 @llvm.cttz.i32( i32 %a, i1 true )
diff --git a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
index 18f57ea41cd8..8950abdef6a3 100644
--- a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
-; CHECK: f:
+; CHECK-LABEL: f:
define float @f(<4 x i16>* nocapture %in) {
; CHECK: vldr
; CHECK: vmovl.u16
diff --git a/test/CodeGen/ARM/dagcombine-concatvector.ll b/test/CodeGen/ARM/dagcombine-concatvector.ll
index e9e0fe3239a7..2927ea2f3ca9 100644
--- a/test/CodeGen/ARM/dagcombine-concatvector.ll
+++ b/test/CodeGen/ARM/dagcombine-concatvector.ll
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 -mcpu=generic | FileCheck %s
; PR15525
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: ldr.w [[REG:r[0-9]+]], [sp]
; CHECK-NEXT: vmov {{d[0-9]+}}, r1, r2
; CHECK-NEXT: vmov {{d[0-9]+}}, r3, [[REG]]
diff --git a/test/CodeGen/ARM/darwin-eabi.ll b/test/CodeGen/ARM/darwin-eabi.ll
new file mode 100644
index 000000000000..f2cde71dd496
--- /dev/null
+++ b/test/CodeGen/ARM/darwin-eabi.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=thumbv7m-apple-darwin -mcpu=cortex-m3 < %s | FileCheck %s --check-prefix=CHECK-M3
+; RUN: llc -mtriple=thumbv7em-apple-darwin -mcpu=cortex-m4 < %s | FileCheck %s --check-prefix=CHECK-M4
+; RUN: llc -mtriple=thumbv7-apple-darwin -mcpu=cortex-m3 < %s | FileCheck %s --check-prefix=CHECK-M3
+; RUN: llc -mtriple=thumbv7-apple-darwin -mcpu=cortex-m4 < %s | FileCheck %s --check-prefix=CHECK-M4
+
+define float @float_op(float %lhs, float %rhs) {
+ %sum = fadd float %lhs, %rhs
+ ret float %sum
+; CHECK-M3-LABEL: float_op:
+; CHECK-M3: blx ___addsf3
+
+; CHECK-M4-LABEL: float_op:
+; CHECK-M4: vadd.f32
+}
+
+define double @double_op(double %lhs, double %rhs) {
+ %sum = fadd double %lhs, %rhs
+ ret double %sum
+; CHECK-M3-LABEL: double_op:
+; CHECK-M3: blx ___adddf3
+
+; CHECK-M4-LABEL: double_op:
+; CHECK-M4: blx ___adddf3
+}
diff --git a/test/CodeGen/ARM/data-in-code-annotations.ll b/test/CodeGen/ARM/data-in-code-annotations.ll
index a66a9d1292f0..da70178225eb 100644
--- a/test/CodeGen/ARM/data-in-code-annotations.ll
+++ b/test/CodeGen/ARM/data-in-code-annotations.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
define double @f1() nounwind {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: .data_region
; CHECK: .long 1413754129
; CHECK: .long 1074340347
@@ -11,7 +11,7 @@ define double @f1() nounwind {
define i32 @f2() {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: .data_region jt32
; CHECK: .end_data_region
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
index c162260dcd0c..e8bf3ba9d61f 100644
--- a/test/CodeGen/ARM/debug-info-arg.ll
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -11,7 +11,7 @@ define void @foo(%struct.tag_s* nocapture %this, %struct.tag_s* %c, i64 %x, i64
tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %c}, i64 0, metadata !13), !dbg !21
tail call void @llvm.dbg.value(metadata !{i64 %x}, i64 0, metadata !14), !dbg !22
tail call void @llvm.dbg.value(metadata !{i64 %y}, i64 0, metadata !17), !dbg !23
-;CHECK: @DEBUG_VALUE: foo:y <- R7+4294967295
+;CHECK: @DEBUG_VALUE: foo:y <- [R7+8]
tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr1}, i64 0, metadata !18), !dbg !24
tail call void @llvm.dbg.value(metadata !{%struct.tag_s* %ptr2}, i64 0, metadata !19), !dbg !25
%1 = icmp eq %struct.tag_s* %c, null, !dbg !26
@@ -30,15 +30,16 @@ declare void @foobar(i64, i64)
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!33}
-!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, null, null, metadata !30, null, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 786478, metadata !2, metadata !2, metadata !"foo", metadata !"foo", metadata !"", i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null, metadata !31, i32 11} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, metadata !4, metadata !4, metadata !30, null, null, null} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 786478, metadata !2, metadata !2, metadata !"foo", metadata !"foo", metadata !"", i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null, metadata !31, i32 11} ; [ DW_TAG_subprogram ] [line 11] [def] [foo]
!2 = metadata !{i32 786473, metadata !32} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786453, metadata !32, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!3 = metadata !{i32 786453, metadata !32, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{null}
!5 = metadata !{i32 786689, metadata !1, metadata !"this", metadata !2, i32 16777227, metadata !6, i32 0, null} ; [ DW_TAG_arg_variable ]
!6 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 786451, metadata !32, metadata !0, metadata !"tag_s", i32 5, i64 96, i64 32, i32 0, i32 0, i32 0, metadata !8, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!7 = metadata !{i32 786451, metadata !32, metadata !0, metadata !"tag_s", i32 5, i64 96, i64 32, i32 0, i32 0, null, metadata !8, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [tag_s] [line 5, size 96, align 32, offset 0] [def] [from ]
!8 = metadata !{metadata !9, metadata !11, metadata !12}
!9 = metadata !{i32 786445, metadata !32, metadata !7, metadata !"x", i32 6, i64 32, i64 32, i64 0, i32 0, metadata !10} ; [ DW_TAG_member ]
!10 = metadata !{i32 786468, null, metadata !0, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
@@ -64,3 +65,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!30 = metadata !{metadata !1}
!31 = metadata !{metadata !5, metadata !13, metadata !14, metadata !17, metadata !18, metadata!19}
!32 = metadata !{metadata !"one.c", metadata !"/Volumes/Athwagate/R10048772"}
+!33 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-blocks.ll b/test/CodeGen/ARM/debug-info-blocks.ll
index d0bfecc5af41..6cbe4b4727cd 100644
--- a/test/CodeGen/ARM/debug-info-blocks.ll
+++ b/test/CodeGen/ARM/debug-info-blocks.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 < %s | FileCheck %s
-; CHECK: @DEBUG_VALUE: mydata <- [sp+#{{[0-9]+}}]+#0
+; CHECK: @DEBUG_VALUE: foobar_func_block_invoke_0:mydata <- [SP+{{[0-9]+}}]
; Radar 9331779
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
target triple = "thumbv7-apple-ios"
@@ -93,37 +93,38 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
}
!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!162}
-!0 = metadata !{i32 786449, i32 16, metadata !40, metadata !"Apple clang version 2.1", i1 false, metadata !"", i32 2, metadata !147, null, metadata !148, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 786433, metadata !160, metadata !0, metadata !"", i32 248, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !3, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
+!0 = metadata !{i32 786449, metadata !153, i32 16, metadata !"Apple clang version 2.1", i1 false, metadata !"", i32 2, metadata !147, metadata !26, metadata !148, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 786436, metadata !160, metadata !0, metadata !"", i32 248, i64 32, i64 32, i32 0, i32 0, null, metadata !3, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 248, size 32, align 32, offset 0] [def] [from ]
!2 = metadata !{i32 786473, metadata !160} ; [ DW_TAG_file_type ]
!3 = metadata !{metadata !4}
!4 = metadata !{i32 786472, metadata !"Ver1", i64 0} ; [ DW_TAG_enumerator ]
-!5 = metadata !{i32 786433, metadata !160, metadata !0, metadata !"Mode", i32 79, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !7, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
+!5 = metadata !{i32 786436, metadata !160, metadata !0, metadata !"Mode", i32 79, i64 32, i64 32, i32 0, i32 0, null, metadata !7, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [Mode] [line 79, size 32, align 32, offset 0] [def] [from ]
!6 = metadata !{i32 786473, metadata !161} ; [ DW_TAG_file_type ]
!7 = metadata !{metadata !8}
!8 = metadata !{i32 786472, metadata !"One", i64 0} ; [ DW_TAG_enumerator ]
-!9 = metadata !{i32 786433, metadata !149, metadata !0, metadata !"", i32 15, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !11, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
+!9 = metadata !{i32 786436, metadata !149, metadata !0, metadata !"", i32 15, i64 32, i64 32, i32 0, i32 0, null, metadata !11, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 15, size 32, align 32, offset 0] [def] [from ]
!10 = metadata !{i32 786473, metadata !149} ; [ DW_TAG_file_type ]
!11 = metadata !{metadata !12, metadata !13}
!12 = metadata !{i32 786472, metadata !"Unknown", i64 0} ; [ DW_TAG_enumerator ]
!13 = metadata !{i32 786472, metadata !"Known", i64 1} ; [ DW_TAG_enumerator ]
-!14 = metadata !{i32 786433, metadata !150, metadata !0, metadata !"", i32 20, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !16, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
+!14 = metadata !{i32 786436, metadata !150, metadata !0, metadata !"", i32 20, i64 32, i64 32, i32 0, i32 0, null, metadata !16, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 20, size 32, align 32, offset 0] [def] [from ]
!15 = metadata !{i32 786473, metadata !150} ; [ DW_TAG_file_type ]
!16 = metadata !{metadata !17, metadata !18}
!17 = metadata !{i32 786472, metadata !"Single", i64 0} ; [ DW_TAG_enumerator ]
!18 = metadata !{i32 786472, metadata !"Double", i64 1} ; [ DW_TAG_enumerator ]
-!19 = metadata !{i32 786433, metadata !151, metadata !0, metadata !"", i32 14, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !21, i32 0, i32 0} ; [ DW_TAG_enumeration_type ]
+!19 = metadata !{i32 786436, metadata !151, metadata !0, metadata !"", i32 14, i64 32, i64 32, i32 0, i32 0, null, metadata !21, i32 0, null, null, null} ; [ DW_TAG_enumeration_type ] [line 14, size 32, align 32, offset 0] [def] [from ]
!20 = metadata !{i32 786473, metadata !151} ; [ DW_TAG_file_type ]
!21 = metadata !{metadata !22}
!22 = metadata !{i32 786472, metadata !"Eleven", i64 0} ; [ DW_TAG_enumerator ]
-!23 = metadata !{i32 786478, metadata !24, metadata !"foobar_func_block_invoke_0", metadata !"foobar_func_block_invoke_0", metadata !"", metadata !24, i32 609, metadata !25, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 false, void (i8*, %0*, [4 x i32], [4 x i32])* @foobar_func_block_invoke_0, null, null, null, i32 609} ; [ DW_TAG_subprogram ]
+!23 = metadata !{i32 786478, metadata !152, metadata !24, metadata !"foobar_func_block_invoke_0", metadata !"foobar_func_block_invoke_0", metadata !"", i32 609, metadata !25, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 false, void (i8*, %0*, [4 x i32], [4 x i32])* @foobar_func_block_invoke_0, null, null, null, i32 609} ; [ DW_TAG_subprogram ] [line 609] [local] [def] [foobar_func_block_invoke_0]
!24 = metadata !{i32 786473, metadata !152} ; [ DW_TAG_file_type ]
-!25 = metadata !{i32 786453, metadata !152, metadata !24, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !26, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!25 = metadata !{i32 786453, metadata !152, metadata !24, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !26, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!26 = metadata !{null}
!27 = metadata !{i32 786689, metadata !23, metadata !".block_descriptor", metadata !24, i32 16777825, metadata !28, i32 64, null} ; [ DW_TAG_arg_variable ]
!28 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 0, i64 0, i32 0, metadata !29} ; [ DW_TAG_pointer_type ]
-!29 = metadata !{i32 786451, metadata !152, metadata !24, metadata !"__block_literal_14", i32 609, i64 256, i64 32, i32 0, i32 0, i32 0, metadata !30, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!29 = metadata !{i32 786451, metadata !152, metadata !24, metadata !"__block_literal_14", i32 609, i64 256, i64 32, i32 0, i32 0, null, metadata !30, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [__block_literal_14] [line 609, size 256, align 32, offset 0] [def] [from ]
!30 = metadata !{metadata !31, metadata !33, metadata !35, metadata !36, metadata !37, metadata !48, metadata !89, metadata !124}
!31 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__isa", i32 609, i64 32, i64 32, i64 0, i32 0, metadata !32} ; [ DW_TAG_member ]
!32 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
@@ -133,7 +134,7 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!36 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__FuncPtr", i32 609, i64 32, i64 32, i64 96, i32 0, metadata !32} ; [ DW_TAG_member ]
!37 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__descriptor", i32 609, i64 32, i64 32, i64 128, i32 0, metadata !38} ; [ DW_TAG_member ]
!38 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !39} ; [ DW_TAG_pointer_type ]
-!39 = metadata !{i32 786451, metadata !153, metadata !0, metadata !"__block_descriptor_withcopydispose", i32 307, i64 128, i64 32, i32 0, i32 0, i32 0, metadata !41, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!39 = metadata !{i32 786451, metadata !153, metadata !0, metadata !"__block_descriptor_withcopydispose", i32 307, i64 128, i64 32, i32 0, i32 0, null, metadata !41, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [__block_descriptor_withcopydispose] [line 307, size 128, align 32, offset 0] [def] [from ]
!40 = metadata !{i32 786473, metadata !153} ; [ DW_TAG_file_type ]
!41 = metadata !{metadata !42, metadata !44, metadata !45, metadata !47}
!42 = metadata !{i32 786445, metadata !153, metadata !40, metadata !"reserved", i32 307, i64 32, i64 32, i64 0, i32 0, metadata !43} ; [ DW_TAG_member ]
@@ -144,7 +145,7 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!47 = metadata !{i32 786445, metadata !153, metadata !40, metadata !"DestroyFuncPtr", i32 307, i64 32, i64 32, i64 96, i32 0, metadata !46} ; [ DW_TAG_member ]
!48 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"mydata", i32 609, i64 32, i64 32, i64 160, i32 0, metadata !49} ; [ DW_TAG_member ]
!49 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 0, i64 0, i32 0, metadata !50} ; [ DW_TAG_pointer_type ]
-!50 = metadata !{i32 786451, metadata !152, metadata !24, metadata !"", i32 0, i64 224, i64 0, i32 0, i32 16, i32 0, metadata !51, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!50 = metadata !{i32 786451, metadata !152, metadata !24, metadata !"", i32 0, i64 224, i64 0, i32 0, i32 16, null, metadata !51, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [line 0, size 224, align 0, offset 0] [def] [from ]
!51 = metadata !{metadata !52, metadata !53, metadata !54, metadata !55, metadata !56, metadata !57, metadata !58}
!52 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__isa", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !32} ; [ DW_TAG_member ]
!53 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__forwarding", i32 0, i64 32, i64 32, i64 32, i32 0, metadata !32} ; [ DW_TAG_member ]
@@ -154,27 +155,27 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!57 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"__destroy_helper", i32 0, i64 32, i64 32, i64 160, i32 0, metadata !32} ; [ DW_TAG_member ]
!58 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"mydata", i32 0, i64 32, i64 32, i64 192, i32 0, metadata !59} ; [ DW_TAG_member ]
!59 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !60} ; [ DW_TAG_pointer_type ]
-!60 = metadata !{i32 786451, metadata !154, metadata !24, metadata !"UIMydata", i32 26, i64 128, i64 32, i32 0, i32 0, i32 0, metadata !62, i32 16, i32 0} ; [ DW_TAG_structure_type ]
+!60 = metadata !{i32 786451, metadata !154, metadata !24, metadata !"UIMydata", i32 26, i64 128, i64 32, i32 0, i32 0, null, metadata !62, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [UIMydata] [line 26, size 128, align 32, offset 0] [def] [from ]
!61 = metadata !{i32 786473, metadata !154} ; [ DW_TAG_file_type ]
!62 = metadata !{metadata !63, metadata !71, metadata !75, metadata !79}
!63 = metadata !{i32 786460, metadata !60, null, metadata !61, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !64} ; [ DW_TAG_inheritance ]
-!64 = metadata !{i32 786451, metadata !155, metadata !40, metadata !"NSO", i32 66, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !66, i32 16, i32 0} ; [ DW_TAG_structure_type ]
+!64 = metadata !{i32 786451, metadata !155, metadata !40, metadata !"NSO", i32 66, i64 32, i64 32, i32 0, i32 0, null, metadata !66, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [NSO] [line 66, size 32, align 32, offset 0] [def] [from ]
!65 = metadata !{i32 786473, metadata !155} ; [ DW_TAG_file_type ]
!66 = metadata !{metadata !67}
!67 = metadata !{i32 786445, metadata !155, metadata !65, metadata !"isa", i32 67, i64 32, i64 32, i64 0, i32 2, metadata !68, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!68 = metadata !{i32 786454, metadata !0, metadata !"Class", metadata !40, i32 197, i64 0, i64 0, i64 0, i32 0, metadata !69} ; [ DW_TAG_typedef ]
+!68 = metadata !{i32 786454, metadata !153, metadata !0, metadata !"Class", i32 197, i64 0, i64 0, i64 0, i32 0, metadata !69} ; [ DW_TAG_typedef ]
!69 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !70} ; [ DW_TAG_pointer_type ]
-!70 = metadata !{i32 786451, metadata !40, metadata !0, metadata !"objc_class", i32 0, i64 0, i64 0, i32 0, i32 4, i32 0, null, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!70 = metadata !{i32 786451, metadata !153, metadata !0, metadata !"objc_class", i32 0, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [objc_class] [line 0, size 0, align 0, offset 0] [decl] [from ]
!71 = metadata !{i32 786445, metadata !154, metadata !61, metadata !"_mydataRef", i32 28, i64 32, i64 32, i64 32, i32 0, metadata !72, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!72 = metadata !{i32 786454, metadata !0, metadata !"CFTypeRef", metadata !24, i32 313, i64 0, i64 0, i64 0, i32 0, metadata !73} ; [ DW_TAG_typedef ]
+!72 = metadata !{i32 786454, metadata !152, metadata !0, metadata !"CFTypeRef", i32 313, i64 0, i64 0, i64 0, i32 0, metadata !73} ; [ DW_TAG_typedef ]
!73 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !74} ; [ DW_TAG_pointer_type ]
!74 = metadata !{i32 786470, null, metadata !0, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null} ; [ DW_TAG_const_type ]
!75 = metadata !{i32 786445, metadata !154, metadata !61, metadata !"_scale", i32 29, i64 32, i64 32, i64 64, i32 0, metadata !76, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!76 = metadata !{i32 786454, metadata !0, metadata !"Float", metadata !77, i32 89, i64 0, i64 0, i64 0, i32 0, metadata !78} ; [ DW_TAG_typedef ]
+!76 = metadata !{i32 786454, metadata !156, metadata !0, metadata !"Float", i32 89, i64 0, i64 0, i64 0, i32 0, metadata !78} ; [ DW_TAG_typedef ]
!77 = metadata !{i32 786473, metadata !156} ; [ DW_TAG_file_type ]
!78 = metadata !{i32 786468, null, metadata !0, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!79 = metadata !{i32 786445, metadata !154, metadata !61, metadata !"_mydataFlags", i32 37, i64 8, i64 8, i64 96, i32 0, metadata !80, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!80 = metadata !{i32 786451, metadata !154, metadata !0, metadata !"", i32 30, i64 8, i64 8, i32 0, i32 0, i32 0, metadata !81, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!80 = metadata !{i32 786451, metadata !154, metadata !0, metadata !"", i32 30, i64 8, i64 8, i32 0, i32 0, null, metadata !81, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [line 30, size 8, align 8, offset 0] [def] [from ]
!81 = metadata !{metadata !82, metadata !84, metadata !85, metadata !86, metadata !87, metadata !88}
!82 = metadata !{i32 786445, metadata !154, metadata !61, metadata !"named", i32 31, i64 1, i64 32, i64 0, i32 0, metadata !83} ; [ DW_TAG_member ]
!83 = metadata !{i32 786468, null, metadata !0, metadata !"unsigned int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
@@ -185,43 +186,43 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!88 = metadata !{i32 786445, metadata !154, metadata !61, metadata !"isCIMydata", i32 36, i64 1, i64 32, i64 7, i32 0, metadata !83} ; [ DW_TAG_member ]
!89 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"self", i32 609, i64 32, i64 32, i64 192, i32 0, metadata !90} ; [ DW_TAG_member ]
!90 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !91} ; [ DW_TAG_pointer_type ]
-!91 = metadata !{i32 786451, metadata !152, metadata !40, metadata !"MyWork", i32 36, i64 384, i64 32, i32 0, i32 0, i32 0, metadata !92, i32 16, i32 0} ; [ DW_TAG_structure_type ]
+!91 = metadata !{i32 786451, metadata !152, metadata !40, metadata !"MyWork", i32 36, i64 384, i64 32, i32 0, i32 0, null, metadata !92, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [MyWork] [line 36, size 384, align 32, offset 0] [def] [from ]
!92 = metadata !{metadata !93, metadata !98, metadata !101, metadata !107, metadata !123}
-!93 = metadata !{i32 786460, metadata !91, null, metadata !24, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !94} ; [ DW_TAG_inheritance ]
-!94 = metadata !{i32 786451, metadata !157, metadata !40, metadata !"twork", i32 43, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !96, i32 16, i32 0} ; [ DW_TAG_structure_type ]
+!93 = metadata !{i32 786460, metadata !152, metadata !91, null, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !94} ; [ DW_TAG_inheritance ]
+!94 = metadata !{i32 786451, metadata !157, metadata !40, metadata !"twork", i32 43, i64 32, i64 32, i32 0, i32 0, null, metadata !96, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [twork] [line 43, size 32, align 32, offset 0] [def] [from ]
!95 = metadata !{i32 786473, metadata !157} ; [ DW_TAG_file_type ]
!96 = metadata !{metadata !97}
!97 = metadata !{i32 786460, metadata !94, null, metadata !95, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !64} ; [ DW_TAG_inheritance ]
!98 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"_itemID", i32 38, i64 64, i64 32, i64 32, i32 1, metadata !99, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!99 = metadata !{i32 786454, metadata !0, metadata !"uint64_t", metadata !40, i32 55, i64 0, i64 0, i64 0, i32 0, metadata !100} ; [ DW_TAG_typedef ]
+!99 = metadata !{i32 786454, metadata !153, metadata !0, metadata !"uint64_t", i32 55, i64 0, i64 0, i64 0, i32 0, metadata !100} ; [ DW_TAG_typedef ]
!100 = metadata !{i32 786468, null, metadata !0, metadata !"long long unsigned int", i32 0, i64 64, i64 32, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
!101 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"_library", i32 39, i64 32, i64 32, i64 96, i32 1, metadata !102, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
!102 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !103} ; [ DW_TAG_pointer_type ]
-!103 = metadata !{i32 786451, metadata !158, metadata !40, metadata !"MyLibrary2", i32 22, i64 32, i64 32, i32 0, i32 0, i32 0, metadata !105, i32 16, i32 0} ; [ DW_TAG_structure_type ]
+!103 = metadata !{i32 786451, metadata !158, metadata !40, metadata !"MyLibrary2", i32 22, i64 32, i64 32, i32 0, i32 0, null, metadata !105, i32 16, null, null, null} ; [ DW_TAG_structure_type ] [MyLibrary2] [line 22, size 32, align 32, offset 0] [def] [from ]
!104 = metadata !{i32 786473, metadata !158} ; [ DW_TAG_file_type ]
!105 = metadata !{metadata !106}
!106 = metadata !{i32 786460, metadata !103, null, metadata !104, i32 0, i64 0, i64 0, i64 0, i32 0, metadata !64} ; [ DW_TAG_inheritance ]
!107 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"_bounds", i32 40, i64 128, i64 32, i64 128, i32 1, metadata !108, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
-!108 = metadata !{i32 786454, metadata !0, metadata !"CR", metadata !40, i32 33, i64 0, i64 0, i64 0, i32 0, metadata !109} ; [ DW_TAG_typedef ]
-!109 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"CR", i32 29, i64 128, i64 32, i32 0, i32 0, i32 0, metadata !110, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!108 = metadata !{i32 786454, metadata !153, metadata !0, metadata !"CR", i32 33, i64 0, i64 0, i64 0, i32 0, metadata !109} ; [ DW_TAG_typedef ]
+!109 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"CR", i32 29, i64 128, i64 32, i32 0, i32 0, null, metadata !110, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [CR] [line 29, size 128, align 32, offset 0] [def] [from ]
!110 = metadata !{metadata !111, metadata !117}
!111 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"origin", i32 30, i64 64, i64 32, i64 0, i32 0, metadata !112} ; [ DW_TAG_member ]
-!112 = metadata !{i32 786454, metadata !0, metadata !"CP", metadata !77, i32 17, i64 0, i64 0, i64 0, i32 0, metadata !113} ; [ DW_TAG_typedef ]
-!113 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"CP", i32 13, i64 64, i64 32, i32 0, i32 0, i32 0, metadata !114, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!112 = metadata !{i32 786454, metadata !156, metadata !0, metadata !"CP", i32 17, i64 0, i64 0, i64 0, i32 0, metadata !113} ; [ DW_TAG_typedef ]
+!113 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"CP", i32 13, i64 64, i64 32, i32 0, i32 0, null, metadata !114, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [CP] [line 13, size 64, align 32, offset 0] [def] [from ]
!114 = metadata !{metadata !115, metadata !116}
!115 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"x", i32 14, i64 32, i64 32, i64 0, i32 0, metadata !76} ; [ DW_TAG_member ]
!116 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"y", i32 15, i64 32, i64 32, i64 32, i32 0, metadata !76} ; [ DW_TAG_member ]
!117 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"size", i32 31, i64 64, i64 32, i64 64, i32 0, metadata !118} ; [ DW_TAG_member ]
-!118 = metadata !{i32 786454, metadata !0, metadata !"Size", metadata !77, i32 25, i64 0, i64 0, i64 0, i32 0, metadata !119} ; [ DW_TAG_typedef ]
-!119 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"Size", i32 21, i64 64, i64 32, i32 0, i32 0, i32 0, metadata !120, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!118 = metadata !{i32 786454, metadata !156, metadata !0, metadata !"Size", i32 25, i64 0, i64 0, i64 0, i32 0, metadata !119} ; [ DW_TAG_typedef ]
+!119 = metadata !{i32 786451, metadata !156, metadata !0, metadata !"Size", i32 21, i64 64, i64 32, i32 0, i32 0, null, metadata !120, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [Size] [line 21, size 64, align 32, offset 0] [def] [from ]
!120 = metadata !{metadata !121, metadata !122}
!121 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"width", i32 22, i64 32, i64 32, i64 0, i32 0, metadata !76} ; [ DW_TAG_member ]
!122 = metadata !{i32 786445, metadata !156, metadata !77, metadata !"height", i32 23, i64 32, i64 32, i64 32, i32 0, metadata !76} ; [ DW_TAG_member ]
!123 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"_data", i32 40, i64 128, i64 32, i64 256, i32 1, metadata !108, metadata !"", metadata !"", metadata !"", i32 0} ; [ DW_TAG_member ]
!124 = metadata !{i32 786445, metadata !152, metadata !24, metadata !"semi", i32 609, i64 32, i64 32, i64 224, i32 0, metadata !125} ; [ DW_TAG_member ]
-!125 = metadata !{i32 786454, metadata !0, metadata !"d_t", metadata !24, i32 35, i64 0, i64 0, i64 0, i32 0, metadata !126} ; [ DW_TAG_typedef ]
+!125 = metadata !{i32 786454, metadata !152, metadata !0, metadata !"d_t", i32 35, i64 0, i64 0, i64 0, i32 0, metadata !126} ; [ DW_TAG_typedef ]
!126 = metadata !{i32 786447, null, metadata !0, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !127} ; [ DW_TAG_pointer_type ]
-!127 = metadata !{i32 786451, metadata !159, metadata !0, metadata !"my_struct", i32 49, i64 0, i64 0, i32 0, i32 4, i32 0, null, i32 0, i32 0} ; [ DW_TAG_structure_type ]
+!127 = metadata !{i32 786451, metadata !159, metadata !0, metadata !"my_struct", i32 49, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null, null} ; [ DW_TAG_structure_type ] [my_struct] [line 49, size 0, align 0, offset 0] [decl] [from ]
!128 = metadata !{i32 786473, metadata !159} ; [ DW_TAG_file_type ]
!129 = metadata !{i32 609, i32 144, metadata !23, null}
!130 = metadata !{i32 786689, metadata !23, metadata !"loadedMydata", metadata !24, i32 33555041, metadata !59, i32 0, null} ; [ DW_TAG_arg_variable ]
@@ -236,7 +237,7 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!139 = metadata !{i32 786688, metadata !23, metadata !"semi", metadata !24, i32 607, metadata !125, i32 0, null, i64 1, i64 28} ; [ DW_TAG_auto_variable ]
!140 = metadata !{i32 607, i32 30, metadata !23, null}
!141 = metadata !{i32 610, i32 17, metadata !142, null}
-!142 = metadata !{i32 786443, metadata !23, i32 609, i32 200, metadata !24, i32 94} ; [ DW_TAG_lexical_block ]
+!142 = metadata !{i32 786443, metadata !152, metadata !23, i32 609, i32 200, i32 94} ; [ DW_TAG_lexical_block ]
!143 = metadata !{i32 611, i32 17, metadata !142, null}
!144 = metadata !{i32 612, i32 17, metadata !142, null}
!145 = metadata !{i32 613, i32 17, metadata !142, null}
@@ -256,3 +257,4 @@ define hidden void @foobar_func_block_invoke_0(i8* %.block_descriptor, %0* %load
!159 = metadata !{metadata !"header15.h", metadata !"/Volumes/Sandbox/llvm"}
!160 = metadata !{metadata !"header.h", metadata !"/Volumes/Sandbox/llvm"}
!161 = metadata !{metadata !"header2.h", metadata !"/Volumes/Sandbox/llvm"}
+!162 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-branch-folding.ll b/test/CodeGen/ARM/debug-info-branch-folding.ll
index 38945ac2ea7b..8505f5365567 100644
--- a/test/CodeGen/ARM/debug-info-branch-folding.ll
+++ b/test/CodeGen/ARM/debug-info-branch-folding.ll
@@ -5,8 +5,8 @@ target triple = "thumbv7-apple-macosx10.6.7"
;CHECK: vadd.f32 q4, q8, q8
;CHECK-NEXT: LBB0_1
-;CHECK:@DEBUG_VALUE: x <- Q4+0
-;CHECK-NEXT:@DEBUG_VALUE: y <- Q4+0
+;CHECK:@DEBUG_VALUE: x <- Q4{{$}}
+;CHECK-NEXT:@DEBUG_VALUE: y <- Q4{{$}}
@.str = external constant [13 x i8]
@@ -38,23 +38,25 @@ declare i32 @printf(i8* nocapture, ...) nounwind
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-!0 = metadata !{i32 786478, i32 0, metadata !1, metadata !"test0001", metadata !"test0001", metadata !"", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 0} ; [ DW_TAG_subprogram ]
+!llvm.module.flags = !{!56}
+
+!0 = metadata !{i32 786478, metadata !54, null, metadata !"test0001", metadata !"test0001", metadata !"", i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, metadata !17, metadata !17, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786454, metadata !54, metadata !2, metadata !"v4f32", i32 14, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_typedef ]
-!6 = metadata !{i32 786691, metadata !2, metadata !"", metadata !2, i32 0, i64 128, i64 128, i32 0, i32 0, metadata !7, metadata !8, i32 0, i32 0} ; [ DW_TAG_vector_type ]
+!6 = metadata !{i32 786433, metadata !54, metadata !2, metadata !"", i32 0, i64 128, i64 128, i32 0, i32 0, metadata !7, metadata !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
!7 = metadata !{i32 786468, null, metadata !2, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!8 = metadata !{metadata !9}
!9 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ]
-!10 = metadata !{i32 786478, i32 0, metadata !1, metadata !"main", metadata !"main", metadata !"", metadata !1, i32 59, metadata !11, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i8**, i1)* @main, null, null, metadata !52, i32 0} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !12, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!10 = metadata !{i32 786478, metadata !54, null, metadata !"main", metadata !"main", metadata !"", i32 59, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**, i1)* @main, null, null, metadata !52, i32 0} ; [ DW_TAG_subprogram ] [line 59] [def] [scope 0] [main]
+!11 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!12 = metadata !{metadata !13}
!13 = metadata !{i32 786468, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!14 = metadata !{i32 786478, i32 0, metadata !15, metadata !"printFV", metadata !"printFV", metadata !"", metadata !15, i32 41, metadata !16, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, null, null, null, metadata !53, i32 0} ; [ DW_TAG_subprogram ]
+!14 = metadata !{i32 786478, metadata !55, null, metadata !"printFV", metadata !"printFV", metadata !"", i32 41, metadata !16, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !53, i32 0} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [scope 0] [printFV]
!15 = metadata !{i32 786473, metadata !55} ; [ DW_TAG_file_type ]
-!16 = metadata !{i32 786453, metadata !55, metadata !15, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !17, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!16 = metadata !{i32 786453, metadata !55, metadata !15, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!17 = metadata !{null}
!18 = metadata !{i32 786689, metadata !0, metadata !"a", metadata !1, i32 16777219, metadata !7, i32 0, null} ; [ DW_TAG_arg_variable ]
!19 = metadata !{i32 786689, metadata !10, metadata !"argc", metadata !1, i32 16777275, metadata !13, i32 0, null} ; [ DW_TAG_arg_variable ]
@@ -94,3 +96,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!53 = metadata !{metadata !30}
!54 = metadata !{metadata !"build2.c", metadata !"/private/tmp"}
!55 = metadata !{metadata !"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", metadata !"/private/tmp"}
+!56 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-d16-reg.ll b/test/CodeGen/ARM/debug-info-d16-reg.ll
index e4040fa02caa..30a3e2dcdc2c 100644
--- a/test/CodeGen/ARM/debug-info-d16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-d16-reg.ll
@@ -57,23 +57,24 @@ entry:
declare i32 @puts(i8* nocapture) nounwind
!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!48}
-!0 = metadata !{i32 786478, metadata !1, metadata !"printer", metadata !"printer", metadata !"printer", metadata !1, i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, double, i8)* @printer, null, null, metadata !43, i32 12} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786478, metadata !46, metadata !1, metadata !"printer", metadata !"printer", metadata !"printer", i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, double, i8)* @printer, null, null, metadata !43, i32 12} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !46} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"(LLVM build 00)", i1 true, metadata !"", i32 0, null, null, metadata !42, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!2 = metadata !{i32 786449, metadata !46, i32 1, metadata !"(LLVM build 00)", i1 true, metadata !"", i32 0, metadata !47, metadata !47, metadata !42, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786453, metadata !46, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5, metadata !6, metadata !7, metadata !8}
-!5 = metadata !{i32 786468, metadata !1, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 786447, metadata !1, metadata !"", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 786468, metadata !1, metadata !"double", metadata !1, i32 0, i64 64, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!8 = metadata !{i32 786468, metadata !1, metadata !"unsigned char", metadata !1, i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
-!9 = metadata !{i32 786478, metadata !1, metadata !"inlineprinter", metadata !"inlineprinter", metadata !"inlineprinter", metadata !1, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, double, i8)* @inlineprinter, null, null, metadata !44, i32 5} ; [ DW_TAG_subprogram ]
-!10 = metadata !{i32 786478, metadata !1, metadata !"main", metadata !"main", metadata !"main", metadata !1, i32 18, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !45, i32 18} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null} ; [ DW_TAG_subroutine_type ]
+!5 = metadata !{i32 786468, metadata !46, metadata !1, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 786447, metadata !46, metadata !1, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
+!7 = metadata !{i32 786468, metadata !46, metadata !1, metadata !"double", i32 0, i64 64, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
+!8 = metadata !{i32 786468, metadata !46, metadata !1, metadata !"unsigned char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
+!9 = metadata !{i32 786478, metadata !46, metadata !1, metadata !"inlineprinter", metadata !"inlineprinter", metadata !"inlineprinter", i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, double, i8)* @inlineprinter, null, null, metadata !44, i32 5} ; [ DW_TAG_subprogram ]
+!10 = metadata !{i32 786478, metadata !46, metadata !1, metadata !"main", metadata !"main", metadata !"main", i32 18, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !45, i32 18} ; [ DW_TAG_subprogram ]
+!11 = metadata !{i32 786453, metadata !46, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!12 = metadata !{metadata !5, metadata !5, metadata !13}
-!13 = metadata !{i32 786447, metadata !1, metadata !"", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !14} ; [ DW_TAG_pointer_type ]
-!14 = metadata !{i32 786447, metadata !1, metadata !"", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !15} ; [ DW_TAG_pointer_type ]
-!15 = metadata !{i32 786468, metadata !1, metadata !"char", metadata !1, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
+!13 = metadata !{i32 786447, metadata !46, metadata !1, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !14} ; [ DW_TAG_pointer_type ]
+!14 = metadata !{i32 786447, metadata !46, metadata !1, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !15} ; [ DW_TAG_pointer_type ]
+!15 = metadata !{i32 786468, metadata !46, metadata !1, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
!16 = metadata !{i32 786689, metadata !0, metadata !"ptr", metadata !1, i32 11, metadata !6, i32 0, null} ; [ DW_TAG_arg_variable ]
!17 = metadata !{i32 786689, metadata !0, metadata !"val", metadata !1, i32 11, metadata !7, i32 0, null} ; [ DW_TAG_arg_variable ]
!18 = metadata !{i32 786689, metadata !0, metadata !"c", metadata !1, i32 11, metadata !8, i32 0, null} ; [ DW_TAG_arg_variable ]
@@ -83,14 +84,14 @@ declare i32 @puts(i8* nocapture) nounwind
!22 = metadata !{i32 786689, metadata !10, metadata !"argc", metadata !1, i32 17, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!23 = metadata !{i32 786689, metadata !10, metadata !"argv", metadata !1, i32 17, metadata !13, i32 0, null} ; [ DW_TAG_arg_variable ]
!24 = metadata !{i32 786688, metadata !25, metadata !"dval", metadata !1, i32 19, metadata !7, i32 0, null} ; [ DW_TAG_auto_variable ]
-!25 = metadata !{i32 786443, metadata !1, metadata !10, i32 18, i32 0, i32 2} ; [ DW_TAG_lexical_block ]
+!25 = metadata !{i32 786443, metadata !46, metadata !10, i32 18, i32 0, i32 2} ; [ DW_TAG_lexical_block ]
!26 = metadata !{i32 4, i32 0, metadata !9, null}
!27 = metadata !{i32 6, i32 0, metadata !28, null}
-!28 = metadata !{i32 786443, metadata !1, metadata !9, i32 5, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
+!28 = metadata !{i32 786443, metadata !46, metadata !9, i32 5, i32 0, i32 1} ; [ DW_TAG_lexical_block ]
!29 = metadata !{i32 7, i32 0, metadata !28, null}
!30 = metadata !{i32 11, i32 0, metadata !0, null}
!31 = metadata !{i32 13, i32 0, metadata !32, null}
-!32 = metadata !{i32 786443, metadata !1, metadata !0, i32 12, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
+!32 = metadata !{i32 786443, metadata !46, metadata !0, i32 12, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
!33 = metadata !{i32 14, i32 0, metadata !32, null}
!34 = metadata !{i32 17, i32 0, metadata !10, null}
!35 = metadata !{i32 19, i32 0, metadata !25, null}
@@ -105,3 +106,5 @@ declare i32 @puts(i8* nocapture) nounwind
!44 = metadata !{metadata !19, metadata !20, metadata !21}
!45 = metadata !{metadata !22, metadata !23, metadata !24}
!46 = metadata !{metadata !"a.c", metadata !"/tmp/"}
+!47 = metadata !{i32 0}
+!48 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-qreg.ll b/test/CodeGen/ARM/debug-info-qreg.ll
index 1de6ffaeec7d..ee515fd55c81 100644
--- a/test/CodeGen/ARM/debug-info-qreg.ll
+++ b/test/CodeGen/ARM/debug-info-qreg.ll
@@ -36,24 +36,25 @@ declare i32 @printf(i8* nocapture, ...) nounwind
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!56}
-!0 = metadata !{i32 786478, metadata !1, metadata !"test0001", metadata !"test0001", metadata !"", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 3} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786478, metadata !54, metadata !1, metadata !"test0001", metadata !"test0001", metadata !"", i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 3} ; [ DW_TAG_subprogram ] [line 3] [def] [test0001]
!1 = metadata !{i32 786473, metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, metadata !17, metadata !17, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786454, metadata !54, metadata !2, metadata !"v4f32", i32 14, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_typedef ]
-!6 = metadata !{i32 786691, metadata !2, metadata !"", metadata !2, i32 0, i64 128, i64 128, i32 0, i32 0, metadata !7, metadata !8, i32 0, i32 0} ; [ DW_TAG_vector_type ]
+!6 = metadata !{i32 786433, metadata !2, null, metadata !2, i32 0, i64 128, i64 128, i32 0, i32 0, metadata !7, metadata !8, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [from float]
!7 = metadata !{i32 786468, null, metadata !2, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!8 = metadata !{metadata !9}
!9 = metadata !{i32 786465, i64 0, i64 4} ; [ DW_TAG_subrange_type ]
-!10 = metadata !{i32 786478, metadata !1, metadata !"main", metadata !"main", metadata !"", metadata !1, i32 59, metadata !11, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !52, i32 59} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !12, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!10 = metadata !{i32 786478, metadata !54, metadata !1, metadata !"main", metadata !"main", metadata !"", i32 59, metadata !11, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !52, i32 59} ; [ DW_TAG_subprogram ] [line 59] [def] [main]
+!11 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !12, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!12 = metadata !{metadata !13}
!13 = metadata !{i32 786468, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!14 = metadata !{i32 786478, metadata !15, metadata !"printFV", metadata !"printFV", metadata !"", metadata !15, i32 41, metadata !16, i1 true, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, null, null, null, metadata !53, i32 41} ; [ DW_TAG_subprogram ]
+!14 = metadata !{i32 786478, metadata !55, metadata !15, metadata !"printFV", metadata !"printFV", metadata !"", i32 41, metadata !16, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, metadata !53, i32 41} ; [ DW_TAG_subprogram ] [line 41] [local] [def] [printFV]
!15 = metadata !{i32 786473, metadata !55} ; [ DW_TAG_file_type ]
-!16 = metadata !{i32 786453, metadata !55, metadata !15, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !17, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!16 = metadata !{i32 786453, metadata !55, metadata !15, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !17, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!17 = metadata !{null}
!18 = metadata !{i32 786689, metadata !0, metadata !"a", metadata !1, i32 16777219, metadata !7, i32 0, null} ; [ DW_TAG_arg_variable ]
!19 = metadata !{i32 786689, metadata !10, metadata !"argc", metadata !1, i32 16777275, metadata !13, i32 0, null} ; [ DW_TAG_arg_variable ]
@@ -62,7 +63,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!22 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !23} ; [ DW_TAG_pointer_type ]
!23 = metadata !{i32 786468, null, metadata !2, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
!24 = metadata !{i32 786688, metadata !25, metadata !"i", metadata !1, i32 60, metadata !13, i32 0, null} ; [ DW_TAG_auto_variable ]
-!25 = metadata !{i32 786443, metadata !1, metadata !10, i32 59, i32 33, i32 14} ; [ DW_TAG_lexical_block ]
+!25 = metadata !{i32 786443, metadata !54, metadata !10, i32 59, i32 33, i32 14} ; [ DW_TAG_lexical_block ]
!26 = metadata !{i32 786688, metadata !25, metadata !"j", metadata !1, i32 60, metadata !13, i32 0, null} ; [ DW_TAG_auto_variable ]
!27 = metadata !{i32 786688, metadata !25, metadata !"x", metadata !1, i32 61, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
!28 = metadata !{i32 786688, metadata !25, metadata !"y", metadata !1, i32 62, metadata !5, i32 0, null} ; [ DW_TAG_auto_variable ]
@@ -70,21 +71,21 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!30 = metadata !{i32 786689, metadata !14, metadata !"F", metadata !15, i32 16777257, metadata !31, i32 0, null} ; [ DW_TAG_arg_variable ]
!31 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !32} ; [ DW_TAG_pointer_type ]
!32 = metadata !{i32 786454, metadata !55, metadata !2, metadata !"FV", i32 25, i64 0, i64 0, i64 0, i32 0, metadata !33} ; [ DW_TAG_typedef ]
-!33 = metadata !{i32 786455, metadata !55, metadata !2, metadata !"", i32 22, i64 128, i64 128, i64 0, i32 0, i32 0, metadata !34, i32 0, i32 0} ; [ DW_TAG_union_type ]
+!33 = metadata !{i32 786455, metadata !55, metadata !2, metadata !"", i32 22, i64 128, i64 128, i64 0, i32 0, i32 0, metadata !34, i32 0, null} ; [ DW_TAG_union_type ]
!34 = metadata !{metadata !35, metadata !37}
!35 = metadata !{i32 786445, metadata !55, metadata !15, metadata !"V", i32 23, i64 128, i64 128, i64 0, i32 0, metadata !36} ; [ DW_TAG_member ]
!36 = metadata !{i32 786454, metadata !55, metadata !2, metadata !"v4sf", i32 3, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_typedef ]
!37 = metadata !{i32 786445, metadata !55, metadata !15, metadata !"A", i32 24, i64 128, i64 32, i64 0, i32 0, metadata !38} ; [ DW_TAG_member ]
!38 = metadata !{i32 786433, null, metadata !2, metadata !"", i32 0, i64 128, i64 32, i32 0, i32 0, metadata !7, metadata !8, i32 0, i32 0} ; [ DW_TAG_array_type ]
!39 = metadata !{i32 79, i32 7, metadata !40, null}
-!40 = metadata !{i32 786443, metadata !1, metadata !41, i32 75, i32 35, i32 18} ; [ DW_TAG_lexical_block ]
-!41 = metadata !{i32 786443, metadata !1, metadata !42, i32 75, i32 5, i32 17} ; [ DW_TAG_lexical_block ]
-!42 = metadata !{i32 786443, metadata !1, metadata !43, i32 71, i32 32, i32 16} ; [ DW_TAG_lexical_block ]
-!43 = metadata !{i32 786443, metadata !1, metadata !25, i32 71, i32 3, i32 15} ; [ DW_TAG_lexical_block ]
+!40 = metadata !{i32 786443, metadata !54, metadata !41, i32 75, i32 35, i32 18} ; [ DW_TAG_lexical_block ]
+!41 = metadata !{i32 786443, metadata !54, metadata !42, i32 75, i32 5, i32 17} ; [ DW_TAG_lexical_block ]
+!42 = metadata !{i32 786443, metadata !54, metadata !43, i32 71, i32 32, i32 16} ; [ DW_TAG_lexical_block ]
+!43 = metadata !{i32 786443, metadata !54, metadata !25, i32 71, i32 3, i32 15} ; [ DW_TAG_lexical_block ]
!44 = metadata !{i32 75, i32 5, metadata !42, null}
!45 = metadata !{i32 42, i32 2, metadata !46, metadata !48}
-!46 = metadata !{i32 786443, metadata !15, metadata !47, i32 42, i32 2, i32 20} ; [ DW_TAG_lexical_block ]
-!47 = metadata !{i32 786443, metadata !15, metadata !14, i32 41, i32 28, i32 19} ; [ DW_TAG_lexical_block ]
+!46 = metadata !{i32 786443, metadata !55, metadata !47, i32 42, i32 2, i32 20} ; [ DW_TAG_lexical_block ]
+!47 = metadata !{i32 786443, metadata !55, metadata !14, i32 41, i32 28, i32 19} ; [ DW_TAG_lexical_block ]
!48 = metadata !{i32 95, i32 3, metadata !25, null}
!49 = metadata !{i32 99, i32 3, metadata !25, null}
!50 = metadata !{metadata !0, metadata !10, metadata !14}
@@ -93,3 +94,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!53 = metadata !{metadata !30}
!54 = metadata !{metadata !"build2.c", metadata !"/private/tmp"}
!55 = metadata !{metadata !"/Volumes/Lalgate/work/llvm/projects/llvm-test/SingleSource/UnitTests/Vector/helpers.h", metadata !"/private/tmp"}
+!56 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-s16-reg.ll b/test/CodeGen/ARM/debug-info-s16-reg.ll
index 186894232eaf..e92d9776db8c 100644
--- a/test/CodeGen/ARM/debug-info-s16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-s16-reg.ll
@@ -62,42 +62,43 @@ declare i32 @puts(i8* nocapture) nounwind optsize
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!53}
-!0 = metadata !{i32 786478, metadata !1, metadata !"inlineprinter", metadata !"inlineprinter", metadata !"", metadata !1, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i8*, float, i8)* @inlineprinter, null, null, metadata !48, i32 5} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786478, metadata !51, metadata !1, metadata !"inlineprinter", metadata !"inlineprinter", metadata !"", i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, float, i8)* @inlineprinter, null, null, metadata !48, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [inlineprinter]
!1 = metadata !{i32 786473, metadata !51} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !47, null, null, null} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!2 = metadata !{i32 786449, metadata !51, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, metadata !52, metadata !52, metadata !47, null, null, null} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786453, metadata !51, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{metadata !5}
-!5 = metadata !{i32 786468, metadata !2, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 786478, metadata !1, metadata !"printer", metadata !"printer", metadata !"", metadata !1, i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i8*, float, i8)* @printer, null, null, metadata !49, i32 12} ; [ DW_TAG_subprogram ]
-!7 = metadata !{i32 786478, metadata !1, metadata !"main", metadata !"main", metadata !"", metadata !1, i32 18, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !50, i32 18} ; [ DW_TAG_subprogram ]
+!5 = metadata !{i32 786468, null, metadata !2, metadata !"int", i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 786478, metadata !51, metadata !1, metadata !"printer", metadata !"printer", metadata !"", i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, float, i8)* @printer, null, null, metadata !49, i32 12} ; [ DW_TAG_subprogram ] [line 12] [def] [printer]
+!7 = metadata !{i32 786478, metadata !51, metadata !1, metadata !"main", metadata !"main", metadata !"", i32 18, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i32, i8**)* @main, null, null, metadata !50, i32 18} ; [ DW_TAG_subprogram ] [line 18] [def] [main]
!8 = metadata !{i32 786689, metadata !0, metadata !"ptr", metadata !1, i32 16777220, metadata !9, i32 0, null} ; [ DW_TAG_arg_variable ]
-!9 = metadata !{i32 786447, metadata !2, metadata !"", null, i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
+!9 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
!10 = metadata !{i32 786689, metadata !0, metadata !"val", metadata !1, i32 33554436, metadata !11, i32 0, null} ; [ DW_TAG_arg_variable ]
-!11 = metadata !{i32 786468, metadata !2, metadata !"float", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
+!11 = metadata !{i32 786468, null, metadata !2, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!12 = metadata !{i32 786689, metadata !0, metadata !"c", metadata !1, i32 50331652, metadata !13, i32 0, null} ; [ DW_TAG_arg_variable ]
-!13 = metadata !{i32 786468, metadata !2, metadata !"unsigned char", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
+!13 = metadata !{i32 786468, null, metadata !2, metadata !"unsigned char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 8} ; [ DW_TAG_base_type ]
!14 = metadata !{i32 786689, metadata !6, metadata !"ptr", metadata !1, i32 16777227, metadata !9, i32 0, null} ; [ DW_TAG_arg_variable ]
!15 = metadata !{i32 786689, metadata !6, metadata !"val", metadata !1, i32 33554443, metadata !11, i32 0, null} ; [ DW_TAG_arg_variable ]
!16 = metadata !{i32 786689, metadata !6, metadata !"c", metadata !1, i32 50331659, metadata !13, i32 0, null} ; [ DW_TAG_arg_variable ]
!17 = metadata !{i32 786689, metadata !7, metadata !"argc", metadata !1, i32 16777233, metadata !5, i32 0, null} ; [ DW_TAG_arg_variable ]
!18 = metadata !{i32 786689, metadata !7, metadata !"argv", metadata !1, i32 33554449, metadata !19, i32 0, null} ; [ DW_TAG_arg_variable ]
-!19 = metadata !{i32 786447, metadata !2, metadata !"", null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !20} ; [ DW_TAG_pointer_type ]
-!20 = metadata !{i32 786447, metadata !2, metadata !"", null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !21} ; [ DW_TAG_pointer_type ]
-!21 = metadata !{i32 786468, metadata !2, metadata !"char", null, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
+!19 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !20} ; [ DW_TAG_pointer_type ]
+!20 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !21} ; [ DW_TAG_pointer_type ]
+!21 = metadata !{i32 786468, null, metadata !2, metadata !"char", i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
!22 = metadata !{i32 786688, metadata !23, metadata !"dval", metadata !1, i32 19, metadata !11, i32 0, null} ; [ DW_TAG_auto_variable ]
-!23 = metadata !{i32 786443, metadata !1, metadata !7, i32 18, i32 1, i32 2} ; [ DW_TAG_lexical_block ]
+!23 = metadata !{i32 786443, metadata !51, metadata !7, i32 18, i32 1, i32 2} ; [ DW_TAG_lexical_block ]
!24 = metadata !{i32 4, i32 22, metadata !0, null}
!25 = metadata !{i32 4, i32 33, metadata !0, null}
!26 = metadata !{i32 4, i32 52, metadata !0, null}
!27 = metadata !{i32 6, i32 3, metadata !28, null}
-!28 = metadata !{i32 786443, metadata !1, metadata !0, i32 5, i32 1, i32 0} ; [ DW_TAG_lexical_block ]
+!28 = metadata !{i32 786443, metadata !51, metadata !0, i32 5, i32 1, i32 0} ; [ DW_TAG_lexical_block ]
!29 = metadata !{i32 7, i32 3, metadata !28, null}
!30 = metadata !{i32 11, i32 42, metadata !6, null}
!31 = metadata !{i32 11, i32 53, metadata !6, null}
!32 = metadata !{i32 11, i32 72, metadata !6, null}
!33 = metadata !{i32 13, i32 3, metadata !34, null}
-!34 = metadata !{i32 786443, metadata !1, metadata !6, i32 12, i32 1, i32 1} ; [ DW_TAG_lexical_block ]
+!34 = metadata !{i32 786443, metadata !51, metadata !6, i32 12, i32 1, i32 1} ; [ DW_TAG_lexical_block ]
!35 = metadata !{i32 14, i32 3, metadata !34, null}
!36 = metadata !{i32 17, i32 15, metadata !7, null}
!37 = metadata !{i32 17, i32 28, metadata !7, null}
@@ -115,3 +116,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!49 = metadata !{metadata !14, metadata !15, metadata !16}
!50 = metadata !{metadata !17, metadata !18, metadata !22}
!51 = metadata !{metadata !"a.c", metadata !"/private/tmp"}
+!52 = metadata !{i32 0}
+!53 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index ba83f797e2ce..854fcabbae87 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -40,18 +40,19 @@ declare float @_Z2f3f(float) optsize
declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!20}
-!0 = metadata !{i32 786449, i32 4, metadata !2, metadata !"clang version 3.0 (trunk 130845)", i1 true, metadata !"", i32 0, null, null, metadata !16, null, null, null} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"_Z3foov", metadata !2, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, void ()* @_Z3foov, null, null, metadata !17, i32 5} ; [ DW_TAG_subprogram ]
+!0 = metadata !{i32 786449, metadata !18, i32 4, metadata !"clang version 3.0 (trunk 130845)", i1 true, metadata !"", i32 0, metadata !19, metadata !19, metadata !16, null, null, null} ; [ DW_TAG_compile_unit ]
+!1 = metadata !{i32 786478, metadata !18, metadata !2, metadata !"foo", metadata !"foo", metadata !"_Z3foov", i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, void ()* @_Z3foov, null, null, metadata !17, i32 5} ; [ DW_TAG_subprogram ] [line 5] [def] [foo]
!2 = metadata !{i32 786473, metadata !18} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
+!3 = metadata !{i32 786453, metadata !18, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, null, metadata !4, i32 0, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!4 = metadata !{null}
!5 = metadata !{i32 786688, metadata !6, metadata !"k", metadata !2, i32 6, metadata !7, i32 0, null} ; [ DW_TAG_auto_variable ]
-!6 = metadata !{i32 786443, metadata !2, metadata !1, i32 5, i32 12, i32 0} ; [ DW_TAG_lexical_block ]
-!7 = metadata !{i32 786468, metadata !0, metadata !"float", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
+!6 = metadata !{i32 786443, metadata !18, metadata !1, i32 5, i32 12, i32 0} ; [ DW_TAG_lexical_block ]
+!7 = metadata !{i32 786468, null, metadata !0, metadata !"float", i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
!8 = metadata !{i32 786688, metadata !9, metadata !"y", metadata !2, i32 8, metadata !7, i32 0, null} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{i32 786443, metadata !2, metadata !10, i32 7, i32 25, i32 2} ; [ DW_TAG_lexical_block ]
-!10 = metadata !{i32 786443, metadata !2, metadata !6, i32 7, i32 3, i32 1} ; [ DW_TAG_lexical_block ]
+!9 = metadata !{i32 786443, metadata !18, metadata !10, i32 7, i32 25, i32 2} ; [ DW_TAG_lexical_block ]
+!10 = metadata !{i32 786443, metadata !18, metadata !6, i32 7, i32 3, i32 1} ; [ DW_TAG_lexical_block ]
!11 = metadata !{i32 6, i32 18, metadata !6, null}
!12 = metadata !{i32 7, i32 3, metadata !6, null}
!13 = metadata !{i32 8, i32 20, metadata !9, null}
@@ -60,3 +61,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!16 = metadata !{metadata !1}
!17 = metadata !{metadata !5, metadata !8}
!18 = metadata !{metadata !"k.cc", metadata !"/private/tmp"}
+!19 = metadata !{i32 0}
+!20 = metadata !{i32 1, metadata !"Debug Info Version", i32 1}
diff --git a/test/CodeGen/ARM/div.ll b/test/CodeGen/ARM/div.ll
index 82cfca182b80..a339c816c578 100644
--- a/test/CodeGen/ARM/div.ll
+++ b/test/CodeGen/ARM/div.ll
@@ -1,13 +1,14 @@
; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK-ARM
-; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=swift | FileCheck %s -check-prefix=CHECK-SWIFT
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=swift | FileCheck %s -check-prefix=CHECK-HWDIV
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-r5 | FileCheck %s -check-prefix=CHECK-HWDIV
define i32 @f1(i32 %a, i32 %b) {
entry:
; CHECK-ARM: f1
; CHECK-ARM: __divsi3
-; CHECK-SWIFT: f1
-; CHECK-SWIFT: sdiv
+; CHECK-HWDIV: f1
+; CHECK-HWDIV: sdiv
%tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -17,8 +18,8 @@ entry:
; CHECK-ARM: f2
; CHECK-ARM: __udivsi3
-; CHECK-SWIFT: f2
-; CHECK-SWIFT: udiv
+; CHECK-HWDIV: f2
+; CHECK-HWDIV: udiv
%tmp1 = udiv i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -28,9 +29,9 @@ entry:
; CHECK-ARM: f3
; CHECK-ARM: __modsi3
-; CHECK-SWIFT: f3
-; CHECK-SWIFT: sdiv
-; CHECK-SWIFT: mls
+; CHECK-HWDIV: f3
+; CHECK-HWDIV: sdiv
+; CHECK-HWDIV: mls
%tmp1 = srem i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -40,9 +41,9 @@ entry:
; CHECK-ARM: f4
; CHECK-ARM: __umodsi3
-; CHECK-SWIFT: f4
-; CHECK-SWIFT: udiv
-; CHECK-SWIFT: mls
+; CHECK-HWDIV: f4
+; CHECK-HWDIV: udiv
+; CHECK-HWDIV: mls
%tmp1 = urem i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp1
}
diff --git a/test/CodeGen/ARM/divmod-eabi.ll b/test/CodeGen/ARM/divmod-eabi.ll
new file mode 100644
index 000000000000..404cae0da2b2
--- /dev/null
+++ b/test/CodeGen/ARM/divmod-eabi.ll
@@ -0,0 +1,202 @@
+; RUN: llc -mtriple armv7-none-eabi %s -o - | FileCheck %s --check-prefix=EABI
+; RUN: llc -mtriple armv7-linux-gnueabi %s -o - | FileCheck %s --check-prefix=GNU
+; RUN: llc -mtriple armv7-apple-darwin %s -o - | FileCheck %s --check-prefix=DARWIN
+
+define signext i16 @f16(i16 signext %a, i16 signext %b) {
+; EABI-LABEL: f16:
+; GNU-LABEL: f16:
+; DARWIN-LABEL: f16:
+entry:
+ %conv = sext i16 %a to i32
+ %conv1 = sext i16 %b to i32
+ %div = sdiv i32 %conv, %conv1
+ %rem = srem i32 %conv, %conv1
+; EABI: __aeabi_idivmod
+; EABI: mov [[div:r[0-9]+]], r0
+; EABI: mov [[rem:r[0-9]+]], r1
+; GNU: __aeabi_idiv
+; GNU: mov [[sum:r[0-9]+]], r0
+; GNU: __modsi3
+; GNU: add [[sum]]{{.*}}r0
+; DARWIN: ___divsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN: __modsi3
+; DARWIN: add [[sum]]{{.*}}r0
+ %rem8 = srem i32 %conv1, %conv
+; EABI: __aeabi_idivmod
+; GNU: __modsi3
+; DARWIN: __modsi3
+ %add = add nsw i32 %rem, %div
+ %add13 = add nsw i32 %add, %rem8
+ %conv14 = trunc i32 %add13 to i16
+; EABI: add r0{{.*}}r1
+; EABI: sxth r0, r0
+; GNU: add r0{{.*}}[[sum]]
+; GNU: sxth r0, r0
+; DARWIN: add r0{{.*}}[[sum]]
+; DARWIN: sxth r0, r0
+ ret i16 %conv14
+}
+
+define i32 @f32(i32 %a, i32 %b) {
+; EABI-LABEL: f32:
+; GNU-LABEL: f32:
+; DARWIN-LABEL: f32:
+entry:
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+; EABI: __aeabi_idivmod
+; EABI: mov [[div:r[0-9]+]], r0
+; EABI: mov [[rem:r[0-9]+]], r1
+; GNU: __aeabi_idiv
+; GNU: mov [[sum:r[0-9]+]], r0
+; GNU: __modsi3
+; GNU: add [[sum]]{{.*}}r0
+; DARWIN: ___divsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN: __modsi3
+; DARWIN: add [[sum]]{{.*}}r0
+ %rem1 = srem i32 %b, %a
+; EABI: __aeabi_idivmod
+; GNU: __modsi3
+; DARWIN: __modsi3
+ %add = add nsw i32 %rem, %div
+ %add2 = add nsw i32 %add, %rem1
+; EABI: add r0{{.*}}r1
+; GNU: add r0{{.*}}[[sum]]
+; DARWIN: add r0{{.*}}[[sum]]
+ ret i32 %add2
+}
+
+define i32 @uf(i32 %a, i32 %b) {
+; EABI-LABEL: uf:
+; GNU-LABEL: uf:
+; DARWIN-LABEL: uf:
+entry:
+ %div = udiv i32 %a, %b
+ %rem = urem i32 %a, %b
+; EABI: __aeabi_uidivmod
+; GNU: __aeabi_uidiv
+; GNU: mov [[sum:r[0-9]+]], r0
+; GNU: __umodsi3
+; GNU: add [[sum]]{{.*}}r0
+; DARWIN: ___udivsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN: __umodsi3
+; DARWIN: add [[sum]]{{.*}}r0
+ %rem1 = urem i32 %b, %a
+; EABI: __aeabi_uidivmod
+; GNU: __umodsi3
+; DARWIN: __umodsi3
+ %add = add nuw i32 %rem, %div
+ %add2 = add nuw i32 %add, %rem1
+; EABI: add r0{{.*}}r1
+; GNU: add r0{{.*}}[[sum]]
+; DARWIN: add r0{{.*}}[[sum]]
+ ret i32 %add2
+}
+
+; FIXME: AEABI is not lowering long u/srem into u/ldivmod
+define i64 @longf(i64 %a, i64 %b) {
+; EABI-LABEL: longf:
+; GNU-LABEL: longf:
+; DARWIN-LABEL: longf:
+entry:
+ %div = sdiv i64 %a, %b
+ %rem = srem i64 %a, %b
+; EABI: __aeabi_ldivmod
+; GNU: __aeabi_ldivmod
+; GNU: mov [[div1:r[0-9]+]], r0
+; GNU: mov [[div2:r[0-9]+]], r1
+; DARWIN: ___divdi3
+; DARWIN: mov [[div1:r[0-9]+]], r0
+; DARWIN: mov [[div2:r[0-9]+]], r1
+; DARWIN: __moddi3
+ %add = add nsw i64 %rem, %div
+; GNU: adds r0{{.*}}[[div1]]
+; GNU: adc r1{{.*}}[[div2]]
+; DARWIN: adds r0{{.*}}[[div1]]
+; DARWIN: adc r1{{.*}}[[div2]]
+ ret i64 %add
+}
+
+define i32 @g1(i32 %a, i32 %b) {
+; EABI-LABEL: g1:
+; GNU-LABEL: g1:
+; DARWIN-LABEL: g1:
+entry:
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+; EABI: __aeabi_idivmod
+; GNU: __aeabi_idiv
+; GNU: mov [[sum:r[0-9]+]], r0
+; GNU: __modsi3
+; DARWIN: ___divsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+; DARWIN: __modsi3
+ %add = add nsw i32 %rem, %div
+; EABI: add r0{{.*}}r1
+; GNU: add r0{{.*}}[[sum]]
+; DARWIN: add r0{{.*}}[[sum]]
+ ret i32 %add
+}
+
+; On both Darwin and GNU, this is just a call to __modsi3
+define i32 @g2(i32 %a, i32 %b) {
+; EABI-LABEL: g2:
+; GNU-LABEL: g2:
+; DARWIN-LABEL: g2:
+entry:
+ %rem = srem i32 %a, %b
+; EABI: __aeabi_idivmod
+; GNU: __modsi3
+; DARWIN: __modsi3
+ ret i32 %rem
+; EABI: mov r0, r1
+}
+
+define i32 @g3(i32 %a, i32 %b) {
+; EABI-LABEL: g3:
+; GNU-LABEL: g3:
+; DARWIN-LABEL: g3:
+entry:
+ %rem = srem i32 %a, %b
+; EABI: __aeabi_idivmod
+; EABI: mov [[mod:r[0-9]+]], r1
+; GNU: __modsi3
+; GNU: mov [[sum:r[0-9]+]], r0
+; DARWIN: __modsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+ %rem1 = srem i32 %b, %rem
+; EABI: __aeabi_idivmod
+; GNU: __modsi3
+; DARWIN: __modsi3
+ %add = add nsw i32 %rem1, %rem
+; EABI: add r0, r1, [[mod]]
+; GNU: add r0{{.*}}[[sum]]
+; DARWIN: add r0{{.*}}[[sum]]
+ ret i32 %add
+}
+
+define i32 @g4(i32 %a, i32 %b) {
+; EABI-LABEL: g4:
+; GNU-LABEL: g4:
+; DARWIN-LABEL: g4:
+entry:
+ %div = sdiv i32 %a, %b
+; EABI: __aeabi_idivmod
+; EABI: mov [[div:r[0-9]+]], r0
+; GNU: __aeabi_idiv
+; GNU: mov [[sum:r[0-9]+]], r0
+; DARWIN: ___divsi3
+; DARWIN: mov [[sum:r[0-9]+]], r0
+ %rem = srem i32 %b, %div
+; EABI: __aeabi_idivmod
+; GNU: __modsi3
+; DARWIN: __modsi3
+ %add = add nsw i32 %rem, %div
+; EABI: add r0, r1, [[div]]
+; GNU: add r0{{.*}}[[sum]]
+; DARWIN: add r0{{.*}}[[sum]]
+ ret i32 %add
+}
diff --git a/test/CodeGen/ARM/divmod.ll b/test/CodeGen/ARM/divmod.ll
index 577f8aa7d39b..7be0c796bd21 100644
--- a/test/CodeGen/ARM/divmod.ll
+++ b/test/CodeGen/ARM/divmod.ll
@@ -5,11 +5,11 @@
define void @foo(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp {
entry:
-; A8: foo:
+; A8-LABEL: foo:
; A8: bl ___divmodsi4
; A8-NOT: bl ___divmodsi4
-; SWIFT: foo:
+; SWIFT-LABEL: foo:
; SWIFT: sdiv
; SWIFT: mls
; SWIFT-NOT: bl __divmodsi4
@@ -23,11 +23,11 @@ entry:
define void @bar(i32 %x, i32 %y, i32* nocapture %P) nounwind ssp {
entry:
-; A8: bar:
+; A8-LABEL: bar:
; A8: bl ___udivmodsi4
; A8-NOT: bl ___udivmodsi4
-; SWIFT: bar:
+; SWIFT-LABEL: bar:
; SWIFT: udiv
; SWIFT: mls
; SWIFT-NOT: bl __udivmodsi4
@@ -45,8 +45,8 @@ entry:
define void @do_indent(i32 %cols) nounwind {
entry:
-; A8: do_indent:
-; SWIFT: do_indent:
+; A8-LABEL: do_indent:
+; SWIFT-LABEL: do_indent:
%0 = load i32* @flags, align 4
%1 = and i32 %0, 67108864
%2 = icmp eq i32 %1, 0
@@ -60,7 +60,7 @@ bb:
%3 = load i32* @tabsize, align 4
%4 = srem i32 %cols, %3
%5 = sdiv i32 %cols, %3
- %6 = tail call i32 @llvm.objectsize.i32(i8* null, i1 false)
+ %6 = tail call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 false)
%7 = tail call i8* @__memset_chk(i8* null, i32 9, i32 %5, i32 %6) nounwind
br label %bb1
@@ -71,17 +71,17 @@ bb1:
ret void
}
-declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readnone
+declare i32 @llvm.objectsize.i32.p0i8(i8*, i1) nounwind readnone
declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind
; rdar://11714607
define i32 @howmany(i32 %x, i32 %y) nounwind {
entry:
-; A8: howmany:
+; A8-LABEL: howmany:
; A8: bl ___udivmodsi4
; A8-NOT: ___udivsi3
-; SWIFT: howmany:
+; SWIFT-LABEL: howmany:
; SWIFT: udiv
; SWIFT: mls
; SWIFT-NOT: bl __udivmodsi4
diff --git a/test/CodeGen/ARM/domain-conv-vmovs.ll b/test/CodeGen/ARM/domain-conv-vmovs.ll
index b5586cc99fc1..d6528db741cd 100644
--- a/test/CodeGen/ARM/domain-conv-vmovs.ll
+++ b/test/CodeGen/ARM/domain-conv-vmovs.ll
@@ -1,7 +1,7 @@
; RUN: llc -verify-machineinstrs -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a9 -mattr=+neon,+neonfp -float-abi=hard < %s | FileCheck %s
define <2 x float> @test_vmovs_via_vext_lane0to0(float %arg, <2 x float> %in) {
-; CHECK: test_vmovs_via_vext_lane0to0:
+; CHECK-LABEL: test_vmovs_via_vext_lane0to0:
%vec = insertelement <2 x float> %in, float %arg, i32 0
%res = fadd <2 x float> %vec, %vec
@@ -13,7 +13,7 @@ define <2 x float> @test_vmovs_via_vext_lane0to0(float %arg, <2 x float> %in) {
}
define <2 x float> @test_vmovs_via_vext_lane0to1(float %arg, <2 x float> %in) {
-; CHECK: test_vmovs_via_vext_lane0to1:
+; CHECK-LABEL: test_vmovs_via_vext_lane0to1:
%vec = insertelement <2 x float> %in, float %arg, i32 1
%res = fadd <2 x float> %vec, %vec
@@ -25,7 +25,7 @@ define <2 x float> @test_vmovs_via_vext_lane0to1(float %arg, <2 x float> %in) {
}
define <2 x float> @test_vmovs_via_vext_lane1to0(float, float %arg, <2 x float> %in) {
-; CHECK: test_vmovs_via_vext_lane1to0:
+; CHECK-LABEL: test_vmovs_via_vext_lane1to0:
%vec = insertelement <2 x float> %in, float %arg, i32 0
%res = fadd <2 x float> %vec, %vec
@@ -37,7 +37,7 @@ define <2 x float> @test_vmovs_via_vext_lane1to0(float, float %arg, <2 x float>
}
define <2 x float> @test_vmovs_via_vext_lane1to1(float, float %arg, <2 x float> %in) {
-; CHECK: test_vmovs_via_vext_lane1to1:
+; CHECK-LABEL: test_vmovs_via_vext_lane1to1:
%vec = insertelement <2 x float> %in, float %arg, i32 1
%res = fadd <2 x float> %vec, %vec
@@ -50,7 +50,7 @@ define <2 x float> @test_vmovs_via_vext_lane1to1(float, float %arg, <2 x float>
define float @test_vmovs_via_vdup(float, float %ret, float %lhs, float %rhs) {
-; CHECK: test_vmovs_via_vdup:
+; CHECK-LABEL: test_vmovs_via_vdup:
; Do an operation (which will end up NEON because of +neonfp) to convince the
; execution-domain pass that NEON is a good thing to use.
@@ -68,7 +68,7 @@ declare void @bar()
; This is a comp
define float @test_ineligible(float, float %in) {
-; CHECK: test_ineligible:
+; CHECK-LABEL: test_ineligible:
%sqrt = call float @llvm.sqrt.f32(float %in)
%val = fadd float %sqrt, %sqrt
@@ -85,7 +85,7 @@ define float @test_ineligible(float, float %in) {
}
define i32 @test_vmovs_no_sreg(i32 %in) {
-; CHECK: test_vmovs_no_sreg:
+; CHECK-LABEL: test_vmovs_no_sreg:
; Check that the movement to and from GPRs takes place in the NEON domain.
; CHECK: vmov.32 d
diff --git a/test/CodeGen/ARM/eh-dispcont.ll b/test/CodeGen/ARM/eh-dispcont.ll
index 935965bbdf8b..57ab15feca5e 100644
--- a/test/CodeGen/ARM/eh-dispcont.ll
+++ b/test/CodeGen/ARM/eh-dispcont.ll
@@ -65,10 +65,10 @@ attributes #2 = { noreturn }
; THUMB1-PIC: cxa_throw
; THUMB1-PIC: trap
-; THUMB1-PIC: adr [[REG0:r[0-9]+]], [[LJTI:.*]]
-; THUMB1-PIC: adds [[REG1:r[0-9]+]], [[REG1]], [[REG0]]
-; THUMB1-PIC: ldr [[REG1]]
-; THUMB1-PIC: adds [[REG0]], [[REG1]], [[REG0]]
+; THUMB1-PIC: adr [[REG1:r[0-9]+]], [[LJTI:.*]]
+; THUMB1-PIC: adds [[REG0:r[0-9]+]], [[REG0]], [[REG1]]
+; THUMB1-PIC: ldr [[REG0]]
+; THUMB1-PIC: adds [[REG0]], [[REG0]], [[REG1]]
; THUMB1-PIC: mov pc, [[REG0]]
; THUMB1-PIC: [[LJTI]]
; THUMB1-PIC: .data_region jt32
diff --git a/test/CodeGen/ARM/ehabi-filters.ll b/test/CodeGen/ARM/ehabi-filters.ll
index 4c92a2975d39..cb5291b20e62 100644
--- a/test/CodeGen/ARM/ehabi-filters.ll
+++ b/test/CodeGen/ARM/ehabi-filters.ll
@@ -15,7 +15,7 @@ declare void @__cxa_throw(i8*, i8*, i8*)
declare void @__cxa_call_unexpected(i8*)
define i32 @main() {
-; CHECK: main:
+; CHECK-LABEL: main:
entry:
%exception.i = tail call i8* @__cxa_allocate_exception(i32 4) nounwind
%0 = bitcast i8* %exception.i to i32*
diff --git a/test/CodeGen/ARM/ehabi-mc-cantunwind.ll b/test/CodeGen/ARM/ehabi-mc-cantunwind.ll
deleted file mode 100644
index 698d76e56580..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-cantunwind.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s
-
-define void @test() nounwind {
-entry:
- ret void
-}
-
-; CHECK: section .text
-; CHECK: section .ARM.exidx
-; CHECK-NEXT: 0000 00000000 01000000
diff --git a/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll b/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll
deleted file mode 100644
index 11f3e6db0fe5..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -r - \
-; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -r - \
-; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
-
-define void @_Z4testv() {
-entry:
- tail call void @_Z15throw_exceptionv()
- ret void
-}
-
-declare void @_Z15throw_exceptionv()
-
-; CHECK-NOT: section .ARM.extab
-; CHECK: section .text
-; CHECK-NOT: section .ARM.extab
-; CHECK: section .ARM.exidx
-; CHECK-NEXT: 0000 00000000 80849b80
-; CHECK-NOT: section .ARM.extab
-
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-; CHECK-FP-ELIM: section .text
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-; CHECK-FP-ELIM: section .ARM.exidx
-; CHECK-FP-ELIM-NEXT: 0000 00000000 b0808480
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-
-; CHECK-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
-; CHECK-RELOC-NEXT: 0 R_ARM_PREL31 .text
-; CHECK-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr0
diff --git a/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll b/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll
deleted file mode 100644
index 79dba084c044..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -r - \
-; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -r - \
-; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM-RELOC
-
-define i32 @_Z3addiiiiiiii(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
-entry:
- %add = add nsw i32 %b, %a
- %add1 = add nsw i32 %add, %c
- %add2 = add nsw i32 %add1, %d
- tail call void @_Z15throw_exceptioni(i32 %add2)
- %add3 = add nsw i32 %f, %e
- %add4 = add nsw i32 %add3, %g
- %add5 = add nsw i32 %add4, %h
- tail call void @_Z15throw_exceptioni(i32 %add5)
- %add6 = add nsw i32 %add5, %add2
- ret i32 %add6
-}
-
-declare void @_Z15throw_exceptioni(i32)
-
-; CHECK-NOT: section .ARM.extab
-; CHECK: section .text
-; CHECK: section .ARM.extab
-; CHECK-NEXT: 0000 419b0181 b0b08384
-; CHECK: section .ARM.exidx
-; CHECK-NEXT: 0000 00000000 00000000
-; CHECK-NOT: section .ARM.extab
-
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-; CHECK-FP-ELIM: section .text
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-; CHECK-FP-ELIM: section .ARM.exidx
-; CHECK-FP-ELIM-NEXT: 0000 00000000 b0838480
-; CHECK-FP-ELIM-NOT: section .ARM.extab
-
-; CHECK-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
-; CHECK-RELOC-NEXT: 0 R_ARM_PREL31 .text
-; CHECK-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr1
-
-; CHECK-FP-ELIM-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
-; CHECK-FP-ELIM-RELOC-NEXT: 0 R_ARM_PREL31 .text
-; CHECK-FP-ELIM-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr0
diff --git a/test/CodeGen/ARM/ehabi-mc-section-group.ll b/test/CodeGen/ARM/ehabi-mc-section-group.ll
deleted file mode 100644
index 616aa1ba46e7..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-section-group.ll
+++ /dev/null
@@ -1,88 +0,0 @@
-; Test section group of the function with linkonce_odr
-
-; The instantiation of C++ function template will come with linkonce_odr,
-; which indicates that the linker can remove the duplicated instantiation.
-; However, to make this feature work, we have to group the section properly.
-; .text, .ARM.extab, and .ARM.exidx should be grouped together.
-
-; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-readobj -s -sd \
-; RUN: | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
-target triple = "armv4t--linux-gnueabi"
-
-define void @_Z11instantiatev() {
-entry:
- tail call void @_Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_(i32 1, i32 2, i32 3, i32 4, i32 5, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01)
- ret void
-}
-
-define linkonce_odr void @_Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5, double %v1, double %v2, double %v3, double %v4, double %v5) {
-entry:
- invoke void @_Z5printiiiii(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5)
- to label %try.cont unwind label %lpad
-
-lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- %1 = extractvalue { i8*, i32 } %0, 0
- %2 = tail call i8* @__cxa_begin_catch(i8* %1) nounwind
- invoke void @_Z5printddddd(double %v1, double %v2, double %v3, double %v4, double %v5)
- to label %invoke.cont2 unwind label %lpad1
-
-invoke.cont2: ; preds = %lpad
- tail call void @__cxa_end_catch()
- br label %try.cont
-
-try.cont: ; preds = %entry, %invoke.cont2
- ret void
-
-lpad1: ; preds = %lpad
- %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- invoke void @__cxa_end_catch()
- to label %eh.resume unwind label %terminate.lpad
-
-eh.resume: ; preds = %lpad1
- resume { i8*, i32 } %3
-
-terminate.lpad: ; preds = %lpad1
- %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- tail call void @_ZSt9terminatev() noreturn nounwind
- unreachable
-}
-
-declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @_Z5printddddd(double, double, double, double, double)
-
-declare void @__cxa_end_catch()
-
-declare void @_ZSt9terminatev()
-
-; CHECK: Section {
-; CHECK: Index: 1
-; CHECK-NEXT: Name: .group (47)
-; CHECK: SectionData (
-; CHECK-NEXT: 0000: 01000000 09000000 0B000000 0D000000
-; CHECK-NEXT: )
-
-; CHECK: Section {
-; CHECK: Index: 9
-; CHECK-NEXT: Name: .text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (214)
-
-; CHECK: Section {
-; CHECK: Index: 11
-; CHECK-NEXT: Name: .ARM.extab.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (204)
-
-; CHECK: Section {
-; CHECK: Index: 13
-; CHECK-NEXT: Name: .ARM.exidx.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (90)
diff --git a/test/CodeGen/ARM/ehabi-mc-section.ll b/test/CodeGen/ARM/ehabi-mc-section.ll
deleted file mode 100644
index 4e6e46829148..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-section.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
-
-define void @_Z4testiiiiiddddd(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5, double %v1, double %v2, double %v3, double %v4, double %v5) section ".test_section" {
-entry:
- invoke void @_Z5printiiiii(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5)
- to label %try.cont unwind label %lpad
-
-lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- %1 = extractvalue { i8*, i32 } %0, 0
- %2 = tail call i8* @__cxa_begin_catch(i8* %1) nounwind
- invoke void @_Z5printddddd(double %v1, double %v2, double %v3, double %v4, double %v5)
- to label %invoke.cont2 unwind label %lpad1
-
-invoke.cont2: ; preds = %lpad
- tail call void @__cxa_end_catch()
- br label %try.cont
-
-try.cont: ; preds = %entry, %invoke.cont2
- ret void
-
-lpad1: ; preds = %lpad
- %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- invoke void @__cxa_end_catch()
- to label %eh.resume unwind label %terminate.lpad
-
-eh.resume: ; preds = %lpad1
- resume { i8*, i32 } %3
-
-terminate.lpad: ; preds = %lpad1
- %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- tail call void @_ZSt9terminatev() noreturn nounwind
- unreachable
-}
-
-declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @_Z5printddddd(double, double, double, double, double)
-
-declare void @__cxa_end_catch()
-
-declare void @_ZSt9terminatev()
-
-; CHECK: section .test_section
-; CHECK: section .ARM.extab.test_section
-; CHECK-NEXT: 0000 00000000 c9409b01 b0818484
-; CHECK: section .ARM.exidx.test_section
-; CHECK-NEXT: 0000 00000000 00000000
-
-; CHECK-FP-ELIM: section .test_section
-; CHECK-FP-ELIM: section .ARM.extab.test_section
-; CHECK-FP-ELIM-NEXT: 0000 00000000 84c90501 b0b0b0a8
-; CHECK-FP-ELIM: section .ARM.exidx.test_section
-; CHECK-FP-ELIM-NEXT: 0000 00000000 00000000
diff --git a/test/CodeGen/ARM/ehabi-mc-sh_link.ll b/test/CodeGen/ARM/ehabi-mc-sh_link.ll
deleted file mode 100644
index ac0a0fc9309a..000000000000
--- a/test/CodeGen/ARM/ehabi-mc-sh_link.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; Test the sh_link in Elf32_Shdr.
-
-; The .ARM.exidx section should be linked with corresponding text section.
-; The sh_link in Elf32_Shdr should be filled with the section index of
-; the text section.
-
-; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-readobj -s \
-; RUN: | FileCheck %s
-
-define void @test1() nounwind {
-entry:
- ret void
-}
-
-define void @test2() nounwind section ".test_section" {
-entry:
- ret void
-}
-
-; CHECK: Sections [
-; CHECK: Section {
-; CHECK: Index: 1
-; CHECK-NEXT: Name: .text (16)
-
-; CHECK: Section {
-; CHECK: Name: .ARM.exidx (5)
-; CHECK-NEXT: Type: SHT_ARM_EXIDX
-; CHECK-NEXT: Flags [ (0x82)
-; CHECK-NEXT: SHF_ALLOC
-; CHECK-NEXT: SHF_LINK_ORDER
-; CHECK-NEXT: ]
-; CHECK-NEXT: Address: 0x0
-; CHECK-NEXT: Offset: 0x5C
-; CHECK-NEXT: Size: 8
-; CHECK-NEXT: Link: 1
-; CHECK-NEXT: Info: 0
-; CHECK-NEXT: AddressAlignment: 4
-
-; CHECK: Section {
-; CHECK: Index: 7
-; CHECK-NEXT: Name: .test_section (57)
-
-; CHECK: Section {
-; CHECK: Name: .ARM.exidx.test_section (47)
-; CHECK-NEXT: Type: SHT_ARM_EXIDX
-; CHECK-NEXT: Flags [ (0x82)
-; CHECK-NEXT: SHF_ALLOC
-; CHECK-NEXT: SHF_LINK_ORDER
-; CHECK-NEXT: ]
-; CHECK-NEXT: Address: 0x0
-; CHECK-NEXT: Offset: 0x68
-; CHECK-NEXT: Size: 8
-; CHECK-NEXT: Link: 7
-; CHECK-NEXT: Info: 0
-; CHECK-NEXT: AddressAlignment: 4
diff --git a/test/CodeGen/ARM/ehabi-mc.ll b/test/CodeGen/ARM/ehabi-mc.ll
deleted file mode 100644
index 83b8425af7c4..000000000000
--- a/test/CodeGen/ARM/ehabi-mc.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -disable-fp-elim -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK
-
-; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
-; RUN: -filetype=obj -o - %s \
-; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
-
-define void @_Z4testiiiiiddddd(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5, double %v1, double %v2, double %v3, double %v4, double %v5) {
-entry:
- invoke void @_Z5printiiiii(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5)
- to label %try.cont unwind label %lpad
-
-lpad: ; preds = %entry
- %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- %1 = extractvalue { i8*, i32 } %0, 0
- %2 = tail call i8* @__cxa_begin_catch(i8* %1) nounwind
- invoke void @_Z5printddddd(double %v1, double %v2, double %v3, double %v4, double %v5)
- to label %invoke.cont2 unwind label %lpad1
-
-invoke.cont2: ; preds = %lpad
- tail call void @__cxa_end_catch()
- br label %try.cont
-
-try.cont: ; preds = %entry, %invoke.cont2
- ret void
-
-lpad1: ; preds = %lpad
- %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- cleanup
- invoke void @__cxa_end_catch()
- to label %eh.resume unwind label %terminate.lpad
-
-eh.resume: ; preds = %lpad1
- resume { i8*, i32 } %3
-
-terminate.lpad: ; preds = %lpad1
- %4 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
- catch i8* null
- tail call void @_ZSt9terminatev() noreturn nounwind
- unreachable
-}
-
-declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
-
-declare i32 @__gxx_personality_v0(...)
-
-declare i8* @__cxa_begin_catch(i8*)
-
-declare void @_Z5printddddd(double, double, double, double, double)
-
-declare void @__cxa_end_catch()
-
-declare void @_ZSt9terminatev()
-
-; CHECK: section .text
-; CHECK: section .ARM.extab
-; CHECK-NEXT: 0000 00000000 c9409b01 b0818484
-; CHECK: section .ARM.exidx
-; CHECK-NEXT: 0000 00000000 00000000
-
-; CHECK-FP-ELIM: section .text
-; CHECK-FP-ELIM: section .ARM.extab
-; CHECK-FP-ELIM-NEXT: 0000 00000000 84c90501 b0b0b0a8
-; CHECK-FP-ELIM: section .ARM.exidx
-; CHECK-FP-ELIM-NEXT: 0000 00000000 00000000
diff --git a/test/CodeGen/ARM/ehabi.ll b/test/CodeGen/ARM/ehabi.ll
new file mode 100644
index 000000000000..66446528c31a
--- /dev/null
+++ b/test/CodeGen/ARM/ehabi.ll
@@ -0,0 +1,298 @@
+; ARM EHABI integrated test
+
+; This test case checks whether the ARM unwind directives are properly
+; generated or not.
+
+; The purpose of the test:
+; (1) .fnstart and .fnend directives should wrap the function.
+; (2) .setfp directive should be available if the frame pointer is not eliminated.
+; (3) .save directive should come with push instruction.
+; (4) .vsave directive should come with vpush instruction.
+; (5) .pad directive should come with stack pointer adjustment.
+; (6) .cantunwind directive should be available if the function is marked with
+;     the nounwind function attribute.
+
+; We have to check several cases:
+; (1) arm with -disable-fp-elim
+; (2) arm without -disable-fp-elim
+; (3) armv7 with -disable-fp-elim
+; (4) armv7 without -disable-fp-elim
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP
+
+; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=asm -o - %s \
+; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
+
+;-------------------------------------------------------------------------------
+; Test 1
+;-------------------------------------------------------------------------------
+; This is the LLVM assembly generated from the following C++ code:
+;
+; extern void print(int, int, int, int, int);
+; extern void print(double, double, double, double, double);
+;
+; void test(int a, int b, int c, int d, int e,
+; double m, double n, double p, double q, double r) {
+; try {
+; print(a, b, c, d, e);
+; } catch (...) {
+; print(m, n, p, q, r);
+; }
+; }
+
+declare void @_Z5printiiiii(i32, i32, i32, i32, i32)
+
+declare void @_Z5printddddd(double, double, double, double, double)
+
+define void @_Z4testiiiiiddddd(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e,
+ double %m, double %n, double %p,
+ double %q, double %r) {
+entry:
+ invoke void @_Z5printiiiii(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e)
+ to label %try.cont unwind label %lpad
+
+lpad:
+ %0 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %1 = extractvalue { i8*, i32 } %0, 0
+ %2 = tail call i8* @__cxa_begin_catch(i8* %1)
+ invoke void @_Z5printddddd(double %m, double %n, double %p,
+ double %q, double %r)
+ to label %invoke.cont2 unwind label %lpad1
+
+invoke.cont2:
+ tail call void @__cxa_end_catch()
+ br label %try.cont
+
+try.cont:
+ ret void
+
+lpad1:
+ %3 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ invoke void @__cxa_end_catch()
+ to label %eh.resume unwind label %terminate.lpad
+
+eh.resume:
+ resume { i8*, i32 } %3
+
+terminate.lpad:
+ %4 = landingpad { i8*, i32 }
+ personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ catch i8* null
+ %5 = extractvalue { i8*, i32 } %4, 0
+ tail call void @__clang_call_terminate(i8* %5)
+ unreachable
+}
+
+declare void @__clang_call_terminate(i8*)
+
+declare i32 @__gxx_personality_v0(...)
+
+declare i8* @__cxa_begin_catch(i8*)
+
+declare void @__cxa_end_catch()
+
+declare void @_ZSt9terminatev()
+
+; CHECK-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-FP: .fnstart
+; CHECK-FP: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP: .setfp r11, sp, #28
+; CHECK-FP: add r11, sp, #28
+; CHECK-FP: .pad #28
+; CHECK-FP: sub sp, sp, #28
+; CHECK-FP: .personality __gxx_personality_v0
+; CHECK-FP: .handlerdata
+; CHECK-FP: .fnend
+
+; CHECK-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-FP-ELIM: .fnstart
+; CHECK-FP-ELIM: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP-ELIM: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-FP-ELIM: .pad #28
+; CHECK-FP-ELIM: sub sp, sp, #28
+; CHECK-FP-ELIM: .personality __gxx_personality_v0
+; CHECK-FP-ELIM: .handlerdata
+; CHECK-FP-ELIM: .fnend
+
+; CHECK-V7-FP-LABEL: _Z4testiiiiiddddd:
+; CHECK-V7-FP: .fnstart
+; CHECK-V7-FP: .save {r4, r11, lr}
+; CHECK-V7-FP: push {r4, r11, lr}
+; CHECK-V7-FP: .setfp r11, sp, #4
+; CHECK-V7-FP: add r11, sp, #4
+; CHECK-V7-FP: .vsave {d8, d9, d10, d11, d12}
+; CHECK-V7-FP: vpush {d8, d9, d10, d11, d12}
+; CHECK-V7-FP: .pad #28
+; CHECK-V7-FP: sub sp, sp, #28
+; CHECK-V7-FP: .personality __gxx_personality_v0
+; CHECK-V7-FP: .handlerdata
+; CHECK-V7-FP: .fnend
+
+; CHECK-V7-FP-ELIM-LABEL: _Z4testiiiiiddddd:
+; CHECK-V7-FP-ELIM: .fnstart
+; CHECK-V7-FP-ELIM: .save {r4, lr}
+; CHECK-V7-FP-ELIM: push {r4, lr}
+; CHECK-V7-FP-ELIM: .vsave {d8, d9, d10, d11, d12}
+; CHECK-V7-FP-ELIM: vpush {d8, d9, d10, d11, d12}
+; CHECK-V7-FP-ELIM: .pad #24
+; CHECK-V7-FP-ELIM: sub sp, sp, #24
+; CHECK-V7-FP-ELIM: .personality __gxx_personality_v0
+; CHECK-V7-FP-ELIM: .handlerdata
+; CHECK-V7-FP-ELIM: .fnend
+
+
+;-------------------------------------------------------------------------------
+; Test 2
+;-------------------------------------------------------------------------------
+
+declare void @throw_exception_2()
+
+define void @test2() {
+entry:
+ tail call void @throw_exception_2()
+ ret void
+}
+
+; CHECK-FP-LABEL: test2:
+; CHECK-FP: .fnstart
+; CHECK-FP: .save {r11, lr}
+; CHECK-FP: push {r11, lr}
+; CHECK-FP: .setfp r11, sp
+; CHECK-FP: mov r11, sp
+; CHECK-FP: pop {r11, lr}
+; CHECK-FP: mov pc, lr
+; CHECK-FP: .fnend
+
+; CHECK-FP-ELIM-LABEL: test2:
+; CHECK-FP-ELIM: .fnstart
+; CHECK-FP-ELIM: .save {r11, lr}
+; CHECK-FP-ELIM: push {r11, lr}
+; CHECK-FP-ELIM: pop {r11, lr}
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM: .fnend
+
+; CHECK-V7-FP-LABEL: test2:
+; CHECK-V7-FP: .fnstart
+; CHECK-V7-FP: .save {r11, lr}
+; CHECK-V7-FP: push {r11, lr}
+; CHECK-V7-FP: .setfp r11, sp
+; CHECK-V7-FP: mov r11, sp
+; CHECK-V7-FP: pop {r11, pc}
+; CHECK-V7-FP: .fnend
+
+; CHECK-V7-FP-ELIM-LABEL: test2:
+; CHECK-V7-FP-ELIM: .fnstart
+; CHECK-V7-FP-ELIM: .save {r11, lr}
+; CHECK-V7-FP-ELIM: push {r11, lr}
+; CHECK-V7-FP-ELIM: pop {r11, pc}
+; CHECK-V7-FP-ELIM: .fnend
+
+
+;-------------------------------------------------------------------------------
+; Test 3
+;-------------------------------------------------------------------------------
+
+declare void @throw_exception_3(i32)
+
+define i32 @test3(i32 %a, i32 %b, i32 %c, i32 %d,
+ i32 %e, i32 %f, i32 %g, i32 %h) {
+entry:
+ %add = add nsw i32 %b, %a
+ %add1 = add nsw i32 %add, %c
+ %add2 = add nsw i32 %add1, %d
+ tail call void @throw_exception_3(i32 %add2)
+ %add3 = add nsw i32 %f, %e
+ %add4 = add nsw i32 %add3, %g
+ %add5 = add nsw i32 %add4, %h
+ tail call void @throw_exception_3(i32 %add5)
+ %add6 = add nsw i32 %add5, %add2
+ ret i32 %add6
+}
+
+; CHECK-FP-LABEL: test3:
+; CHECK-FP: .fnstart
+; CHECK-FP: .save {r4, r5, r11, lr}
+; CHECK-FP: push {r4, r5, r11, lr}
+; CHECK-FP: .setfp r11, sp, #8
+; CHECK-FP: add r11, sp, #8
+; CHECK-FP: pop {r4, r5, r11, lr}
+; CHECK-FP: mov pc, lr
+; CHECK-FP: .fnend
+
+; CHECK-FP-ELIM-LABEL: test3:
+; CHECK-FP-ELIM: .fnstart
+; CHECK-FP-ELIM: .save {r4, r5, r11, lr}
+; CHECK-FP-ELIM: push {r4, r5, r11, lr}
+; CHECK-FP-ELIM: pop {r4, r5, r11, lr}
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM: .fnend
+
+; CHECK-V7-FP-LABEL: test3:
+; CHECK-V7-FP: .fnstart
+; CHECK-V7-FP: .save {r4, r5, r11, lr}
+; CHECK-V7-FP: push {r4, r5, r11, lr}
+; CHECK-V7-FP: .setfp r11, sp, #8
+; CHECK-V7-FP: add r11, sp, #8
+; CHECK-V7-FP: pop {r4, r5, r11, pc}
+; CHECK-V7-FP: .fnend
+
+; CHECK-V7-FP-ELIM-LABEL: test3:
+; CHECK-V7-FP-ELIM: .fnstart
+; CHECK-V7-FP-ELIM: .save {r4, r5, r11, lr}
+; CHECK-V7-FP-ELIM: push {r4, r5, r11, lr}
+; CHECK-V7-FP-ELIM: pop {r4, r5, r11, pc}
+; CHECK-V7-FP-ELIM: .fnend
+
+
+;-------------------------------------------------------------------------------
+; Test 4
+;-------------------------------------------------------------------------------
+
+define void @test4() nounwind {
+entry:
+ ret void
+}
+
+; CHECK-FP-LABEL: test4:
+; CHECK-FP: .fnstart
+; CHECK-FP: mov pc, lr
+; CHECK-FP: .cantunwind
+; CHECK-FP: .fnend
+
+; CHECK-FP-ELIM-LABEL: test4:
+; CHECK-FP-ELIM: .fnstart
+; CHECK-FP-ELIM: mov pc, lr
+; CHECK-FP-ELIM: .cantunwind
+; CHECK-FP-ELIM: .fnend
+
+; CHECK-V7-FP-LABEL: test4:
+; CHECK-V7-FP: .fnstart
+; CHECK-V7-FP: bx lr
+; CHECK-V7-FP: .cantunwind
+; CHECK-V7-FP: .fnend
+
+; CHECK-V7-FP-ELIM-LABEL: test4:
+; CHECK-V7-FP-ELIM: .fnstart
+; CHECK-V7-FP-ELIM: bx lr
+; CHECK-V7-FP-ELIM: .cantunwind
+; CHECK-V7-FP-ELIM: .fnend
diff --git a/test/CodeGen/ARM/emit-big-cst.ll b/test/CodeGen/ARM/emit-big-cst.ll
new file mode 100644
index 000000000000..9a3367dab1a1
--- /dev/null
+++ b/test/CodeGen/ARM/emit-big-cst.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=thumbv7-unknown-unknown < %s | FileCheck %s
+; Check assembly printing of odd constants.
+
+; CHECK: bigCst:
+; CHECK-NEXT: .long 1694510592
+; CHECK-NEXT: .long 2960197
+; CHECK-NEXT: .long 26220
+; CHECK-NEXT: .size bigCst, 12
+
+@bigCst = internal constant i82 483673642326615442599424
+
+define void @accessBig(i64* %storage) {
+ %addr = bitcast i64* %storage to i82*
+ %bigLoadedCst = load volatile i82* @bigCst
+ %tmp = add i82 %bigLoadedCst, 1
+ store i82 %tmp, i82* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/extload-knownzero.ll b/test/CodeGen/ARM/extload-knownzero.ll
index 8fd6b6bd777a..8ccf58c39170 100644
--- a/test/CodeGen/ARM/extload-knownzero.ll
+++ b/test/CodeGen/ARM/extload-knownzero.ll
@@ -3,7 +3,7 @@
define void @foo(i16* %ptr, i32 %a) nounwind {
entry:
-; CHECK: foo:
+; CHECK-LABEL: foo:
%tmp1 = icmp ult i32 %a, 100
br i1 %tmp1, label %bb1, label %bb2
bb1:
diff --git a/test/CodeGen/ARM/fabs-neon.ll b/test/CodeGen/ARM/fabs-neon.ll
index 614117ff7bca..e3094aaf57d0 100644
--- a/test/CodeGen/ARM/fabs-neon.ll
+++ b/test/CodeGen/ARM/fabs-neon.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=armv7-eabi -float-abi=hard -mcpu=cortex-a8 | FileCheck %s
-; CHECK: test:
+; CHECK-LABEL: test:
; CHECK: vabs.f32 q0, q0
define <4 x float> @test(<4 x float> %a) {
%foo = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
@@ -8,7 +8,7 @@ define <4 x float> @test(<4 x float> %a) {
}
declare <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: vabs.f32 d0, d0
define <2 x float> @test2(<2 x float> %a) {
%foo = call <2 x float> @llvm.fabs.v2f32(<2 x float> %a)
diff --git a/test/CodeGen/ARM/fabss.ll b/test/CodeGen/ARM/fabss.ll
index c3e00ce47019..77c21c5be91a 100644
--- a/test/CodeGen/ARM/fabss.ll
+++ b/test/CodeGen/ARM/fabss.ll
@@ -13,17 +13,17 @@ entry:
declare float @fabsf(float)
-; VFP2: test:
+; VFP2-LABEL: test:
; VFP2: vabs.f32 s
-; NFP1: test:
+; NFP1-LABEL: test:
; NFP1: vabs.f32 d
-; NFP0: test:
+; NFP0-LABEL: test:
; NFP0: vabs.f32 s
-; CORTEXA8: test:
+; CORTEXA8-LABEL: test:
; CORTEXA8: vadd.f32 [[D1:d[0-9]+]]
; CORTEXA8: vabs.f32 {{d[0-9]+}}, [[D1]]
-; CORTEXA9: test:
+; CORTEXA9-LABEL: test:
; CORTEXA9: vabs.f32 s{{.}}, s{{.}}
diff --git a/test/CodeGen/ARM/fadds.ll b/test/CodeGen/ARM/fadds.ll
index c7e2f5d094b8..21219ce18e26 100644
--- a/test/CodeGen/ARM/fadds.ll
+++ b/test/CodeGen/ARM/fadds.ll
@@ -11,17 +11,17 @@ entry:
ret float %0
}
-; VFP2: test:
+; VFP2-LABEL: test:
; VFP2: vadd.f32 s
-; NFP1: test:
+; NFP1-LABEL: test:
; NFP1: vadd.f32 d
-; NFP0: test:
+; NFP0-LABEL: test:
; NFP0: vadd.f32 s
-; CORTEXA8: test:
+; CORTEXA8-LABEL: test:
; CORTEXA8: vadd.f32 s
-; CORTEXA8U: test:
+; CORTEXA8U-LABEL: test:
; CORTEXA8U: vadd.f32 d
-; CORTEXA9: test:
+; CORTEXA9-LABEL: test:
; CORTEXA9: vadd.f32 s
diff --git a/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll b/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll
index 60bc6a62f5d3..05a6bab99dbf 100644
--- a/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll
+++ b/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB
%struct.A = type { i32, [2 x [2 x i32]], i8, [3 x [3 x [3 x i32]]] }
@@ -26,8 +27,8 @@ entry:
; THUMB: t2
%addr = alloca i32*, align 4
store i32* getelementptr inbounds ([3 x [3 x %struct.A]]* @A, i32 0, i32 2, i32 2, i32 3, i32 1, i32 2, i32 2), i32** %addr, align 4
-; ARM: movw r1, #1148
-; ARM: add r0, r0, r1
+; ARM: movw [[R:r[0-9]+]], #1148
+; ARM: add r0, r{{[0-9]+}}, [[R]]
; THUMB: addw r0, r0, #1148
%0 = load i32** %addr, align 4
ret i32* %0
diff --git a/test/CodeGen/ARM/fast-isel-align.ll b/test/CodeGen/ARM/fast-isel-align.ll
new file mode 100644
index 000000000000..9c9a18858289
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-align.ll
@@ -0,0 +1,144 @@
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
+; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
+
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
+; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
+
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-nacl -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-nacl -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
+
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
+; RUN: llc < %s -O0 -arm-no-strict-align -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -arm-no-strict-align -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
+; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
+
+; Check unaligned stores
+%struct.anon = type <{ float }>
+
+@a = common global %struct.anon* null, align 4
+
+define void @unaligned_store(float %x, float %y) nounwind {
+entry:
+; ARM: @unaligned_store
+; ARM: vmov r1, s0
+; ARM: str r1, [r0]
+
+; THUMB: @unaligned_store
+; THUMB: vmov r1, s0
+; THUMB: str r1, [r0]
+
+ %add = fadd float %x, %y
+ %0 = load %struct.anon** @a, align 4
+ %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
+ store float %add, float* %x1, align 1
+ ret void
+}
+
+; Doublewords require only word-alignment.
+; rdar://10528060
+%struct.anon.0 = type { double }
+
+@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4
+
+define void @word_aligned_f64_store(double %a, double %b) nounwind {
+entry:
+; ARM: @word_aligned_f64_store
+; THUMB: @word_aligned_f64_store
+ %add = fadd double %a, %b
+ store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
+; ARM: vstr d16, [r0]
+; THUMB: vstr d16, [r0]
+ ret void
+}
+
+; Check unaligned loads of floats
+%class.TAlignTest = type <{ i16, float }>
+
+define zeroext i1 @unaligned_f32_load(%class.TAlignTest* %this) nounwind align 2 {
+entry:
+; ARM: @unaligned_f32_load
+; THUMB: @unaligned_f32_load
+ %0 = alloca %class.TAlignTest*, align 4
+ store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
+ %1 = load %class.TAlignTest** %0
+ %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
+ %3 = load float* %2, align 1
+ %4 = fcmp une float %3, 0.000000e+00
+; ARM: ldr r[[R:[0-9]+]], [r0, #2]
+; ARM: vmov s0, r[[R]]
+; ARM: vcmpe.f32 s0, #0
+; THUMB: ldr.w r[[R:[0-9]+]], [r0, #2]
+; THUMB: vmov s0, r[[R]]
+; THUMB: vcmpe.f32 s0, #0
+ ret i1 %4
+}
+
+define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i16_store
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+
+; THUMB-STRICT-ALIGN: @unaligned_i16_store
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+
+ store i16 %x, i16* %y, align 1
+ ret void
+}
+
+define i16 @unaligned_i16_load(i16* %x) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i16_load
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+
+; THUMB-STRICT-ALIGN: @unaligned_i16_load
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+
+ %0 = load i16* %x, align 1
+ ret i16 %0
+}
+
+define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i32_store
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+; ARM-STRICT-ALIGN: strb
+
+; THUMB-STRICT-ALIGN: @unaligned_i32_store
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+; THUMB-STRICT-ALIGN: strb
+
+ store i32 %x, i32* %y, align 1
+ ret void
+}
+
+define i32 @unaligned_i32_load(i32* %x) nounwind {
+entry:
+; ARM-STRICT-ALIGN: @unaligned_i32_load
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+; ARM-STRICT-ALIGN: ldrb
+
+; THUMB-STRICT-ALIGN: @unaligned_i32_load
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+; THUMB-STRICT-ALIGN: ldrb
+
+ %0 = load i32* %x, align 1
+ ret i32 %0
+}
diff --git a/test/CodeGen/ARM/fast-isel-binary.ll b/test/CodeGen/ARM/fast-isel-binary.ll
index 723383e04b8e..e1a2a4f33835 100644
--- a/test/CodeGen/ARM/fast-isel-binary.ll
+++ b/test/CodeGen/ARM/fast-isel-binary.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; Test add with non-legal types
diff --git a/test/CodeGen/ARM/fast-isel-br-const.ll b/test/CodeGen/ARM/fast-isel-br-const.ll
index 4e6efd248997..2e28b08fc8d6 100644
--- a/test/CodeGen/ARM/fast-isel-br-const.ll
+++ b/test/CodeGen/ARM/fast-isel-br-const.ll
@@ -1,14 +1,15 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
define i32 @t1(i32 %a, i32 %b) nounwind uwtable ssp {
entry:
-; THUMB: t1:
-; ARM: t1:
+; THUMB-LABEL: t1:
+; ARM-LABEL: t1:
%x = add i32 %a, %b
br i1 1, label %if.then, label %if.else
-; THUMB-NOT: b LBB0_1
-; ARM-NOT: b LBB0_1
+; THUMB-NOT: b {{\.?}}LBB0_1
+; ARM-NOT: b {{\.?}}LBB0_1
if.then: ; preds = %entry
call void @foo1()
@@ -16,8 +17,8 @@ if.then: ; preds = %entry
if.else: ; preds = %entry
br i1 0, label %if.then2, label %if.else3
-; THUMB: b LBB0_4
-; ARM: b LBB0_4
+; THUMB: b {{\.?}}LBB0_4
+; ARM: b {{\.?}}LBB0_4
if.then2: ; preds = %if.else
call void @foo2()
@@ -26,8 +27,8 @@ if.then2: ; preds = %if.else
if.else3: ; preds = %if.else
%y = sub i32 %a, %b
br i1 1, label %if.then5, label %if.end
-; THUMB-NOT: b LBB0_5
-; ARM-NOT: b LBB0_5
+; THUMB-NOT: b {{\.?}}LBB0_5
+; ARM-NOT: b {{\.?}}LBB0_5
if.then5: ; preds = %if.else3
call void @foo1()
diff --git a/test/CodeGen/ARM/fast-isel-br-phi.ll b/test/CodeGen/ARM/fast-isel-br-phi.ll
index a0aba694e43c..3b9d4652b755 100644
--- a/test/CodeGen/ARM/fast-isel-br-phi.ll
+++ b/test/CodeGen/ARM/fast-isel-br-phi.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios
; This test ensures HandlePHINodesInSuccessorBlocks() is able to promote basic
; non-legal integer types (i.e., i1, i8, i16).
diff --git a/test/CodeGen/ARM/fast-isel-call-multi-reg-return.ll b/test/CodeGen/ARM/fast-isel-call-multi-reg-return.ll
index b6f201728c2b..da829e929ef0 100644
--- a/test/CodeGen/ARM/fast-isel-call-multi-reg-return.ll
+++ b/test/CodeGen/ARM/fast-isel-call-multi-reg-return.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; Fast-isel can't handle non-double multi-reg retvals.
; This test just checks to make sure we don't hit the assert in FinishCall.
diff --git a/test/CodeGen/ARM/fast-isel-call.ll b/test/CodeGen/ARM/fast-isel-call.ll
index b6c9098613fe..917a15d28bd7 100644
--- a/test/CodeGen/ARM/fast-isel-call.ll
+++ b/test/CodeGen/ARM/fast-isel-call.ll
@@ -1,9 +1,18 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=THUMB-LONG
-; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP
-; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=THUMB-NOVFP
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=THUMB-LONG
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=THUMB-NOVFP
+
+; XFAIL: vg_leak
+
+; Note that some of these tests assume that relocations are either
+; movw/movt or constant pool loads. Different platforms will select
+; different approaches.
define i32 @t0(i1 zeroext %a) nounwind {
%1 = zext i1 %a to i32
@@ -44,9 +53,9 @@ define void @foo(i8 %a, i16 %b) nounwind {
; THUMB: sxtb r2, r1
; THUMB: mov r0, r2
%2 = call i32 @t1(i8 signext %a)
-; ARM: uxtb r2, r1
+; ARM: and r2, r1, #255
; ARM: mov r0, r2
-; THUMB: uxtb r2, r1
+; THUMB: and r2, r1, #255
; THUMB: mov r0, r2
%3 = call i32 @t2(i8 zeroext %a)
; ARM: sxth r2, r1
@@ -85,56 +94,56 @@ declare signext i8 @t7();
declare zeroext i8 @t8();
declare zeroext i1 @t9();
-define i32 @t10(i32 %argc, i8** nocapture %argv) {
+define i32 @t10() {
entry:
; ARM: @t10
-; ARM: movw r0, #0
-; ARM: movw r1, #248
-; ARM: movw r2, #187
-; ARM: movw r3, #28
-; ARM: movw r9, #40
-; ARM: movw r12, #186
-; ARM: uxtb r0, r0
-; ARM: uxtb r1, r1
-; ARM: uxtb r2, r2
-; ARM: uxtb r3, r3
-; ARM: uxtb r9, r9
-; ARM: str r9, [sp]
-; ARM: uxtb r9, r12
-; ARM: str r9, [sp, #4]
-; ARM: bl _bar
+; ARM: movw [[R0:l?r[0-9]*]], #0
+; ARM: movw [[R1:l?r[0-9]*]], #248
+; ARM: movw [[R2:l?r[0-9]*]], #187
+; ARM: movw [[R3:l?r[0-9]*]], #28
+; ARM: movw [[R4:l?r[0-9]*]], #40
+; ARM: movw [[R5:l?r[0-9]*]], #186
+; ARM: and [[R0]], [[R0]], #255
+; ARM: and [[R1]], [[R1]], #255
+; ARM: and [[R2]], [[R2]], #255
+; ARM: and [[R3]], [[R3]], #255
+; ARM: and [[R4]], [[R4]], #255
+; ARM: str [[R4]], [sp]
+; ARM: and [[R4]], [[R5]], #255
+; ARM: str [[R4]], [sp, #4]
+; ARM: bl {{_?}}bar
; ARM-LONG: @t10
-; ARM-LONG: movw lr, :lower16:L_bar$non_lazy_ptr
-; ARM-LONG: movt lr, :upper16:L_bar$non_lazy_ptr
-; ARM-LONG: ldr lr, [lr]
-; ARM-LONG: blx lr
+; ARM-LONG: {{(movw)|(ldr)}} [[R:l?r[0-9]*]], {{(:lower16:L_bar\$non_lazy_ptr)|(.LCPI)}}
+; ARM-LONG: {{(movt [[R]], :upper16:L_bar\$non_lazy_ptr)?}}
+; ARM-LONG: ldr [[R]], {{\[}}[[R]]{{\]}}
+; ARM-LONG: blx [[R]]
; THUMB: @t10
-; THUMB: movs r0, #0
-; THUMB: movt r0, #0
-; THUMB: movs r1, #248
-; THUMB: movt r1, #0
-; THUMB: movs r2, #187
-; THUMB: movt r2, #0
-; THUMB: movs r3, #28
-; THUMB: movt r3, #0
-; THUMB: movw r9, #40
-; THUMB: movt r9, #0
-; THUMB: movw r12, #186
-; THUMB: movt r12, #0
-; THUMB: uxtb r0, r0
-; THUMB: uxtb r1, r1
-; THUMB: uxtb r2, r2
-; THUMB: uxtb r3, r3
-; THUMB: uxtb.w r9, r9
-; THUMB: str.w r9, [sp]
-; THUMB: uxtb.w r9, r12
-; THUMB: str.w r9, [sp, #4]
-; THUMB: bl _bar
+; THUMB: movs [[R0:l?r[0-9]*]], #0
+; THUMB: movt [[R0]], #0
+; THUMB: movs [[R1:l?r[0-9]*]], #248
+; THUMB: movt [[R1]], #0
+; THUMB: movs [[R2:l?r[0-9]*]], #187
+; THUMB: movt [[R2]], #0
+; THUMB: movs [[R3:l?r[0-9]*]], #28
+; THUMB: movt [[R3]], #0
+; THUMB: movw [[R4:l?r[0-9]*]], #40
+; THUMB: movt [[R4]], #0
+; THUMB: movw [[R5:l?r[0-9]*]], #186
+; THUMB: movt [[R5]], #0
+; THUMB: and [[R0]], [[R0]], #255
+; THUMB: and [[R1]], [[R1]], #255
+; THUMB: and [[R2]], [[R2]], #255
+; THUMB: and [[R3]], [[R3]], #255
+; THUMB: and [[R4]], [[R4]], #255
+; THUMB: str.w [[R4]], [sp]
+; THUMB: and [[R4]], [[R5]], #255
+; THUMB: str.w [[R4]], [sp, #4]
+; THUMB: bl {{_?}}bar
; THUMB-LONG: @t10
-; THUMB-LONG: movw lr, :lower16:L_bar$non_lazy_ptr
-; THUMB-LONG: movt lr, :upper16:L_bar$non_lazy_ptr
-; THUMB-LONG: ldr.w lr, [lr]
-; THUMB-LONG: blx lr
+; THUMB-LONG: {{(movw)|(ldr.n)}} [[R:l?r[0-9]*]], {{(:lower16:L_bar\$non_lazy_ptr)|(.LCPI)}}
+; THUMB-LONG: {{(movt [[R]], :upper16:L_bar\$non_lazy_ptr)?}}
+; THUMB-LONG: ldr{{(.w)?}} [[R]], {{\[}}[[R]]{{\]}}
+; THUMB-LONG: blx [[R]]
%call = call i32 @bar(i8 zeroext 0, i8 zeroext -8, i8 zeroext -69, i8 zeroext 28, i8 zeroext 40, i8 zeroext -70)
ret i32 0
}
@@ -147,12 +156,12 @@ define i32 @bar0(i32 %i) nounwind {
define void @foo3() uwtable {
; ARM: movw r0, #0
-; ARM: movw r1, :lower16:_bar0
-; ARM: movt r1, :upper16:_bar0
+; ARM: {{(movw r1, :lower16:_?bar0)|(ldr r1, .LCPI)}}
+; ARM: {{(movt r1, :upper16:_?bar0)|(ldr r1, \[r1\])}}
; ARM: blx r1
; THUMB: movs r0, #0
-; THUMB: movw r1, :lower16:_bar0
-; THUMB: movt r1, :upper16:_bar0
+; THUMB: {{(movw r1, :lower16:_?bar0)|(ldr.n r1, .LCPI)}}
+; THUMB: {{(movt r1, :upper16:_?bar0)|(ldr r1, \[r1\])}}
; THUMB: blx r1
%fptr = alloca i32 (i32)*, align 8
store i32 (i32)* @bar0, i32 (i32)** %fptr, align 8
@@ -164,66 +173,23 @@ define void @foo3() uwtable {
define i32 @LibCall(i32 %a, i32 %b) {
entry:
; ARM: LibCall
-; ARM: bl ___udivsi3
+; ARM: bl {{___udivsi3|__aeabi_uidiv}}
; ARM-LONG: LibCall
-; ARM-LONG: movw r2, :lower16:L___udivsi3$non_lazy_ptr
-; ARM-LONG: movt r2, :upper16:L___udivsi3$non_lazy_ptr
+; ARM-LONG: {{(movw r2, :lower16:L___udivsi3\$non_lazy_ptr)|(ldr r2, .LCPI)}}
+; ARM-LONG: {{(movt r2, :upper16:L___udivsi3\$non_lazy_ptr)?}}
; ARM-LONG: ldr r2, [r2]
; ARM-LONG: blx r2
; THUMB: LibCall
-; THUMB: bl ___udivsi3
+; THUMB: bl {{___udivsi3|__aeabi_uidiv}}
; THUMB-LONG: LibCall
-; THUMB-LONG: movw r2, :lower16:L___udivsi3$non_lazy_ptr
-; THUMB-LONG: movt r2, :upper16:L___udivsi3$non_lazy_ptr
+; THUMB-LONG: {{(movw r2, :lower16:L___udivsi3\$non_lazy_ptr)|(ldr.n r2, .LCPI)}}
+; THUMB-LONG: {{(movt r2, :upper16:L___udivsi3\$non_lazy_ptr)?}}
; THUMB-LONG: ldr r2, [r2]
; THUMB-LONG: blx r2
%tmp1 = udiv i32 %a, %b ; <i32> [#uses=1]
ret i32 %tmp1
}
-define i32 @VarArg() nounwind {
-entry:
- %i = alloca i32, align 4
- %j = alloca i32, align 4
- %k = alloca i32, align 4
- %m = alloca i32, align 4
- %n = alloca i32, align 4
- %tmp = alloca i32, align 4
- %0 = load i32* %i, align 4
- %1 = load i32* %j, align 4
- %2 = load i32* %k, align 4
- %3 = load i32* %m, align 4
- %4 = load i32* %n, align 4
-; ARM: VarArg
-; ARM: mov r7, sp
-; ARM: movw r0, #5
-; ARM: ldr r1, [r7, #-4]
-; ARM: ldr r2, [r7, #-8]
-; ARM: ldr r3, [r7, #-12]
-; ARM: ldr r9, [sp, #16]
-; ARM: ldr r12, [sp, #12]
-; ARM: str r9, [sp]
-; ARM: str r12, [sp, #4]
-; ARM: bl _CallVariadic
-; THUMB: mov r7, sp
-; THUMB: movs r0, #5
-; THUMB: movt r0, #0
-; THUMB: ldr r1, [sp, #28]
-; THUMB: ldr r2, [sp, #24]
-; THUMB: ldr r3, [sp, #20]
-; THUMB: ldr.w r9, [sp, #16]
-; THUMB: ldr.w r12, [sp, #12]
-; THUMB: str.w r9, [sp]
-; THUMB: str.w r12, [sp, #4]
-; THUMB: bl _CallVariadic
- %call = call i32 (i32, ...)* @CallVariadic(i32 5, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4)
- store i32 %call, i32* %tmp, align 4
- %5 = load i32* %tmp, align 4
- ret i32 %5
-}
-
-declare i32 @CallVariadic(i32, ...)
-
; Test fastcc
define fastcc void @fast_callee(float %i) ssp {
diff --git a/test/CodeGen/ARM/fast-isel-cmp-imm.ll b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
index 660156aa48bd..55baf488a425 100644
--- a/test/CodeGen/ARM/fast-isel-cmp-imm.ll
+++ b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
define void @t1a(float %a) uwtable ssp {
entry:
diff --git a/test/CodeGen/ARM/fast-isel-conversion.ll b/test/CodeGen/ARM/fast-isel-conversion.ll
index 686ccad029d8..5983493a818b 100644
--- a/test/CodeGen/ARM/fast-isel-conversion.ll
+++ b/test/CodeGen/ARM/fast-isel-conversion.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -verify-machineinstrs -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; Test sitofp
@@ -130,11 +131,11 @@ entry:
define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: uitofp_single_i8
-; ARM: uxtb r0, r0
+; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i8
-; THUMB: uxtb r0, r0
+; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
%b.addr = alloca float, align 4
@@ -176,11 +177,11 @@ entry:
define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i8
-; ARM: uxtb r0, r0
+; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i8
-; THUMB: uxtb r0, r0
+; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
%b.addr = alloca double, align 8
diff --git a/test/CodeGen/ARM/fast-isel-crash.ll b/test/CodeGen/ARM/fast-isel-crash.ll
index 8fb4b66b7dd4..ec9cf8d95019 100644
--- a/test/CodeGen/ARM/fast-isel-crash.ll
+++ b/test/CodeGen/ARM/fast-isel-crash.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-linux-gnueabi
%union.anon = type { <16 x i32> }
diff --git a/test/CodeGen/ARM/fast-isel-crash2.ll b/test/CodeGen/ARM/fast-isel-crash2.ll
index f245168a8e30..d606877673dc 100644
--- a/test/CodeGen/ARM/fast-isel-crash2.ll
+++ b/test/CodeGen/ARM/fast-isel-crash2.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-apple-darwin
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -mtriple=thumbv7-linux-gnueabi
; rdar://9515076
; (Make sure this doesn't crash.)
diff --git a/test/CodeGen/ARM/fast-isel-deadcode.ll b/test/CodeGen/ARM/fast-isel-deadcode.ll
index 3a943d854b4a..5e6666c47d3e 100644
--- a/test/CodeGen/ARM/fast-isel-deadcode.ll
+++ b/test/CodeGen/ARM/fast-isel-deadcode.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; Target-specific selector can't properly handle the double because it isn't
; being passed via a register, so the materialized arguments become dead code.
diff --git a/test/CodeGen/ARM/fast-isel-ext.ll b/test/CodeGen/ARM/fast-isel-ext.ll
new file mode 100644
index 000000000000..de0dd1917eb7
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-ext.ll
@@ -0,0 +1,137 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=v7
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=v7
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv4t-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv4t-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv5-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv5-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=v7
+
+; Can't test pre-ARMv6 Thumb because ARM FastISel currently only supports
+; Thumb2. The ARMFastISel::ARMEmitIntExt code should work for Thumb by always
+; using two shifts.
+
+; Note that lsl, asr and lsr in Thumb are all encoded as 16-bit instructions
+; and therefore must set flags. {{s?}} below denotes this, instead of
+; duplicating tests.
+
+; zext
+
+define i8 @zext_1_8(i1 %a) nounwind ssp {
+; v7-LABEL: zext_1_8:
+; v7: and r0, r0, #1
+; prev6-LABEL: zext_1_8:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i8
+ ret i8 %r
+}
+
+define i16 @zext_1_16(i1 %a) nounwind ssp {
+; v7-LABEL: zext_1_16:
+; v7: and r0, r0, #1
+; prev6-LABEL: zext_1_16:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i16
+ ret i16 %r
+}
+
+define i32 @zext_1_32(i1 %a) nounwind ssp {
+; v7-LABEL: zext_1_32:
+; v7: and r0, r0, #1
+; prev6-LABEL: zext_1_32:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i32
+ ret i32 %r
+}
+
+define i16 @zext_8_16(i8 %a) nounwind ssp {
+; v7-LABEL: zext_8_16:
+; v7: and r0, r0, #255
+; prev6-LABEL: zext_8_16:
+; prev6: and r0, r0, #255
+ %r = zext i8 %a to i16
+ ret i16 %r
+}
+
+define i32 @zext_8_32(i8 %a) nounwind ssp {
+; v7-LABEL: zext_8_32:
+; v7: and r0, r0, #255
+; prev6-LABEL: zext_8_32:
+; prev6: and r0, r0, #255
+ %r = zext i8 %a to i32
+ ret i32 %r
+}
+
+define i32 @zext_16_32(i16 %a) nounwind ssp {
+; v7-LABEL: zext_16_32:
+; v7: uxth r0, r0
+; prev6-LABEL: zext_16_32:
+; prev6: lsl{{s?}} r0, r0, #16
+; prev6: lsr{{s?}} r0, r0, #16
+ %r = zext i16 %a to i32
+ ret i32 %r
+}
+
+; sext
+
+define i8 @sext_1_8(i1 %a) nounwind ssp {
+; v7-LABEL: sext_1_8:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6-LABEL: sext_1_8:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i8
+ ret i8 %r
+}
+
+define i16 @sext_1_16(i1 %a) nounwind ssp {
+; v7-LABEL: sext_1_16:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6-LABEL: sext_1_16:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i16
+ ret i16 %r
+}
+
+define i32 @sext_1_32(i1 %a) nounwind ssp {
+; v7-LABEL: sext_1_32:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6-LABEL: sext_1_32:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i32
+ ret i32 %r
+}
+
+define i16 @sext_8_16(i8 %a) nounwind ssp {
+; v7-LABEL: sext_8_16:
+; v7: sxtb r0, r0
+; prev6-LABEL: sext_8_16:
+; prev6: lsl{{s?}} r0, r0, #24
+; prev6: asr{{s?}} r0, r0, #24
+ %r = sext i8 %a to i16
+ ret i16 %r
+}
+
+define i32 @sext_8_32(i8 %a) nounwind ssp {
+; v7-LABEL: sext_8_32:
+; v7: sxtb r0, r0
+; prev6-LABEL: sext_8_32:
+; prev6: lsl{{s?}} r0, r0, #24
+; prev6: asr{{s?}} r0, r0, #24
+ %r = sext i8 %a to i32
+ ret i32 %r
+}
+
+define i32 @sext_16_32(i16 %a) nounwind ssp {
+; v7-LABEL: sext_16_32:
+; v7: sxth r0, r0
+; prev6-LABEL: sext_16_32:
+; prev6: lsl{{s?}} r0, r0, #16
+; prev6: asr{{s?}} r0, r0, #16
+ %r = sext i16 %a to i32
+ ret i32 %r
+}
diff --git a/test/CodeGen/ARM/fast-isel-fold.ll b/test/CodeGen/ARM/fast-isel-fold.ll
index 7a65295f01b6..e8ed8cbf34e9 100644
--- a/test/CodeGen/ARM/fast-isel-fold.ll
+++ b/test/CodeGen/ARM/fast-isel-fold.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB
@a = global i8 1, align 1
@@ -8,9 +9,11 @@ define void @t1() nounwind uwtable ssp {
; ARM: t1
; ARM: ldrb
; ARM-NOT: uxtb
+; ARM-NOT: and{{.*}}, #255
; THUMB: t1
; THUMB: ldrb
; THUMB-NOT: uxtb
+; THUMB-NOT: and{{.*}}, #255
%1 = load i8* @a, align 1
call void @foo1(i8 zeroext %1)
ret void
@@ -35,9 +38,11 @@ define i32 @t3() nounwind uwtable ssp {
; ARM: t3
; ARM: ldrb
; ARM-NOT: uxtb
+; ARM-NOT: and{{.*}}, #255
; THUMB: t3
; THUMB: ldrb
; THUMB-NOT: uxtb
+; THUMB-NOT: and{{.*}}, #255
%1 = load i8* @a, align 1
%2 = zext i8 %1 to i32
ret i32 %2
diff --git a/test/CodeGen/ARM/fast-isel-frameaddr.ll b/test/CodeGen/ARM/fast-isel-frameaddr.ll
index c256e73ab98c..8542bb5e27d2 100644
--- a/test/CodeGen/ARM/fast-isel-frameaddr.ll
+++ b/test/CodeGen/ARM/fast-isel-frameaddr.ll
@@ -5,22 +5,22 @@
define i8* @frameaddr_index0() nounwind {
entry:
-; DARWIN-ARM: frameaddr_index0:
+; DARWIN-ARM-LABEL: frameaddr_index0:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
-; DARWIN-THUMB2: frameaddr_index0:
+; DARWIN-THUMB2-LABEL: frameaddr_index0:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
-; LINUX-ARM: frameaddr_index0:
+; LINUX-ARM-LABEL: frameaddr_index0:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
; LINUX-ARM: mov r0, r11
-; LINUX-THUMB2: frameaddr_index0:
+; LINUX-THUMB2-LABEL: frameaddr_index0:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
@@ -31,25 +31,24 @@ entry:
define i8* @frameaddr_index1() nounwind {
entry:
-; DARWIN-ARM: frameaddr_index1:
+; DARWIN-ARM-LABEL: frameaddr_index1:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
; DARWIN-ARM: ldr r0, [r0]
-; DARWIN-THUMB2: frameaddr_index1:
+; DARWIN-THUMB2-LABEL: frameaddr_index1:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
; DARWIN-THUMB2: ldr r0, [r0]
-; LINUX-ARM: frameaddr_index1:
+; LINUX-ARM-LABEL: frameaddr_index1:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
-; LINUX-ARM: mov r0, r11
-; LINUX-ARM: ldr r0, [r0]
+; LINUX-ARM: ldr r0, [r11]
-; LINUX-THUMB2: frameaddr_index1:
+; LINUX-THUMB2-LABEL: frameaddr_index1:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
@@ -61,7 +60,7 @@ entry:
define i8* @frameaddr_index3() nounwind {
entry:
-; DARWIN-ARM: frameaddr_index3:
+; DARWIN-ARM-LABEL: frameaddr_index3:
; DARWIN-ARM: push {r7}
; DARWIN-ARM: mov r7, sp
; DARWIN-ARM: mov r0, r7
@@ -69,7 +68,7 @@ entry:
; DARWIN-ARM: ldr r0, [r0]
; DARWIN-ARM: ldr r0, [r0]
-; DARWIN-THUMB2: frameaddr_index3:
+; DARWIN-THUMB2-LABEL: frameaddr_index3:
; DARWIN-THUMB2: str r7, [sp, #-4]!
; DARWIN-THUMB2: mov r7, sp
; DARWIN-THUMB2: mov r0, r7
@@ -77,15 +76,14 @@ entry:
; DARWIN-THUMB2: ldr r0, [r0]
; DARWIN-THUMB2: ldr r0, [r0]
-; LINUX-ARM: frameaddr_index3:
+; LINUX-ARM-LABEL: frameaddr_index3:
; LINUX-ARM: push {r11}
; LINUX-ARM: mov r11, sp
-; LINUX-ARM: mov r0, r11
-; LINUX-ARM: ldr r0, [r0]
+; LINUX-ARM: ldr r0, [r11]
; LINUX-ARM: ldr r0, [r0]
; LINUX-ARM: ldr r0, [r0]
-; LINUX-THUMB2: frameaddr_index3:
+; LINUX-THUMB2-LABEL: frameaddr_index3:
; LINUX-THUMB2: str r7, [sp, #-4]!
; LINUX-THUMB2: mov r7, sp
; LINUX-THUMB2: mov r0, r7
diff --git a/test/CodeGen/ARM/fast-isel-icmp.ll b/test/CodeGen/ARM/fast-isel-icmp.ll
index 8357ed5c549c..85f449e3d71d 100644
--- a/test/CodeGen/ARM/fast-isel-icmp.ll
+++ b/test/CodeGen/ARM/fast-isel-icmp.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
define i32 @icmp_i16_signed(i16 %a, i16 %b) nounwind {
entry:
@@ -49,12 +50,12 @@ entry:
define i32 @icmp_i8_unsigned(i8 %a, i8 %b) nounwind {
entry:
; ARM: icmp_i8_unsigned
-; ARM: uxtb r0, r0
-; ARM: uxtb r1, r1
+; ARM: and r0, r0, #255
+; ARM: and r1, r1, #255
; ARM: cmp r0, r1
; THUMB: icmp_i8_unsigned
-; THUMB: uxtb r0, r0
-; THUMB: uxtb r1, r1
+; THUMB: and r0, r0, #255
+; THUMB: and r1, r1, #255
; THUMB: cmp r0, r1
%cmp = icmp ugt i8 %a, %b
%conv2 = zext i1 %cmp to i32
diff --git a/test/CodeGen/ARM/fast-isel-indirectbr.ll b/test/CodeGen/ARM/fast-isel-indirectbr.ll
index ebc0e8426d55..2456ef442040 100644
--- a/test/CodeGen/ARM/fast-isel-indirectbr.ll
+++ b/test/CodeGen/ARM/fast-isel-indirectbr.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
define void @t1(i8* %x) {
diff --git a/test/CodeGen/ARM/fast-isel-intrinsic.ll b/test/CodeGen/ARM/fast-isel-intrinsic.ll
index 48105dd3893b..b08b72baa61e 100644
--- a/test/CodeGen/ARM/fast-isel-intrinsic.ll
+++ b/test/CodeGen/ARM/fast-isel-intrinsic.ll
@@ -1,35 +1,43 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=ARM-LONG
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls | FileCheck %s --check-prefix=THUMB-LONG
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -arm-long-calls -verify-machineinstrs | FileCheck %s --check-prefix=ARM-LONG
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -arm-long-calls -verify-machineinstrs | FileCheck %s --check-prefix=ARM-LONG
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -arm-long-calls -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-LONG
+
+; XFAIL: vg_leak
+
+; Note that some of these tests assume that relocations are either
+; movw/movt or constant pool loads. Different platforms will select
+; different approaches.
@message1 = global [60 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 1
@temp = common global [60 x i8] zeroinitializer, align 1
define void @t1() nounwind ssp {
; ARM: t1
-; ARM: movw r0, :lower16:_message1
-; ARM: movt r0, :upper16:_message1
+; ARM: {{(movw r0, :lower16:_?message1)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; ARM: add r0, r0, #5
; ARM: movw r1, #64
; ARM: movw r2, #10
-; ARM: uxtb r1, r1
-; ARM: bl _memset
+; ARM: and r1, r1, #255
+; ARM: bl {{_?}}memset
; ARM-LONG: t1
-; ARM-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
-; ARM-LONG: movt r3, :upper16:L_memset$non_lazy_ptr
+; ARM-LONG: {{(movw r3, :lower16:L_memset\$non_lazy_ptr)|(ldr r3, .LCPI)}}
+; ARM-LONG: {{(movt r3, :upper16:L_memset\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t1
-; THUMB: movw r0, :lower16:_message1
-; THUMB: movt r0, :upper16:_message1
+; THUMB: {{(movw r0, :lower16:_?message1)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:_?message1)|(ldr r0, \[r0\])}}
; THUMB: adds r0, #5
; THUMB: movs r1, #64
; THUMB: movt r1, #0
; THUMB: movs r2, #10
; THUMB: movt r2, #0
-; THUMB: uxtb r1, r1
-; THUMB: bl _memset
+; THUMB: and r1, r1, #255
+; THUMB: bl {{_?}}memset
; THUMB-LONG: t1
; THUMB-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memset$non_lazy_ptr
@@ -43,31 +51,33 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
define void @t2() nounwind ssp {
; ARM: t2
-; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
-; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #17
-; ARM: str r0, [sp] @ 4-byte Spill
+; ARM: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; ARM: mov r0, r1
-; ARM: ldr r1, [sp] @ 4-byte Reload
-; ARM: bl _memcpy
+; ARM: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
+; ARM: bl {{_?}}memcpy
; ARM-LONG: t2
-; ARM-LONG: movw r3, :lower16:L_memcpy$non_lazy_ptr
-; ARM-LONG: movt r3, :upper16:L_memcpy$non_lazy_ptr
+; ARM-LONG: {{(movw r3, :lower16:L_memcpy\$non_lazy_ptr)|(ldr r3, .LCPI)}}
+; ARM-LONG: {{(movt r3, :upper16:L_memcpy\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t2
-; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
+; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #17
; THUMB: movt r2, #0
+; THUMB: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; THUMB: mov r0, r1
-; THUMB: bl _memcpy
+; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
+; THUMB: bl {{_?}}memcpy
; THUMB-LONG: t2
; THUMB-LONG: movw r3, :lower16:L_memcpy$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memcpy$non_lazy_ptr
@@ -81,29 +91,31 @@ declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32,
define void @t3() nounwind ssp {
; ARM: t3
-; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
-; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: add r1, r0, #4
; ARM: add r0, r0, #16
; ARM: movw r2, #10
; ARM: mov r0, r1
-; ARM: bl _memmove
+; ARM: bl {{_?}}memmove
; ARM-LONG: t3
-; ARM-LONG: movw r3, :lower16:L_memmove$non_lazy_ptr
-; ARM-LONG: movt r3, :upper16:L_memmove$non_lazy_ptr
+; ARM-LONG: {{(movw r3, :lower16:L_memmove\$non_lazy_ptr)|(ldr r3, .LCPI)}}
+; ARM-LONG: {{(movt r3, :upper16:L_memmove\$non_lazy_ptr)?}}
; ARM-LONG: ldr r3, [r3]
; ARM-LONG: blx r3
; THUMB: t3
-; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
+; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: adds r1, r0, #4
; THUMB: adds r0, #16
; THUMB: movs r2, #10
; THUMB: movt r2, #0
+; THUMB: str r0, [sp[[SLOT:[, #0-9]*]]] @ 4-byte Spill
; THUMB: mov r0, r1
-; THUMB: bl _memmove
+; THUMB: ldr r1, [sp[[SLOT]]] @ 4-byte Reload
+; THUMB: bl {{_?}}memmove
; THUMB-LONG: t3
; THUMB-LONG: movw r3, :lower16:L_memmove$non_lazy_ptr
; THUMB-LONG: movt r3, :upper16:L_memmove$non_lazy_ptr
@@ -115,8 +127,8 @@ define void @t3() nounwind ssp {
define void @t4() nounwind ssp {
; ARM: t4
-; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
-; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0, #16]
; ARM: str r1, [r0, #4]
@@ -126,8 +138,8 @@ define void @t4() nounwind ssp {
; ARM: strh r1, [r0, #12]
; ARM: bx lr
; THUMB: t4
-; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
+; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0, #16]
; THUMB: str r1, [r0, #4]
@@ -144,8 +156,8 @@ declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32,
define void @t5() nounwind ssp {
; ARM: t5
-; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
-; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldrh r1, [r0, #16]
; ARM: strh r1, [r0, #4]
@@ -159,8 +171,8 @@ define void @t5() nounwind ssp {
; ARM: strh r1, [r0, #12]
; ARM: bx lr
; THUMB: t5
-; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
+; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldrh r1, [r0, #16]
; THUMB: strh r1, [r0, #4]
@@ -179,8 +191,8 @@ define void @t5() nounwind ssp {
define void @t6() nounwind ssp {
; ARM: t6
-; ARM: movw r0, :lower16:L_temp$non_lazy_ptr
-; ARM: movt r0, :upper16:L_temp$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldrb r1, [r0, #16]
; ARM: strb r1, [r0, #4]
@@ -204,8 +216,8 @@ define void @t6() nounwind ssp {
; ARM: strb r1, [r0, #13]
; ARM: bx lr
; THUMB: t6
-; THUMB: movw r0, :lower16:L_temp$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_temp$non_lazy_ptr
+; THUMB: {{(movw r0, :lower16:L_temp\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_temp\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldrb r1, [r0, #16]
; THUMB: strb r1, [r0, #4]
diff --git a/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll b/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
index dfb8c53735a3..cf294bcfbece 100644
--- a/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
+++ b/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=ARM
define i32 @t1(i32* nocapture %ptr) nounwind readonly {
entry:
diff --git a/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll b/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
index 2a88678da767..d9c9cc459c7e 100644
--- a/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
+++ b/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
@@ -1,10 +1,10 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
define i32 @t1(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t1
%add.ptr = getelementptr inbounds i32* %ptr, i32 -1
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-4]
ret i32 %0
}
@@ -13,7 +13,7 @@ define i32 @t2(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t2
%add.ptr = getelementptr inbounds i32* %ptr, i32 -63
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-252]
ret i32 %0
}
@@ -22,7 +22,7 @@ define i32 @t3(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t3
%add.ptr = getelementptr inbounds i32* %ptr, i32 -64
- %0 = load i32* %add.ptr, align 4, !tbaa !0
+ %0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0]
ret i32 %0
}
@@ -31,7 +31,7 @@ define zeroext i16 @t4(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t4
%add.ptr = getelementptr inbounds i16* %ptr, i32 -1
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-2]
ret i16 %0
}
@@ -40,7 +40,7 @@ define zeroext i16 @t5(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t5
%add.ptr = getelementptr inbounds i16* %ptr, i32 -127
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-254]
ret i16 %0
}
@@ -49,7 +49,7 @@ define zeroext i16 @t6(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t6
%add.ptr = getelementptr inbounds i16* %ptr, i32 -128
- %0 = load i16* %add.ptr, align 2, !tbaa !3
+ %0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0]
ret i16 %0
}
@@ -58,7 +58,7 @@ define zeroext i8 @t7(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t7
%add.ptr = getelementptr inbounds i8* %ptr, i32 -1
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-1]
ret i8 %0
}
@@ -67,7 +67,7 @@ define zeroext i8 @t8(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t8
%add.ptr = getelementptr inbounds i8* %ptr, i32 -255
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-255]
ret i8 %0
}
@@ -76,7 +76,7 @@ define zeroext i8 @t9(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t9
%add.ptr = getelementptr inbounds i8* %ptr, i32 -256
- %0 = load i8* %add.ptr, align 1, !tbaa !1
+ %0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0]
ret i8 %0
}
@@ -85,7 +85,7 @@ define void @t10(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t10
%add.ptr = getelementptr inbounds i32* %ptr, i32 -1
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-4]
ret void
}
@@ -94,7 +94,7 @@ define void @t11(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t11
%add.ptr = getelementptr inbounds i32* %ptr, i32 -63
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-252]
ret void
}
@@ -103,7 +103,7 @@ define void @t12(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t12
%add.ptr = getelementptr inbounds i32* %ptr, i32 -64
- store i32 0, i32* %add.ptr, align 4, !tbaa !0
+ store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0]
ret void
}
@@ -112,7 +112,7 @@ define void @t13(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t13
%add.ptr = getelementptr inbounds i16* %ptr, i32 -1
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-2]
ret void
}
@@ -121,7 +121,7 @@ define void @t14(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t14
%add.ptr = getelementptr inbounds i16* %ptr, i32 -127
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-254]
ret void
}
@@ -130,7 +130,7 @@ define void @t15(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t15
%add.ptr = getelementptr inbounds i16* %ptr, i32 -128
- store i16 0, i16* %add.ptr, align 2, !tbaa !3
+ store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0]
ret void
}
@@ -139,7 +139,7 @@ define void @t16(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t16
%add.ptr = getelementptr inbounds i8* %ptr, i32 -1
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-1]
ret void
}
@@ -148,7 +148,7 @@ define void @t17(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t17
%add.ptr = getelementptr inbounds i8* %ptr, i32 -255
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-255]
ret void
}
@@ -157,12 +157,7 @@ define void @t18(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t18
%add.ptr = getelementptr inbounds i8* %ptr, i32 -256
- store i8 0, i8* %add.ptr, align 1, !tbaa !1
+ store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0]
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"short", metadata !1}
diff --git a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
index 0b5267ddc973..c05ea398d72e 100644
--- a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
+++ b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; rdar://10418009
define zeroext i16 @t1(i16* nocapture %a) nounwind uwtable readonly ssp {
diff --git a/test/CodeGen/ARM/fast-isel-load-store-verify.ll b/test/CodeGen/ARM/fast-isel-load-store-verify.ll
new file mode 100644
index 000000000000..710d88b3158c
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-load-store-verify.ll
@@ -0,0 +1,70 @@
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ALL
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ALL
+
+; FIXME: Add tests for thumbv7; they currently fail MI verification because
+; of a mismatch in register classes in uses.
+
+; This test verifies that load/store instructions are properly generated,
+; and that they pass MI verification (which wasn't the case until 2013-06-08).
+
+@a = global i8 1, align 1
+@b = global i16 2, align 2
+@c = global i32 4, align 4
+
+; ldr
+
+define i8 @t1() nounwind uwtable ssp {
+; ALL: @t1
+; ALL: ldrb
+; ALL: add
+ %1 = load i8* @a, align 1
+ %2 = add nsw i8 %1, 1
+ ret i8 %2
+}
+
+define i16 @t2() nounwind uwtable ssp {
+; ALL: @t2
+; ALL: ldrh
+; ALL: add
+ %1 = load i16* @b, align 2
+ %2 = add nsw i16 %1, 1
+ ret i16 %2
+}
+
+define i32 @t3() nounwind uwtable ssp {
+; ALL: @t3
+; ALL: ldr
+; ALL: add
+ %1 = load i32* @c, align 4
+ %2 = add nsw i32 %1, 1
+ ret i32 %2
+}
+
+; str
+
+define void @t4(i8 %v) nounwind uwtable ssp {
+; ALL: @t4
+; ALL: add
+; ALL: strb
+ %1 = add nsw i8 %v, 1
+ store i8 %1, i8* @a, align 1
+ ret void
+}
+
+define void @t5(i16 %v) nounwind uwtable ssp {
+; ALL: @t5
+; ALL: add
+; ALL: strh
+ %1 = add nsw i16 %v, 1
+ store i16 %1, i16* @b, align 2
+ ret void
+}
+
+define void @t6(i32 %v) nounwind uwtable ssp {
+; ALL: @t6
+; ALL: add
+; ALL: str
+ %1 = add nsw i32 %v, 1
+ store i32 %1, i32* @c, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/fast-isel-mvn.ll b/test/CodeGen/ARM/fast-isel-mvn.ll
index b180e439dd6f..0bc9395e2d78 100644
--- a/test/CodeGen/ARM/fast-isel-mvn.ll
+++ b/test/CodeGen/ARM/fast-isel-mvn.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; rdar://10412592
; Note: The Thumb code is being generated by the target-independent selector.
diff --git a/test/CodeGen/ARM/fast-isel-pic.ll b/test/CodeGen/ARM/fast-isel-pic.ll
index 867d53f973db..838c103e7c09 100644
--- a/test/CodeGen/ARM/fast-isel-pic.ll
+++ b/test/CodeGen/ARM/fast-isel-pic.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=arm-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARMv7
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s --check-prefix=THUMB-ELF
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=pic -mtriple=armv7-none-linux-gnueabi | FileCheck %s --check-prefix=ARMv7-ELF
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=pic -mtriple=arm-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARMv7
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=pic -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s --check-prefix=THUMB-ELF
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=pic -mtriple=armv7-none-linux-gnueabi | FileCheck %s --check-prefix=ARMv7-ELF
@g = global i32 0, align 4
@@ -13,9 +13,9 @@ entry:
; THUMB: movt [[reg0]],
; THUMB: add [[reg0]], pc
; THUMB-ELF: LoadGV
-; THUMB-ELF: ldr.n r[[reg0:[0-9]+]],
-; THUMB-ELF: ldr.n r[[reg1:[0-9]+]],
-; THUMB-ELF: ldr r[[reg0]], [r[[reg1]], r[[reg0]]]
+; THUMB-ELF: ldr r[[reg0:[0-9]+]],
+; THUMB-ELF: ldr r[[reg1:[0-9]+]],
+; THUMB-ELF: ldr r[[reg0]], [r[[reg0]], r[[reg1]]]
; ARM: LoadGV
; ARM: ldr [[reg1:r[0-9]+]],
; ARM: add [[reg1]], pc, [[reg1]]
@@ -25,6 +25,8 @@ entry:
; ARMv7: add [[reg2]], pc, [[reg2]]
; ARMv7-ELF: LoadGV
; ARMv7-ELF: ldr r[[reg2:[0-9]+]],
+; ARMv7-ELF: .LPC
+; ARMv7-ELF-NEXT: add r[[reg2]], pc
; ARMv7-ELF: ldr r[[reg3:[0-9]+]],
; ARMv7-ELF: ldr r[[reg2]], [r[[reg3]], r[[reg2]]]
%tmp = load i32* @g
@@ -41,9 +43,9 @@ entry:
; THUMB: add r[[reg3]], pc
; THUMB: ldr r[[reg3]], [r[[reg3]]]
; THUMB-ELF: LoadIndirectSymbol
-; THUMB-ELF: ldr.n r[[reg3:[0-9]+]],
-; THUMB-ELF: ldr.n r[[reg4:[0-9]+]],
-; THUMB-ELF: ldr r[[reg3]], [r[[reg4]], r[[reg3]]]
+; THUMB-ELF: ldr r[[reg3:[0-9]+]],
+; THUMB-ELF: ldr r[[reg4:[0-9]+]],
+; THUMB-ELF: ldr r[[reg3]], [r[[reg3]], r[[reg4]]]
; ARM: LoadIndirectSymbol
; ARM: ldr [[reg4:r[0-9]+]],
; ARM: ldr [[reg4]], [pc, [[reg4]]]
@@ -54,6 +56,8 @@ entry:
; ARMv7: ldr r[[reg5]], [r[[reg5]]]
; ARMv7-ELF: LoadIndirectSymbol
; ARMv7-ELF: ldr r[[reg5:[0-9]+]],
+; ARMv7-ELF: .LPC
+; ARMv7-ELF-NEXT: add r[[reg5]], pc
; ARMv7-ELF: ldr r[[reg6:[0-9]+]],
; ARMv7-ELF: ldr r[[reg5]], [r[[reg6]], r[[reg5]]]
%tmp = load i32* @i
diff --git a/test/CodeGen/ARM/fast-isel-pred.ll b/test/CodeGen/ARM/fast-isel-pred.ll
index 27731def1f57..48f93225b6b8 100644
--- a/test/CodeGen/ARM/fast-isel-pred.ll
+++ b/test/CodeGen/ARM/fast-isel-pred.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -verify-machineinstrs -mtriple=armv7-apple-darwin < %s
+; RUN: llc -O0 -verify-machineinstrs -mtriple=armv7-linux-gnueabi < %s
define i32 @main() nounwind ssp {
entry:
diff --git a/test/CodeGen/ARM/fast-isel-redefinition.ll b/test/CodeGen/ARM/fast-isel-redefinition.ll
index 563880dab0a9..ee150facac96 100644
--- a/test/CodeGen/ARM/fast-isel-redefinition.ll
+++ b/test/CodeGen/ARM/fast-isel-redefinition.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O0 -verify-machineinstrs -optimize-regalloc -regalloc=basic < %s
+; RUN: llc -O0 -verify-machineinstrs -fast-isel-abort -optimize-regalloc -regalloc=basic < %s
; This isn't exactly a useful set of command-line options, but check that it
; doesn't crash. (It was crashing because a register was getting redefined.)
diff --git a/test/CodeGen/ARM/fast-isel-ret.ll b/test/CodeGen/ARM/fast-isel-ret.ll
index 689b169ee32f..8a68309dc831 100644
--- a/test/CodeGen/ARM/fast-isel-ret.ll
+++ b/test/CodeGen/ARM/fast-isel-ret.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; Sign-extend of i1 currently not supported by fast-isel
;define signext i1 @ret0(i1 signext %a) nounwind uwtable ssp {
@@ -26,7 +27,7 @@ entry:
define zeroext i8 @ret3(i8 signext %a) nounwind uwtable ssp {
entry:
; CHECK: ret3
-; CHECK: uxtb r0, r0
+; CHECK: and r0, r0, #255
; CHECK: bx lr
ret i8 %a
}
diff --git a/test/CodeGen/ARM/fast-isel-select.ll b/test/CodeGen/ARM/fast-isel-select.ll
index b83a73366948..40f88075039e 100644
--- a/test/CodeGen/ARM/fast-isel-select.ll
+++ b/test/CodeGen/ARM/fast-isel-select.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv8-apple-ios | FileCheck %s --check-prefix=THUMB
define i32 @t1(i1 %c) nounwind readnone {
entry:
@@ -38,15 +40,16 @@ define i32 @t3(i1 %c, i32 %a, i32 %b) nounwind readnone {
entry:
; ARM: t3
; ARM: cmp r0, #0
-; ARM: movne r{{[1-9]}}, r{{[1-9]}}
-; ARM: mov r0, r{{[1-9]}}
+; ARM: movne r2, r1
+; ARM: add r0, r2, r1
; THUMB: t3
; THUMB: cmp r0, #0
; THUMB: it ne
-; THUMB: movne r{{[1-9]}}, r{{[1-9]}}
-; THUMB: mov r0, r{{[1-9]}}
+; THUMB: movne r2, r1
+; THUMB: add.w r0, r2, r1
%0 = select i1 %c, i32 %a, i32 %b
- ret i32 %0
+ %1 = add i32 %0, %a
+ ret i32 %1
}
define i32 @t4(i1 %c) nounwind readnone {
diff --git a/test/CodeGen/ARM/fast-isel-shifter.ll b/test/CodeGen/ARM/fast-isel-shifter.ll
index 111818b289e8..eb4b2b2ce0ae 100644
--- a/test/CodeGen/ARM/fast-isel-shifter.ll
+++ b/test/CodeGen/ARM/fast-isel-shifter.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
define i32 @shl() nounwind ssp {
entry:
diff --git a/test/CodeGen/ARM/fast-isel-static.ll b/test/CodeGen/ARM/fast-isel-static.ll
index e8759a7fc4ce..93c14a09205e 100644
--- a/test/CodeGen/ARM/fast-isel-static.ll
+++ b/test/CodeGen/ARM/fast-isel-static.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -relocation-model=static -arm-long-calls | FileCheck -check-prefix=LONG %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -relocation-model=static | FileCheck -check-prefix=NORM %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static -arm-long-calls | FileCheck -check-prefix=CHECK-LONG %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
+; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=static | FileCheck -check-prefix=CHECK-NORM %s
define void @myadd(float* %sum, float* %addend) nounwind {
entry:
@@ -24,7 +26,7 @@ entry:
store float 0.000000e+00, float* %ztot, align 4
store float 1.000000e+00, float* %z, align 4
; CHECK-LONG: blx r
-; CHECK-NORM: bl _myadd
+; CHECK-NORM: bl {{_?}}myadd
call void @myadd(float* %ztot, float* %z)
ret i32 0
}
diff --git a/test/CodeGen/ARM/fast-isel-vararg.ll b/test/CodeGen/ARM/fast-isel-vararg.ll
new file mode 100644
index 000000000000..0b7b0bd1c6f0
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-vararg.ll
@@ -0,0 +1,47 @@
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
+
+define i32 @VarArg() nounwind {
+entry:
+ %i = alloca i32, align 4
+ %j = alloca i32, align 4
+ %k = alloca i32, align 4
+ %m = alloca i32, align 4
+ %n = alloca i32, align 4
+ %tmp = alloca i32, align 4
+ %0 = load i32* %i, align 4
+ %1 = load i32* %j, align 4
+ %2 = load i32* %k, align 4
+ %3 = load i32* %m, align 4
+ %4 = load i32* %n, align 4
+; ARM: VarArg
+; ARM: mov [[FP:r[0-9]+]], sp
+; ARM: sub sp, sp, #32
+; ARM: movw r0, #5
+; ARM: ldr r1, {{\[}}[[FP]], #-4]
+; ARM: ldr r2, {{\[}}[[FP]], #-8]
+; ARM: ldr r3, {{\[}}[[FP]], #-12]
+; ARM: ldr [[Ra:r[0-9]+]], [sp, #16]
+; ARM: ldr [[Rb:[lr]+[0-9]*]], [sp, #12]
+; ARM: str [[Ra]], [sp]
+; ARM: str [[Rb]], [sp, #4]
+; ARM: bl {{_?CallVariadic}}
+; THUMB: sub sp, #32
+; THUMB: movs r0, #5
+; THUMB: movt r0, #0
+; THUMB: ldr r1, [sp, #28]
+; THUMB: ldr r2, [sp, #24]
+; THUMB: ldr r3, [sp, #20]
+; THUMB: ldr.w {{[a-z0-9]+}}, [sp, #16]
+; THUMB: ldr.w {{[a-z0-9]+}}, [sp, #12]
+; THUMB: str.w {{[a-z0-9]+}}, [sp]
+; THUMB: str.w {{[a-z0-9]+}}, [sp, #4]
+; THUMB: bl {{_?}}CallVariadic
+ %call = call i32 (i32, ...)* @CallVariadic(i32 5, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4)
+ store i32 %call, i32* %tmp, align 4
+ %5 = load i32* %tmp, align 4
+ ret i32 %5
+}
+
+declare i32 @CallVariadic(i32, ...)
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index 41fda4132632..5981cab7dcb1 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -1,10 +1,9 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
-; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
-; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
; Very basic fast-isel functionality.
-define i32 @add(i32 %a, i32 %b) nounwind {
+define i32 @test0(i32 %a, i32 %b) nounwind {
entry:
%a.addr = alloca i32, align 4
%b.addr = alloca i32, align 4
@@ -28,16 +27,16 @@ br label %if.end
if.end: ; preds = %if.then, %entry
ret void
-; ARM: test1:
+; ARM-LABEL: test1:
; ARM: tst r0, #1
-; THUMB: test1:
+; THUMB-LABEL: test1:
; THUMB: tst.w r0, #1
}
; Check some simple operations with immediates
define void @test2(i32 %tmp, i32* %ptr) nounwind {
-; THUMB: test2:
-; ARM: test2:
+; THUMB-LABEL: test2:
+; ARM-LABEL: test2:
b1:
%a = add i32 %tmp, 4096
@@ -65,8 +64,8 @@ b3:
}
define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
-; THUMB: test3:
-; ARM: test3:
+; THUMB-LABEL: test3:
+; ARM-LABEL: test3:
bb1:
%a1 = trunc i32 %tmp to i16
@@ -82,12 +81,12 @@ bb1:
; THUMB: and
; THUMB: strb
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
; THUMB: strh
; THUMB: uxth
; ARM: and
; ARM: strb
-; ARM: uxtb
+; ARM: and{{.*}}, #255
; ARM: strh
; ARM: uxth
@@ -123,13 +122,13 @@ bb3:
; THUMB: ldrb
; THUMB: ldrh
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
; THUMB: sxth
; THUMB: add
; THUMB: sub
; ARM: ldrb
; ARM: ldrh
-; ARM: uxtb
+; ARM: and{{.*}}, #255
; ARM: sxth
; ARM: add
; ARM: sub
@@ -144,82 +143,25 @@ define void @test4() {
store i32 %b, i32* @test4g
ret void
-; THUMB: movw r0, :lower16:L_test4g$non_lazy_ptr
-; THUMB: movt r0, :upper16:L_test4g$non_lazy_ptr
+
+; Note that relocations are either movw/movt or constant pool
+; loads. Different platforms will select different approaches.
+
+; THUMB: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0]
; THUMB: adds r1, #1
; THUMB: str r1, [r0]
-; ARM: movw r0, :lower16:L_test4g$non_lazy_ptr
-; ARM: movt r0, :upper16:L_test4g$non_lazy_ptr
+; ARM: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0]
; ARM: add r1, r1, #1
; ARM: str r1, [r0]
}
-; Check unaligned stores
-%struct.anon = type <{ float }>
-
-@a = common global %struct.anon* null, align 4
-
-define void @unaligned_store(float %x, float %y) nounwind {
-entry:
-; ARM: @unaligned_store
-; ARM: vmov r1, s0
-; ARM: str r1, [r0]
-
-; THUMB: @unaligned_store
-; THUMB: vmov r1, s0
-; THUMB: str r1, [r0]
-
- %add = fadd float %x, %y
- %0 = load %struct.anon** @a, align 4
- %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
- store float %add, float* %x1, align 1
- ret void
-}
-
-; Doublewords require only word-alignment.
-; rdar://10528060
-%struct.anon.0 = type { double }
-
-@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4
-
-define void @test5(double %a, double %b) nounwind {
-entry:
-; ARM: @test5
-; THUMB: @test5
- %add = fadd double %a, %b
- store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
-; ARM: vstr d16, [r0]
-; THUMB: vstr d16, [r0]
- ret void
-}
-
-; Check unaligned loads of floats
-%class.TAlignTest = type <{ i16, float }>
-
-define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
-entry:
-; ARM: @test6
-; THUMB: @test6
- %0 = alloca %class.TAlignTest*, align 4
- store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
- %1 = load %class.TAlignTest** %0
- %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
- %3 = load float* %2, align 1
- %4 = fcmp une float %3, 0.000000e+00
-; ARM: ldr r0, [r0, #2]
-; ARM: vmov s0, r0
-; ARM: vcmpe.f32 s0, #0
-; THUMB: ldr.w r0, [r0, #2]
-; THUMB: vmov s0, r0
-; THUMB: vcmpe.f32 s0, #0
- ret i1 %4
-}
-
; ARM: @urem_fold
; THUMB: @urem_fold
; ARM: and r0, r0, #31
@@ -229,10 +171,10 @@ define i32 @urem_fold(i32 %a) nounwind {
ret i32 %rem
}
-define i32 @test7() noreturn nounwind {
+define i32 @trap_intrinsic() noreturn nounwind {
entry:
-; ARM: @test7
-; THUMB: @test7
+; ARM: @trap_intrinsic
+; THUMB: @trap_intrinsic
; ARM: trap
; THUMB: trap
tail call void @llvm.trap( )
@@ -240,67 +182,3 @@ entry:
}
declare void @llvm.trap() nounwind
-
-define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
-entry:
-; ARM-STRICT-ALIGN: @unaligned_i16_store
-; ARM-STRICT-ALIGN: strb
-; ARM-STRICT-ALIGN: strb
-
-; THUMB-STRICT-ALIGN: @unaligned_i16_store
-; THUMB-STRICT-ALIGN: strb
-; THUMB-STRICT-ALIGN: strb
-
- store i16 %x, i16* %y, align 1
- ret void
-}
-
-define i16 @unaligned_i16_load(i16* %x) nounwind {
-entry:
-; ARM-STRICT-ALIGN: @unaligned_i16_load
-; ARM-STRICT-ALIGN: ldrb
-; ARM-STRICT-ALIGN: ldrb
-
-; THUMB-STRICT-ALIGN: @unaligned_i16_load
-; THUMB-STRICT-ALIGN: ldrb
-; THUMB-STRICT-ALIGN: ldrb
-
- %0 = load i16* %x, align 1
- ret i16 %0
-}
-
-define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
-entry:
-; ARM-STRICT-ALIGN: @unaligned_i32_store
-; ARM-STRICT-ALIGN: strb
-; ARM-STRICT-ALIGN: strb
-; ARM-STRICT-ALIGN: strb
-; ARM-STRICT-ALIGN: strb
-
-; THUMB-STRICT-ALIGN: @unaligned_i32_store
-; THUMB-STRICT-ALIGN: strb
-; THUMB-STRICT-ALIGN: strb
-; THUMB-STRICT-ALIGN: strb
-; THUMB-STRICT-ALIGN: strb
-
- store i32 %x, i32* %y, align 1
- ret void
-}
-
-define i32 @unaligned_i32_load(i32* %x) nounwind {
-entry:
-; ARM-STRICT-ALIGN: @unaligned_i32_load
-; ARM-STRICT-ALIGN: ldrb
-; ARM-STRICT-ALIGN: ldrb
-; ARM-STRICT-ALIGN: ldrb
-; ARM-STRICT-ALIGN: ldrb
-
-; THUMB-STRICT-ALIGN: @unaligned_i32_load
-; THUMB-STRICT-ALIGN: ldrb
-; THUMB-STRICT-ALIGN: ldrb
-; THUMB-STRICT-ALIGN: ldrb
-; THUMB-STRICT-ALIGN: ldrb
-
- %0 = load i32* %x, align 1
- ret i32 %0
-}
diff --git a/test/CodeGen/ARM/fast-tail-call.ll b/test/CodeGen/ARM/fast-tail-call.ll
new file mode 100644
index 000000000000..9fbdc9d24b01
--- /dev/null
+++ b/test/CodeGen/ARM/fast-tail-call.ll
@@ -0,0 +1,16 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -O0 -arm-tail-calls < %s | FileCheck %s
+
+; Primarily a non-crash test: Thumbv7 Linux does not have FastISel support,
+; which led (via a convoluted route) to DAG nodes after a TC_RETURN that
+; couldn't possibly work.
+
+declare i8* @g(i8*)
+
+define i8* @f(i8* %a) {
+entry:
+ %0 = tail call i8* @g(i8* %a)
+ ret i8* %0
+; CHECK: b g
+; CHECK-NOT: ldr
+; CHECK-NOT: str
+}
diff --git a/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll b/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll
new file mode 100644
index 000000000000..a32ab6d09317
--- /dev/null
+++ b/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll
@@ -0,0 +1,18 @@
+; fastisel should not fold add with non-pointer bitwidth
+; sext(a) + sext(b) != sext(a + b)
+; RUN: llc -mtriple=armv7-apple-ios %s -O0 -o - | FileCheck %s
+
+define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
+entry:
+ %ptr.addr = alloca i8*, align 8
+ %add = add i8 64, 64 ; 0x40 + 0x40
+ %0 = load i8** %ptr.addr, align 8
+
+ ; CHECK-LABEL: _gep_promotion:
+ ; CHECK: ldrb {{r[0-9]+}}, {{\[r[0-9]+\]}}
+ %arrayidx = getelementptr inbounds i8* %0, i8 %add
+
+ %1 = load i8* %arrayidx, align 1
+ ret i8 %1
+}
+
diff --git a/test/CodeGen/ARM/fcopysign.ll b/test/CodeGen/ARM/fcopysign.ll
index 5511d24cb280..1de057208ce3 100644
--- a/test/CodeGen/ARM/fcopysign.ll
+++ b/test/CodeGen/ARM/fcopysign.ll
@@ -4,11 +4,11 @@
; rdar://8984306
define float @test1(float %x, float %y) nounwind {
entry:
-; SOFT: test1:
+; SOFT-LABEL: test1:
; SOFT: lsr r1, r1, #31
; SOFT: bfi r0, r1, #31, #1
-; HARD: test1:
+; HARD-LABEL: test1:
; HARD: vmov.i32 [[REG1:(d[0-9]+)]], #0x80000000
; HARD: vbsl [[REG1]], d
%0 = tail call float @copysignf(float %x, float %y) nounwind readnone
@@ -17,11 +17,11 @@ entry:
define double @test2(double %x, double %y) nounwind {
entry:
-; SOFT: test2:
+; SOFT-LABEL: test2:
; SOFT: lsr r2, r3, #31
; SOFT: bfi r1, r2, #31, #1
-; HARD: test2:
+; HARD-LABEL: test2:
; HARD: vmov.i32 [[REG2:(d[0-9]+)]], #0x80000000
; HARD: vshl.i64 [[REG2]], [[REG2]], #32
; HARD: vbsl [[REG2]], d1, d0
@@ -31,7 +31,7 @@ entry:
define double @test3(double %x, double %y, double %z) nounwind {
entry:
-; SOFT: test3:
+; SOFT-LABEL: test3:
; SOFT: vmov.i32 [[REG3:(d[0-9]+)]], #0x80000000
; SOFT: vshl.i64 [[REG3]], [[REG3]], #32
; SOFT: vbsl [[REG3]],
@@ -43,7 +43,7 @@ entry:
; rdar://9287902
define float @test4() nounwind {
entry:
-; SOFT: test4:
+; SOFT-LABEL: test4:
; SOFT: vmov [[REG7:(d[0-9]+)]], r0, r1
; SOFT: vmov.i32 [[REG6:(d[0-9]+)]], #0x80000000
; SOFT: vshr.u64 [[REG7]], [[REG7]], #32
diff --git a/test/CodeGen/ARM/fdivs.ll b/test/CodeGen/ARM/fdivs.ll
index 8f13f395e078..a4fecfe14588 100644
--- a/test/CodeGen/ARM/fdivs.ll
+++ b/test/CodeGen/ARM/fdivs.ll
@@ -9,15 +9,15 @@ entry:
ret float %0
}
-; VFP2: test:
+; VFP2-LABEL: test:
; VFP2: vdiv.f32 s{{.}}, s{{.}}, s{{.}}
-; NFP1: test:
+; NFP1-LABEL: test:
; NFP1: vdiv.f32 s{{.}}, s{{.}}, s{{.}}
-; NFP0: test:
+; NFP0-LABEL: test:
; NFP0: vdiv.f32 s{{.}}, s{{.}}, s{{.}}
-; CORTEXA8: test:
+; CORTEXA8-LABEL: test:
; CORTEXA8: vdiv.f32 s{{.}}, s{{.}}, s{{.}}
-; CORTEXA9: test:
+; CORTEXA9-LABEL: test:
; CORTEXA9: vdiv.f32 s{{.}}, s{{.}}, s{{.}}
diff --git a/test/CodeGen/ARM/fmacs.ll b/test/CodeGen/ARM/fmacs.ll
index b63f609e755a..f2486c65d3a2 100644
--- a/test/CodeGen/ARM/fmacs.ll
+++ b/test/CodeGen/ARM/fmacs.ll
@@ -6,13 +6,13 @@
define float @t1(float %acc, float %a, float %b) {
entry:
-; VFP2: t1:
+; VFP2-LABEL: t1:
; VFP2: vmla.f32
-; NEON: t1:
+; NEON-LABEL: t1:
; NEON: vmla.f32
-; A8: t1:
+; A8-LABEL: t1:
; A8: vmul.f32
; A8: vadd.f32
%0 = fmul float %a, %b
@@ -22,13 +22,13 @@ entry:
define double @t2(double %acc, double %a, double %b) {
entry:
-; VFP2: t2:
+; VFP2-LABEL: t2:
; VFP2: vmla.f64
-; NEON: t2:
+; NEON-LABEL: t2:
; NEON: vmla.f64
-; A8: t2:
+; A8-LABEL: t2:
; A8: vmul.f64
; A8: vadd.f64
%0 = fmul double %a, %b
@@ -38,13 +38,13 @@ entry:
define float @t3(float %acc, float %a, float %b) {
entry:
-; VFP2: t3:
+; VFP2-LABEL: t3:
; VFP2: vmla.f32
-; NEON: t3:
+; NEON-LABEL: t3:
; NEON: vmla.f32
-; A8: t3:
+; A8-LABEL: t3:
; A8: vmul.f32
; A8: vadd.f32
%0 = fmul float %a, %b
@@ -56,18 +56,18 @@ entry:
; rdar://8659675
define void @t4(float %acc1, float %a, float %b, float %acc2, float %c, float* %P1, float* %P2) {
entry:
-; A8: t4:
+; A8-LABEL: t4:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32
; Two vmla with no RAW hazard
-; A9: t4:
+; A9-LABEL: t4:
; A9: vmla.f32
; A9: vmla.f32
-; HARD: t4:
+; HARD-LABEL: t4:
; HARD: vmla.f32 s0, s1, s2
; HARD: vmla.f32 s3, s1, s4
%0 = fmul float %a, %b
@@ -81,18 +81,18 @@ entry:
define float @t5(float %a, float %b, float %c, float %d, float %e) {
entry:
-; A8: t5:
+; A8-LABEL: t5:
; A8: vmul.f32
; A8: vmul.f32
; A8: vadd.f32
; A8: vadd.f32
-; A9: t5:
+; A9-LABEL: t5:
; A9: vmla.f32
; A9: vmul.f32
; A9: vadd.f32
-; HARD: t5:
+; HARD-LABEL: t5:
; HARD: vmla.f32 s4, s0, s1
; HARD: vmul.f32 s0, s2, s3
; HARD: vadd.f32 s0, s4, s0
diff --git a/test/CodeGen/ARM/fmscs.ll b/test/CodeGen/ARM/fmscs.ll
index a182833a7a2c..f16ec172cb70 100644
--- a/test/CodeGen/ARM/fmscs.ll
+++ b/test/CodeGen/ARM/fmscs.ll
@@ -4,13 +4,13 @@
define float @t1(float %acc, float %a, float %b) {
entry:
-; VFP2: t1:
+; VFP2-LABEL: t1:
; VFP2: vnmls.f32
-; NEON: t1:
+; NEON-LABEL: t1:
; NEON: vnmls.f32
-; A8: t1:
+; A8-LABEL: t1:
; A8: vmul.f32
; A8: vsub.f32
%0 = fmul float %a, %b
@@ -20,13 +20,13 @@ entry:
define double @t2(double %acc, double %a, double %b) {
entry:
-; VFP2: t2:
+; VFP2-LABEL: t2:
; VFP2: vnmls.f64
-; NEON: t2:
+; NEON-LABEL: t2:
; NEON: vnmls.f64
-; A8: t2:
+; A8-LABEL: t2:
; A8: vmul.f64
; A8: vsub.f64
%0 = fmul double %a, %b
diff --git a/test/CodeGen/ARM/fmuls.ll b/test/CodeGen/ARM/fmuls.ll
index f5245c946398..d11f6bd1bd99 100644
--- a/test/CodeGen/ARM/fmuls.ll
+++ b/test/CodeGen/ARM/fmuls.ll
@@ -11,19 +11,19 @@ entry:
ret float %0
}
-; VFP2: test:
+; VFP2-LABEL: test:
; VFP2: vmul.f32 s
-; NFP1: test:
+; NFP1-LABEL: test:
; NFP1: vmul.f32 d
-; NFP0: test:
+; NFP0-LABEL: test:
; NFP0: vmul.f32 s
-; CORTEXA8: test:
+; CORTEXA8-LABEL: test:
; CORTEXA8: vmul.f32 s
-; CORTEXA8U: test:
+; CORTEXA8U-LABEL: test:
; CORTEXA8U: vmul.f32 d
-; CORTEXA9: test:
+; CORTEXA9-LABEL: test:
; CORTEXA9: vmul.f32 s
; VFP2: test2
diff --git a/test/CodeGen/ARM/fnegs.ll b/test/CodeGen/ARM/fnegs.ll
index d84690ba4e4b..dc4c2e33e491 100644
--- a/test/CodeGen/ARM/fnegs.ll
+++ b/test/CodeGen/ARM/fnegs.ll
@@ -14,22 +14,22 @@ entry:
%retval = select i1 %3, float %1, float %0 ; <float> [#uses=1]
ret float %retval
}
-; VFP2: test1:
+; VFP2-LABEL: test1:
; VFP2: vneg.f32 s{{.*}}, s{{.*}}
-; NFP1: test1:
+; NFP1-LABEL: test1:
; NFP1: vneg.f32 d{{.*}}, d{{.*}}
-; NFP0: test1:
+; NFP0-LABEL: test1:
; NFP0: vneg.f32 s{{.*}}, s{{.*}}
-; CORTEXA8: test1:
+; CORTEXA8-LABEL: test1:
; CORTEXA8: vneg.f32 s{{.*}}, s{{.*}}
-; CORTEXA8U: test1:
+; CORTEXA8U-LABEL: test1:
; CORTEXA8U: vneg.f32 d{{.*}}, d{{.*}}
-; CORTEXA9: test1:
+; CORTEXA9-LABEL: test1:
; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
define float @test2(float* %a) {
@@ -41,21 +41,21 @@ entry:
%retval = select i1 %3, float %1, float %0 ; <float> [#uses=1]
ret float %retval
}
-; VFP2: test2:
+; VFP2-LABEL: test2:
; VFP2: vneg.f32 s{{.*}}, s{{.*}}
-; NFP1: test2:
+; NFP1-LABEL: test2:
; NFP1: vneg.f32 d{{.*}}, d{{.*}}
-; NFP0: test2:
+; NFP0-LABEL: test2:
; NFP0: vneg.f32 s{{.*}}, s{{.*}}
-; CORTEXA8: test2:
+; CORTEXA8-LABEL: test2:
; CORTEXA8: vneg.f32 s{{.*}}, s{{.*}}
-; CORTEXA8U: test2:
+; CORTEXA8U-LABEL: test2:
; CORTEXA8U: vneg.f32 d{{.*}}, d{{.*}}
-; CORTEXA9: test2:
+; CORTEXA9-LABEL: test2:
; CORTEXA9: vneg.f32 s{{.*}}, s{{.*}}
diff --git a/test/CodeGen/ARM/fnmacs.ll b/test/CodeGen/ARM/fnmacs.ll
index 1763d46e06c4..825feaa0453f 100644
--- a/test/CodeGen/ARM/fnmacs.ll
+++ b/test/CodeGen/ARM/fnmacs.ll
@@ -4,13 +4,13 @@
define float @t1(float %acc, float %a, float %b) {
entry:
-; VFP2: t1:
+; VFP2-LABEL: t1:
; VFP2: vmls.f32
-; NEON: t1:
+; NEON-LABEL: t1:
; NEON: vmls.f32
-; A8: t1:
+; A8-LABEL: t1:
; A8: vmul.f32
; A8: vsub.f32
%0 = fmul float %a, %b
@@ -20,13 +20,13 @@ entry:
define double @t2(double %acc, double %a, double %b) {
entry:
-; VFP2: t2:
+; VFP2-LABEL: t2:
; VFP2: vmls.f64
-; NEON: t2:
+; NEON-LABEL: t2:
; NEON: vmls.f64
-; A8: t2:
+; A8-LABEL: t2:
; A8: vmul.f64
; A8: vsub.f64
%0 = fmul double %a, %b
diff --git a/test/CodeGen/ARM/fnmscs.ll b/test/CodeGen/ARM/fnmscs.ll
index c30806173428..78ccb6095e05 100644
--- a/test/CodeGen/ARM/fnmscs.ll
+++ b/test/CodeGen/ARM/fnmscs.ll
@@ -7,17 +7,17 @@
define float @t1(float %acc, float %a, float %b) nounwind {
entry:
-; VFP2: t1:
+; VFP2-LABEL: t1:
; VFP2: vnmla.f32
-; NEON: t1:
+; NEON-LABEL: t1:
; NEON: vnmla.f32
-; A8U: t1:
+; A8U-LABEL: t1:
; A8U: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
-; A8: t1:
+; A8-LABEL: t1:
; A8: vnmul.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
%0 = fmul float %a, %b
@@ -28,17 +28,17 @@ entry:
define float @t2(float %acc, float %a, float %b) nounwind {
entry:
-; VFP2: t2:
+; VFP2-LABEL: t2:
; VFP2: vnmla.f32
-; NEON: t2:
+; NEON-LABEL: t2:
; NEON: vnmla.f32
-; A8U: t2:
+; A8U-LABEL: t2:
; A8U: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
-; A8: t2:
+; A8-LABEL: t2:
; A8: vnmul.f32 s{{[01234]}}, s{{[01234]}}, s{{[01234]}}
; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
%0 = fmul float %a, %b
@@ -49,17 +49,17 @@ entry:
define double @t3(double %acc, double %a, double %b) nounwind {
entry:
-; VFP2: t3:
+; VFP2-LABEL: t3:
; VFP2: vnmla.f64
-; NEON: t3:
+; NEON-LABEL: t3:
; NEON: vnmla.f64
-; A8U: t3:
+; A8U-LABEL: t3:
; A8U: vnmul.f64 d
; A8U: vsub.f64 d
-; A8: t3:
+; A8-LABEL: t3:
; A8: vnmul.f64 d
; A8: vsub.f64 d
%0 = fmul double %a, %b
@@ -70,17 +70,17 @@ entry:
define double @t4(double %acc, double %a, double %b) nounwind {
entry:
-; VFP2: t4:
+; VFP2-LABEL: t4:
; VFP2: vnmla.f64
-; NEON: t4:
+; NEON-LABEL: t4:
; NEON: vnmla.f64
-; A8U: t4:
+; A8U-LABEL: t4:
; A8U: vnmul.f64 d
; A8U: vsub.f64 d
-; A8: t4:
+; A8-LABEL: t4:
; A8: vnmul.f64 d
; A8: vsub.f64 d
%0 = fmul double %a, %b
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
new file mode 100644
index 000000000000..67fd129fd1c9
--- /dev/null
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -0,0 +1,164 @@
+; RUN: llc -mtriple=thumbv7-apple-darwin-eabi < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv6m-apple-darwin-eabi -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc -mtriple=thumbv7-apple-darwin-ios -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-IOS
+
+
+declare void @bar(i8*)
+
+%bigVec = type [2 x double]
+
+@var = global %bigVec zeroinitializer
+
+define void @check_simple() minsize {
+; CHECK-LABEL: check_simple:
+; CHECK: push.w {r7, r8, r9, r10, r11, lr}
+; CHECK-NOT: sub sp, sp,
+; ...
+; CHECK-NOT: add sp, sp,
+; CHECK: pop.w {r0, r1, r2, r3, r11, pc}
+
+; CHECK-T1-LABEL: check_simple:
+; CHECK-T1: push {r3, r4, r5, r6, r7, lr}
+; CHECK-T1: add r7, sp, #16
+; CHECK-T1-NOT: sub sp, sp,
+; ...
+; CHECK-T1-NOT: add sp, sp,
+; CHECK-T1: pop {r0, r1, r2, r3, r7, pc}
+
+ ; iOS always has a frame pointer and messing with the push affects
+ ; how it's set in the prologue. Make sure we get that right.
+; CHECK-IOS-LABEL: check_simple:
+; CHECK-IOS: push {r3, r4, r5, r6, r7, lr}
+; CHECK-NOT: sub sp,
+; CHECK-IOS: add r7, sp, #16
+; CHECK-NOT: sub sp,
+; ...
+; CHECK-NOT: add sp,
+; CHECK-IOS: pop {r3, r4, r5, r6, r7, pc}
+
+ %var = alloca i8, i32 16
+ call void @bar(i8* %var)
+ ret void
+}
+
+define void @check_simple_too_big() minsize {
+; CHECK-LABEL: check_simple_too_big:
+; CHECK: push.w {r11, lr}
+; CHECK: sub sp,
+; ...
+; CHECK: add sp,
+; CHECK: pop.w {r11, pc}
+ %var = alloca i8, i32 64
+ call void @bar(i8* %var)
+ ret void
+}
+
+define void @check_vfp_fold() minsize {
+; CHECK-LABEL: check_vfp_fold:
+; CHECK: push {r[[GLOBREG:[0-9]+]], lr}
+; CHECK: vpush {d6, d7, d8, d9}
+; CHECK-NOT: sub sp,
+; ...
+; CHECK: vldmia r[[GLOBREG]], {d8, d9}
+; ...
+; CHECK-NOT: add sp,
+; CHECK: vpop {d6, d7, d8, d9}
+; CHECK: pop {r[[GLOBREG]], pc}
+
+ ; iOS uses aligned NEON stores here, which is convenient since we
+ ; want to make sure that works too.
+; CHECK-IOS-LABEL: check_vfp_fold:
+; CHECK-IOS: push {r0, r1, r2, r3, r4, r7, lr}
+; CHECK-IOS: sub.w r4, sp, #16
+; CHECK-IOS: bic r4, r4, #15
+; CHECK-IOS: mov sp, r4
+; CHECK-IOS: vst1.64 {d8, d9}, [r4:128]
+; ...
+; CHECK-IOS: add r4, sp, #16
+; CHECK-IOS: vld1.64 {d8, d9}, [r4:128]
+; CHECK-IOS: mov sp, r4
+; CHECK-IOS: pop {r4, r7, pc}
+
+ %var = alloca i8, i32 16
+
+ %tmp = load %bigVec* @var
+ call void @bar(i8* %var)
+ store %bigVec %tmp, %bigVec* @var
+
+ ret void
+}
+
+; This function should use just enough space that the "add sp, sp, ..." could be
+; folded in except that doing so would clobber the value being returned.
+define i64 @check_no_return_clobber() minsize {
+; CHECK-LABEL: check_no_return_clobber:
+; CHECK: push.w {r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-NOT: sub sp,
+; ...
+; CHECK: add sp, #40
+; CHECK: pop.w {r11, pc}
+
+ ; Just to keep iOS FileCheck within previous function:
+; CHECK-IOS-LABEL: check_no_return_clobber:
+
+ %var = alloca i8, i32 40
+ call void @bar(i8* %var)
+ ret i64 0
+}
+
+define arm_aapcs_vfpcc double @check_vfp_no_return_clobber() minsize {
+; CHECK-LABEL: check_vfp_no_return_clobber:
+; CHECK: push {r[[GLOBREG:[0-9]+]], lr}
+; CHECK: vpush {d0, d1, d2, d3, d4, d5, d6, d7, d8, d9}
+; CHECK-NOT: sub sp,
+; ...
+; CHECK: add sp, #64
+; CHECK: vpop {d8, d9}
+; CHECK: pop {r[[GLOBREG]], pc}
+
+ %var = alloca i8, i32 64
+
+ %tmp = load %bigVec* @var
+ call void @bar(i8* %var)
+ store %bigVec %tmp, %bigVec* @var
+
+ ret double 1.0
+}
+
+@dbl = global double 0.0
+
+; PR18136: there was a bug determining where the first eligible pop in a
+; basic-block was when the entire block was epilogue code.
+define void @test_fold_point(i1 %tst) minsize {
+; CHECK-LABEL: test_fold_point:
+
+ ; Important to check for beginning of basic block, because if it gets
+ ; if-converted the test is probably no longer checking what it should.
+; CHECK: {{LBB[0-9]+_2}}:
+; CHECK-NEXT: vpop {d7, d8}
+; CHECK-NEXT: pop {r4, pc}
+
+ ; With a guaranteed frame-pointer, we want to make sure that its offset in the
+ ; push block is correct, even if a few registers have been tacked onto a later
+ ; vpush (PR18160).
+; CHECK-IOS-LABEL: test_fold_point:
+; CHECK-IOS: push {r4, r7, lr}
+; CHECK-IOS-NEXT: add r7, sp, #4
+; CHECK-IOS-NEXT: vpush {d7, d8}
+
+ ; We want some memory so there's a stack adjustment to fold...
+ %var = alloca i8, i32 8
+
+ ; We want a long-lived floating register so that a callee-saved dN is used and
+ ; there's both a vpop and a pop.
+ %live_val = load double* @dbl
+ br i1 %tst, label %true, label %end
+true:
+ call void @bar(i8* %var)
+ store double %live_val, double* @dbl
+ br label %end
+end:
+ ; We want the epilogue to be the only thing in a basic block so that we hit
+ ; the correct edge-case (first inst in block is correct one to adjust).
+ ret void
+}
\ No newline at end of file
diff --git a/test/CodeGen/ARM/fp.ll b/test/CodeGen/ARM/fp.ll
index 93601cf9d6c9..fbf3a4a56ad5 100644
--- a/test/CodeGen/ARM/fp.ll
+++ b/test/CodeGen/ARM/fp.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
define float @f(i32 %a) {
-;CHECK: f:
+;CHECK-LABEL: f:
;CHECK: vmov
;CHECK-NEXT: vcvt.f32.s32
;CHECK-NEXT: vmov
@@ -11,7 +11,7 @@ entry:
}
define double @g(i32 %a) {
-;CHECK: g:
+;CHECK-LABEL: g:
;CHECK: vmov
;CHECK-NEXT: vcvt.f64.s32
;CHECK-NEXT: vmov
@@ -21,7 +21,7 @@ entry:
}
define double @uint_to_double(i32 %a) {
-;CHECK: uint_to_double:
+;CHECK-LABEL: uint_to_double:
;CHECK: vmov
;CHECK-NEXT: vcvt.f64.u32
;CHECK-NEXT: vmov
@@ -31,7 +31,7 @@ entry:
}
define float @uint_to_float(i32 %a) {
-;CHECK: uint_to_float:
+;CHECK-LABEL: uint_to_float:
;CHECK: vmov
;CHECK-NEXT: vcvt.f32.u32
;CHECK-NEXT: vmov
@@ -41,7 +41,7 @@ entry:
}
define double @h(double* %v) {
-;CHECK: h:
+;CHECK-LABEL: h:
;CHECK: vldr
;CHECK-NEXT: vmov
entry:
@@ -50,20 +50,20 @@ entry:
}
define float @h2() {
-;CHECK: h2:
+;CHECK-LABEL: h2:
;CHECK: mov r0, #1065353216
entry:
ret float 1.000000e+00
}
define double @f2(double %a) {
-;CHECK: f2:
+;CHECK-LABEL: f2:
;CHECK-NOT: vmov
ret double %a
}
define void @f3() {
-;CHECK: f3:
+;CHECK-LABEL: f3:
;CHECK-NOT: vmov
;CHECK: f4
entry:
diff --git a/test/CodeGen/ARM/fp16.ll b/test/CodeGen/ARM/fp16.ll
index 1261ea502129..a5c1aed277bb 100644
--- a/test/CodeGen/ARM/fp16.ll
+++ b/test/CodeGen/ARM/fp16.ll
@@ -8,8 +8,8 @@ target triple = "armv7-eabi"
@z = common global i16 0
define arm_aapcs_vfpcc void @foo() nounwind {
-; CHECK: foo:
-; CHECK-FP6: foo:
+; CHECK-LABEL: foo:
+; CHECK-FP6-LABEL: foo:
entry:
%0 = load i16* @x, align 2
%1 = load i16* @y, align 2
diff --git a/test/CodeGen/ARM/fp_convert.ll b/test/CodeGen/ARM/fp_convert.ll
index 3c47eb580ff1..f0d910052a4d 100644
--- a/test/CodeGen/ARM/fp_convert.ll
+++ b/test/CodeGen/ARM/fp_convert.ll
@@ -6,9 +6,9 @@
; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=VFP2
define i32 @test1(float %a, float %b) {
-; VFP2: test1:
+; VFP2-LABEL: test1:
; VFP2: vcvt.s32.f32 s{{.}}, s{{.}}
-; NEON: test1:
+; NEON-LABEL: test1:
; NEON: vadd.f32 [[D0:d[0-9]+]]
; NEON: vcvt.s32.f32 d0, [[D0]]
entry:
@@ -18,9 +18,9 @@ entry:
}
define i32 @test2(float %a, float %b) {
-; VFP2: test2:
+; VFP2-LABEL: test2:
; VFP2: vcvt.u32.f32 s{{.}}, s{{.}}
-; NEON: test2:
+; NEON-LABEL: test2:
; NEON: vadd.f32 [[D0:d[0-9]+]]
; NEON: vcvt.u32.f32 d0, [[D0]]
entry:
@@ -30,9 +30,9 @@ entry:
}
define float @test3(i32 %a, i32 %b) {
-; VFP2: test3:
+; VFP2-LABEL: test3:
; VFP2: vcvt.f32.u32 s{{.}}, s{{.}}
-; NEON: test3:
+; NEON-LABEL: test3:
; NEON: vcvt.f32.u32 d
entry:
%0 = add i32 %a, %b
@@ -41,9 +41,9 @@ entry:
}
define float @test4(i32 %a, i32 %b) {
-; VFP2: test4:
+; VFP2-LABEL: test4:
; VFP2: vcvt.f32.s32 s{{.}}, s{{.}}
-; NEON: test4:
+; NEON-LABEL: test4:
; NEON: vcvt.f32.s32 d
entry:
%0 = add i32 %a, %b
diff --git a/test/CodeGen/ARM/fparith.ll b/test/CodeGen/ARM/fparith.ll
index 40ea33becebb..cc880148da85 100644
--- a/test/CodeGen/ARM/fparith.ll
+++ b/test/CodeGen/ARM/fparith.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+vfp2 | FileCheck %s
define float @f1(float %a, float %b) {
-;CHECK: f1:
+;CHECK-LABEL: f1:
;CHECK: vadd.f32
entry:
%tmp = fadd float %a, %b ; <float> [#uses=1]
@@ -9,7 +9,7 @@ entry:
}
define double @f2(double %a, double %b) {
-;CHECK: f2:
+;CHECK-LABEL: f2:
;CHECK: vadd.f64
entry:
%tmp = fadd double %a, %b ; <double> [#uses=1]
@@ -17,7 +17,7 @@ entry:
}
define float @f3(float %a, float %b) {
-;CHECK: f3:
+;CHECK-LABEL: f3:
;CHECK: vmul.f32
entry:
%tmp = fmul float %a, %b ; <float> [#uses=1]
@@ -25,7 +25,7 @@ entry:
}
define double @f4(double %a, double %b) {
-;CHECK: f4:
+;CHECK-LABEL: f4:
;CHECK: vmul.f64
entry:
%tmp = fmul double %a, %b ; <double> [#uses=1]
@@ -33,7 +33,7 @@ entry:
}
define float @f5(float %a, float %b) {
-;CHECK: f5:
+;CHECK-LABEL: f5:
;CHECK: vsub.f32
entry:
%tmp = fsub float %a, %b ; <float> [#uses=1]
@@ -41,7 +41,7 @@ entry:
}
define double @f6(double %a, double %b) {
-;CHECK: f6:
+;CHECK-LABEL: f6:
;CHECK: vsub.f64
entry:
%tmp = fsub double %a, %b ; <double> [#uses=1]
@@ -49,7 +49,7 @@ entry:
}
define float @f7(float %a) {
-;CHECK: f7:
+;CHECK-LABEL: f7:
;CHECK: eor
entry:
%tmp1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
@@ -57,7 +57,7 @@ entry:
}
define double @f8(double %a) {
-;CHECK: f8:
+;CHECK-LABEL: f8:
;CHECK: vneg.f64
entry:
%tmp1 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
@@ -65,7 +65,7 @@ entry:
}
define float @f9(float %a, float %b) {
-;CHECK: f9:
+;CHECK-LABEL: f9:
;CHECK: vdiv.f32
entry:
%tmp1 = fdiv float %a, %b ; <float> [#uses=1]
@@ -73,7 +73,7 @@ entry:
}
define double @f10(double %a, double %b) {
-;CHECK: f10:
+;CHECK-LABEL: f10:
;CHECK: vdiv.f64
entry:
%tmp1 = fdiv double %a, %b ; <double> [#uses=1]
@@ -81,7 +81,7 @@ entry:
}
define float @f11(float %a) {
-;CHECK: f11:
+;CHECK-LABEL: f11:
;CHECK: bic
entry:
%tmp1 = call float @fabsf( float %a ) readnone ; <float> [#uses=1]
@@ -91,7 +91,7 @@ entry:
declare float @fabsf(float)
define double @f12(double %a) {
-;CHECK: f12:
+;CHECK-LABEL: f12:
;CHECK: vabs.f64
entry:
%tmp1 = call double @fabs( double %a ) readnone ; <double> [#uses=1]
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 2d8f7108e0ec..3a0af16bf6d6 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -5,7 +5,7 @@
; Disable this optimization unless we know one of them is zero.
define arm_apcscc i32 @t1(float* %a, float* %b) nounwind {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: vldr [[S0:s[0-9]+]],
; CHECK: vldr [[S1:s[0-9]+]],
; CHECK: vcmpe.f32 [[S1]], [[S0]]
@@ -29,13 +29,12 @@ bb2:
; +0.0 == -0.0
define arm_apcscc i32 @t2(double* %a, double* %b) nounwind {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK-NOT: vldr
-; CHECK: ldr [[REG1:(r[0-9]+)]], [r0]
-; CHECK: ldr [[REG2:(r[0-9]+)]], [r0, #4]
+; CHECK: ldrd [[REG1:(r[0-9]+)]], [[REG2:(r[0-9]+)]], [r0]
; CHECK-NOT: b LBB
-; CHECK: cmp [[REG1]], #0
; CHECK: bfc [[REG2]], #31, #1
+; CHECK: cmp [[REG1]], #0
; CHECK: cmpeq [[REG2]], #0
; CHECK-NOT: vcmpe.f32
; CHECK-NOT: vmrs
@@ -55,7 +54,7 @@ bb2:
define arm_apcscc i32 @t3(float* %a, float* %b) nounwind {
entry:
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK-NOT: vldr
; CHECK: ldr [[REG3:(r[0-9]+)]], [r0]
; CHECK: mvn [[REG4:(r[0-9]+)]], #-2147483648
diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll
index 260ec49cd86b..916a1ae4952a 100644
--- a/test/CodeGen/ARM/fpcmp.ll
+++ b/test/CodeGen/ARM/fpcmp.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
define i32 @f1(float %a) {
-;CHECK: f1:
+;CHECK-LABEL: f1:
;CHECK: vcmpe.f32
;CHECK: movmi
entry:
@@ -11,7 +11,7 @@ entry:
}
define i32 @f2(float %a) {
-;CHECK: f2:
+;CHECK-LABEL: f2:
;CHECK: vcmpe.f32
;CHECK: moveq
entry:
@@ -21,7 +21,7 @@ entry:
}
define i32 @f3(float %a) {
-;CHECK: f3:
+;CHECK-LABEL: f3:
;CHECK: vcmpe.f32
;CHECK: movgt
entry:
@@ -31,7 +31,7 @@ entry:
}
define i32 @f4(float %a) {
-;CHECK: f4:
+;CHECK-LABEL: f4:
;CHECK: vcmpe.f32
;CHECK: movge
entry:
@@ -41,7 +41,7 @@ entry:
}
define i32 @f5(float %a) {
-;CHECK: f5:
+;CHECK-LABEL: f5:
;CHECK: vcmpe.f32
;CHECK: movls
entry:
@@ -51,7 +51,7 @@ entry:
}
define i32 @f6(float %a) {
-;CHECK: f6:
+;CHECK-LABEL: f6:
;CHECK: vcmpe.f32
;CHECK: movne
entry:
@@ -61,7 +61,7 @@ entry:
}
define i32 @g1(double %a) {
-;CHECK: g1:
+;CHECK-LABEL: g1:
;CHECK: vcmpe.f64
;CHECK: movmi
entry:
diff --git a/test/CodeGen/ARM/fpcmp_ueq.ll b/test/CodeGen/ARM/fpcmp_ueq.ll
index 4a4c5b1c8b05..d84c7ae82eca 100644
--- a/test/CodeGen/ARM/fpcmp_ueq.ll
+++ b/test/CodeGen/ARM/fpcmp_ueq.ll
@@ -3,7 +3,7 @@
define i32 @f7(float %a, float %b) {
entry:
-; CHECK: f7:
+; CHECK-LABEL: f7:
; CHECK: vcmpe.f32
; CHECK: vmrs APSR_nzcv, fpscr
; CHECK: movweq
diff --git a/test/CodeGen/ARM/fpconsts.ll b/test/CodeGen/ARM/fpconsts.ll
index 638dde9d8a0f..0679a47ded7b 100644
--- a/test/CodeGen/ARM/fpconsts.ll
+++ b/test/CodeGen/ARM/fpconsts.ll
@@ -2,7 +2,7 @@
define float @t1(float %x) nounwind readnone optsize {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: vmov.f32 s{{.*}}, #4.000000e+00
%0 = fadd float %x, 4.000000e+00
ret float %0
@@ -10,7 +10,7 @@ entry:
define double @t2(double %x) nounwind readnone optsize {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: vmov.f64 d{{.*}}, #3.000000e+00
%0 = fadd double %x, 3.000000e+00
ret double %0
@@ -18,7 +18,7 @@ entry:
define double @t3(double %x) nounwind readnone optsize {
entry:
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: vmov.f64 d{{.*}}, #-1.300000e+01
%0 = fmul double %x, -1.300000e+01
ret double %0
@@ -26,7 +26,7 @@ entry:
define float @t4(float %x) nounwind readnone optsize {
entry:
-; CHECK: t4:
+; CHECK-LABEL: t4:
; CHECK: vmov.f32 s{{.*}}, #-2.400000e+01
%0 = fmul float %x, -2.400000e+01
ret float %0
diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll
index 1b4c008bb775..326e0628b4e5 100644
--- a/test/CodeGen/ARM/fpconv.ll
+++ b/test/CodeGen/ARM/fpconv.ll
@@ -2,9 +2,9 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
define float @f1(double %x) {
-;CHECK-VFP: f1:
+;CHECK-VFP-LABEL: f1:
;CHECK-VFP: vcvt.f32.f64
-;CHECK: f1:
+;CHECK-LABEL: f1:
;CHECK: truncdfsf2
entry:
%tmp1 = fptrunc double %x to float ; <float> [#uses=1]
@@ -12,9 +12,9 @@ entry:
}
define double @f2(float %x) {
-;CHECK-VFP: f2:
+;CHECK-VFP-LABEL: f2:
;CHECK-VFP: vcvt.f64.f32
-;CHECK: f2:
+;CHECK-LABEL: f2:
;CHECK: extendsfdf2
entry:
%tmp1 = fpext float %x to double ; <double> [#uses=1]
@@ -22,9 +22,9 @@ entry:
}
define i32 @f3(float %x) {
-;CHECK-VFP: f3:
+;CHECK-VFP-LABEL: f3:
;CHECK-VFP: vcvt.s32.f32
-;CHECK: f3:
+;CHECK-LABEL: f3:
;CHECK: fixsfsi
entry:
%tmp = fptosi float %x to i32 ; <i32> [#uses=1]
@@ -32,9 +32,9 @@ entry:
}
define i32 @f4(float %x) {
-;CHECK-VFP: f4:
+;CHECK-VFP-LABEL: f4:
;CHECK-VFP: vcvt.u32.f32
-;CHECK: f4:
+;CHECK-LABEL: f4:
;CHECK: fixunssfsi
entry:
%tmp = fptoui float %x to i32 ; <i32> [#uses=1]
@@ -42,9 +42,9 @@ entry:
}
define i32 @f5(double %x) {
-;CHECK-VFP: f5:
+;CHECK-VFP-LABEL: f5:
;CHECK-VFP: vcvt.s32.f64
-;CHECK: f5:
+;CHECK-LABEL: f5:
;CHECK: fixdfsi
entry:
%tmp = fptosi double %x to i32 ; <i32> [#uses=1]
@@ -52,9 +52,9 @@ entry:
}
define i32 @f6(double %x) {
-;CHECK-VFP: f6:
+;CHECK-VFP-LABEL: f6:
;CHECK-VFP: vcvt.u32.f64
-;CHECK: f6:
+;CHECK-LABEL: f6:
;CHECK: fixunsdfsi
entry:
%tmp = fptoui double %x to i32 ; <i32> [#uses=1]
@@ -62,9 +62,9 @@ entry:
}
define float @f7(i32 %a) {
-;CHECK-VFP: f7:
+;CHECK-VFP-LABEL: f7:
;CHECK-VFP: vcvt.f32.s32
-;CHECK: f7:
+;CHECK-LABEL: f7:
;CHECK: floatsisf
entry:
%tmp = sitofp i32 %a to float ; <float> [#uses=1]
@@ -72,9 +72,9 @@ entry:
}
define double @f8(i32 %a) {
-;CHECK-VFP: f8:
+;CHECK-VFP-LABEL: f8:
;CHECK-VFP: vcvt.f64.s32
-;CHECK: f8:
+;CHECK-LABEL: f8:
;CHECK: floatsidf
entry:
%tmp = sitofp i32 %a to double ; <double> [#uses=1]
@@ -82,9 +82,9 @@ entry:
}
define float @f9(i32 %a) {
-;CHECK-VFP: f9:
+;CHECK-VFP-LABEL: f9:
;CHECK-VFP: vcvt.f32.u32
-;CHECK: f9:
+;CHECK-LABEL: f9:
;CHECK: floatunsisf
entry:
%tmp = uitofp i32 %a to float ; <float> [#uses=1]
@@ -92,9 +92,9 @@ entry:
}
define double @f10(i32 %a) {
-;CHECK-VFP: f10:
+;CHECK-VFP-LABEL: f10:
;CHECK-VFP: vcvt.f64.u32
-;CHECK: f10:
+;CHECK-LABEL: f10:
;CHECK: floatunsidf
entry:
%tmp = uitofp i32 %a to double ; <double> [#uses=1]
diff --git a/test/CodeGen/ARM/fpmem.ll b/test/CodeGen/ARM/fpmem.ll
index 8faa57896a8d..8fbd1d805840 100644
--- a/test/CodeGen/ARM/fpmem.ll
+++ b/test/CodeGen/ARM/fpmem.ll
@@ -1,13 +1,13 @@
; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
define float @f1(float %a) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: mov r0, #0
ret float 0.000000e+00
}
define float @f2(float* %v, float %u) {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: vldr{{.*}}[
%tmp = load float* %v ; <float> [#uses=1]
%tmp1 = fadd float %tmp, %u ; <float> [#uses=1]
@@ -15,7 +15,7 @@ define float @f2(float* %v, float %u) {
}
define float @f2offset(float* %v, float %u) {
-; CHECK: f2offset:
+; CHECK-LABEL: f2offset:
; CHECK: vldr{{.*}}, #4]
%addr = getelementptr float* %v, i32 1
%tmp = load float* %addr
@@ -24,7 +24,7 @@ define float @f2offset(float* %v, float %u) {
}
define float @f2noffset(float* %v, float %u) {
-; CHECK: f2noffset:
+; CHECK-LABEL: f2noffset:
; CHECK: vldr{{.*}}, #-4]
%addr = getelementptr float* %v, i32 -1
%tmp = load float* %addr
@@ -33,7 +33,7 @@ define float @f2noffset(float* %v, float %u) {
}
define void @f3(float %a, float %b, float* %v) {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: vstr{{.*}}[
%tmp = fadd float %a, %b ; <float> [#uses=1]
store float %tmp, float* %v
diff --git a/test/CodeGen/ARM/fptoint.ll b/test/CodeGen/ARM/fptoint.ll
index 299cb8f81503..740868725e90 100644
--- a/test/CodeGen/ARM/fptoint.ll
+++ b/test/CodeGen/ARM/fptoint.ll
@@ -44,6 +44,6 @@ define void @foo9(double %x) {
store i16 %tmp, i16* null
ret void
}
-; CHECK: foo9:
+; CHECK-LABEL: foo9:
; CHECK: vmov r0, s0
diff --git a/test/CodeGen/ARM/fusedMAC.ll b/test/CodeGen/ARM/fusedMAC.ll
index 303d165de0b6..e29f291dc2c5 100644
--- a/test/CodeGen/ARM/fusedMAC.ll
+++ b/test/CodeGen/ARM/fusedMAC.ll
@@ -2,7 +2,7 @@
; Check generated fused MAC and MLS.
define double @fusedMACTest1(double %d1, double %d2, double %d3) {
-;CHECK: fusedMACTest1:
+;CHECK-LABEL: fusedMACTest1:
;CHECK: vfma.f64
%1 = fmul double %d1, %d2
%2 = fadd double %1, %d3
@@ -10,7 +10,7 @@ define double @fusedMACTest1(double %d1, double %d2, double %d3) {
}
define float @fusedMACTest2(float %f1, float %f2, float %f3) {
-;CHECK: fusedMACTest2:
+;CHECK-LABEL: fusedMACTest2:
;CHECK: vfma.f32
%1 = fmul float %f1, %f2
%2 = fadd float %1, %f3
@@ -18,7 +18,7 @@ define float @fusedMACTest2(float %f1, float %f2, float %f3) {
}
define double @fusedMACTest3(double %d1, double %d2, double %d3) {
-;CHECK: fusedMACTest3:
+;CHECK-LABEL: fusedMACTest3:
;CHECK: vfms.f64
%1 = fmul double %d2, %d3
%2 = fsub double %d1, %1
@@ -26,7 +26,7 @@ define double @fusedMACTest3(double %d1, double %d2, double %d3) {
}
define float @fusedMACTest4(float %f1, float %f2, float %f3) {
-;CHECK: fusedMACTest4:
+;CHECK-LABEL: fusedMACTest4:
;CHECK: vfms.f32
%1 = fmul float %f2, %f3
%2 = fsub float %f1, %1
@@ -34,7 +34,7 @@ define float @fusedMACTest4(float %f1, float %f2, float %f3) {
}
define double @fusedMACTest5(double %d1, double %d2, double %d3) {
-;CHECK: fusedMACTest5:
+;CHECK-LABEL: fusedMACTest5:
;CHECK: vfnma.f64
%1 = fmul double %d1, %d2
%2 = fsub double -0.0, %1
@@ -43,7 +43,7 @@ define double @fusedMACTest5(double %d1, double %d2, double %d3) {
}
define float @fusedMACTest6(float %f1, float %f2, float %f3) {
-;CHECK: fusedMACTest6:
+;CHECK-LABEL: fusedMACTest6:
;CHECK: vfnma.f32
%1 = fmul float %f1, %f2
%2 = fsub float -0.0, %1
@@ -52,7 +52,7 @@ define float @fusedMACTest6(float %f1, float %f2, float %f3) {
}
define double @fusedMACTest7(double %d1, double %d2, double %d3) {
-;CHECK: fusedMACTest7:
+;CHECK-LABEL: fusedMACTest7:
;CHECK: vfnms.f64
%1 = fmul double %d1, %d2
%2 = fsub double %1, %d3
@@ -60,7 +60,7 @@ define double @fusedMACTest7(double %d1, double %d2, double %d3) {
}
define float @fusedMACTest8(float %f1, float %f2, float %f3) {
-;CHECK: fusedMACTest8:
+;CHECK-LABEL: fusedMACTest8:
;CHECK: vfnms.f32
%1 = fmul float %f1, %f2
%2 = fsub float %1, %f3
@@ -68,7 +68,7 @@ define float @fusedMACTest8(float %f1, float %f2, float %f3) {
}
define <2 x float> @fusedMACTest9(<2 x float> %a, <2 x float> %b) {
-;CHECK: fusedMACTest9:
+;CHECK-LABEL: fusedMACTest9:
;CHECK: vfma.f32
%mul = fmul <2 x float> %a, %b
%add = fadd <2 x float> %mul, %a
@@ -76,7 +76,7 @@ define <2 x float> @fusedMACTest9(<2 x float> %a, <2 x float> %b) {
}
define <2 x float> @fusedMACTest10(<2 x float> %a, <2 x float> %b) {
-;CHECK: fusedMACTest10:
+;CHECK-LABEL: fusedMACTest10:
;CHECK: vfms.f32
%mul = fmul <2 x float> %a, %b
%sub = fsub <2 x float> %a, %mul
@@ -84,7 +84,7 @@ define <2 x float> @fusedMACTest10(<2 x float> %a, <2 x float> %b) {
}
define <4 x float> @fusedMACTest11(<4 x float> %a, <4 x float> %b) {
-;CHECK: fusedMACTest11:
+;CHECK-LABEL: fusedMACTest11:
;CHECK: vfma.f32
%mul = fmul <4 x float> %a, %b
%add = fadd <4 x float> %mul, %a
@@ -92,7 +92,7 @@ define <4 x float> @fusedMACTest11(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @fusedMACTest12(<4 x float> %a, <4 x float> %b) {
-;CHECK: fusedMACTest12:
+;CHECK-LABEL: fusedMACTest12:
;CHECK: vfms.f32
%mul = fmul <4 x float> %a, %b
%sub = fsub <4 x float> %a, %mul
diff --git a/test/CodeGen/ARM/globals.ll b/test/CodeGen/ARM/globals.ll
index eb71149d83a9..3101500f2ca8 100644
--- a/test/CodeGen/ARM/globals.ll
+++ b/test/CodeGen/ARM/globals.ll
@@ -57,7 +57,7 @@ define i32 @test1() {
-; LinuxPIC: test1:
+; LinuxPIC-LABEL: test1:
; LinuxPIC: ldr r0, .LCPI0_0
; LinuxPIC: ldr r1, .LCPI0_1
diff --git a/test/CodeGen/ARM/hidden-vis-2.ll b/test/CodeGen/ARM/hidden-vis-2.ll
index 8bb2c6e0c915..18d38d40072c 100644
--- a/test/CodeGen/ARM/hidden-vis-2.ll
+++ b/test/CodeGen/ARM/hidden-vis-2.ll
@@ -4,7 +4,7 @@
define i32 @t() nounwind readonly {
entry:
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: ldr
; CHECK-NEXT: ldr
%0 = load i32* @x, align 4 ; <i32> [#uses=1]
diff --git a/test/CodeGen/ARM/hidden-vis.ll b/test/CodeGen/ARM/hidden-vis.ll
index 3544ae81a0a4..ce2ce2c1de54 100644
--- a/test/CodeGen/ARM/hidden-vis.ll
+++ b/test/CodeGen/ARM/hidden-vis.ll
@@ -6,18 +6,18 @@
define weak hidden void @t1() nounwind {
; LINUX: .hidden t1
-; LINUX: t1:
+; LINUX-LABEL: t1:
; DARWIN: .private_extern _t1
-; DARWIN: t1:
+; DARWIN-LABEL: t1:
ret void
}
define weak void @t2() nounwind {
-; LINUX: t2:
+; LINUX-LABEL: t2:
; LINUX: .hidden a
-; DARWIN: t2:
+; DARWIN-LABEL: t2:
; DARWIN: .private_extern _a
ret void
}
diff --git a/test/CodeGen/ARM/ifconv-kills.ll b/test/CodeGen/ARM/ifconv-kills.ll
new file mode 100644
index 000000000000..bf54ba2f730c
--- /dev/null
+++ b/test/CodeGen/ARM/ifconv-kills.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -march arm -mcpu swift -verify-machineinstrs
+
+declare i32 @f(i32 %p0, i32 %p1)
+
+define i32 @foo(i32* %ptr) {
+entry:
+ %cmp = icmp ne i32* %ptr, null
+ br i1 %cmp, label %if.then, label %if.else
+
+; present something which can be easily if-converted
+if.then:
+ ; %R0 should be killed here
+ %valt = load i32* %ptr, align 4
+ br label %return
+
+if.else:
+ ; %R0 should be killed here; however, after if-conversion the %R0 kill
+ ; has to be removed because if.then will follow after this and still
+ ; read it.
+ %addr = getelementptr inbounds i32* %ptr, i32 4
+ %vale = load i32* %addr, align 4
+ br label %return
+
+return:
+ %phival = phi i32 [ %valt, %if.then ], [ %vale, %if.else ]
+ ; Suggest bringing %phival/%valt/%vale into %R1 (because otherwise there
+ ; will be no kills in if.then/if.else).
+ %retval = call i32 @f (i32 0, i32 %phival)
+ ret i32 %retval
+}
diff --git a/test/CodeGen/ARM/ifconv-regmask.ll b/test/CodeGen/ARM/ifconv-regmask.ll
new file mode 100644
index 000000000000..d45f65f9567f
--- /dev/null
+++ b/test/CodeGen/ARM/ifconv-regmask.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -mtriple=thumbv7s-apple-ios6.0.0 -verify-machineinstrs
+
+%union.opcode = type { i32 }
+
+@opcode = external global %union.opcode, align 4
+
+; Function Attrs: nounwind ssp
+define i32 @sfu() {
+entry:
+ %bf.load = load i32* getelementptr inbounds (%union.opcode* @opcode, i32 0, i32 0), align 4
+ %bf.lshr = lshr i32 %bf.load, 26
+ %bf.clear = and i32 %bf.lshr, 7
+ switch i32 %bf.clear, label %return [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ ]
+
+sw.bb: ; preds = %entry
+ %call = tail call i32 @func0()
+ br label %return
+
+sw.bb1: ; preds = %entry
+ %call2 = tail call i32 @func1()
+ br label %return
+
+return: ; preds = %sw.bb1, %sw.bb, %entry
+ %retval.0 = phi i32 [ %call2, %sw.bb1 ], [ %call, %sw.bb ], [ -1, %entry ]
+ ret i32 %retval.0
+}
+
+; Function Attrs: nounwind ssp
+declare i32 @func0()
+
+; Function Attrs: nounwind ssp
+declare i32 @func1()
diff --git a/test/CodeGen/ARM/ifcvt1.ll b/test/CodeGen/ARM/ifcvt1.ll
index fd831442c14b..5a55653239d1 100644
--- a/test/CodeGen/ARM/ifcvt1.ll
+++ b/test/CodeGen/ARM/ifcvt1.ll
@@ -2,8 +2,8 @@
; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s -check-prefix=SWIFT
define i32 @t1(i32 %a, i32 %b) {
-; A8: t1:
-; SWIFT: t1:
+; A8-LABEL: t1:
+; SWIFT-LABEL: t1:
%tmp2 = icmp eq i32 %a, 0
br i1 %tmp2, label %cond_false, label %cond_true
diff --git a/test/CodeGen/ARM/ifcvt10.ll b/test/CodeGen/ARM/ifcvt10.ll
index a5082d836587..26c72723b287 100644
--- a/test/CodeGen/ARM/ifcvt10.ll
+++ b/test/CodeGen/ARM/ifcvt10.ll
@@ -6,7 +6,7 @@
define void @t(double %a, double %b, double %c, double %d, i32* nocapture %solutions, double* nocapture %x) nounwind {
entry:
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: vpop {d8}
; CHECK-NOT: vpopne
; CHECK: pop {r7, pc}
diff --git a/test/CodeGen/ARM/ifcvt11.ll b/test/CodeGen/ARM/ifcvt11.ll
index 0f142eef7a3c..dba8a3f1a6af 100644
--- a/test/CodeGen/ARM/ifcvt11.ll
+++ b/test/CodeGen/ARM/ifcvt11.ll
@@ -6,7 +6,7 @@
%struct.xyz_t = type { double, double, double }
define i32 @effie(i32 %tsets, %struct.xyz_t* nocapture %p, i32 %a, i32 %b, i32 %c) nounwind readonly noinline {
-; CHECK: effie:
+; CHECK-LABEL: effie:
entry:
%0 = icmp sgt i32 %tsets, 0
br i1 %0, label %bb.nph, label %bb6
diff --git a/test/CodeGen/ARM/ifcvt12.ll b/test/CodeGen/ARM/ifcvt12.ll
index 77bdca57e555..b61f4e1bb51b 100644
--- a/test/CodeGen/ARM/ifcvt12.ll
+++ b/test/CodeGen/ARM/ifcvt12.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s
define i32 @f1(i32 %a, i32 %b, i32 %c) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: mlsne r0, r0, r1, r2
%tmp1 = icmp eq i32 %a, 0
br i1 %tmp1, label %cond_false, label %cond_true
diff --git a/test/CodeGen/ARM/ifcvt2.ll b/test/CodeGen/ARM/ifcvt2.ll
index 1bca10a7c646..e34edecf57ee 100644
--- a/test/CodeGen/ARM/ifcvt2.ll
+++ b/test/CodeGen/ARM/ifcvt2.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+v4t | FileCheck %s
define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: bxlt lr
%tmp2 = icmp sgt i32 %c, 10
%tmp5 = icmp slt i32 %d, 4
@@ -19,7 +19,7 @@ UnifiedReturnBlock:
}
define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: bxgt lr
; CHECK: cmp
; CHECK: addge
diff --git a/test/CodeGen/ARM/ifcvt3.ll b/test/CodeGen/ARM/ifcvt3.ll
index eef4de050b35..fa7d61887d9d 100644
--- a/test/CodeGen/ARM/ifcvt3.ll
+++ b/test/CodeGen/ARM/ifcvt3.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -march=arm -mattr=+v4t | grep bx | count 2
define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: cmp r2, #1
; CHECK: cmpne r2, #7
switch i32 %c, label %cond_next [
diff --git a/test/CodeGen/ARM/ifcvt4.ll b/test/CodeGen/ARM/ifcvt4.ll
index d247f14d91ce..53c789d184f6 100644
--- a/test/CodeGen/ARM/ifcvt4.ll
+++ b/test/CodeGen/ARM/ifcvt4.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
; Do not if-convert when branches go to the different loops.
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK-NOT: subgt
; CHECK-NOT: suble
; Don't use
diff --git a/test/CodeGen/ARM/ifcvt5.ll b/test/CodeGen/ARM/ifcvt5.ll
index 5081791bc257..31e3e00c468e 100644
--- a/test/CodeGen/ARM/ifcvt5.ll
+++ b/test/CodeGen/ARM/ifcvt5.ll
@@ -12,10 +12,10 @@ entry:
}
define i32 @t1(i32 %a, i32 %b) {
-; A8: t1:
+; A8-LABEL: t1:
; A8: poplt {r7, pc}
-; SWIFT: t1:
+; SWIFT-LABEL: t1:
; SWIFT: pop {r7, pc}
; SWIFT: pop {r7, pc}
entry:
diff --git a/test/CodeGen/ARM/indirect-reg-input.ll b/test/CodeGen/ARM/indirect-reg-input.ll
index 86728fa61934..b936455975c6 100644
--- a/test/CodeGen/ARM/indirect-reg-input.ll
+++ b/test/CodeGen/ARM/indirect-reg-input.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
+; RUN: not llc < %s -march=arm -mcpu=cortex-a8 2>&1 | FileCheck %s
; Check for error message:
; CHECK: error: inline asm not supported yet: don't know how to handle tied indirect register inputs
diff --git a/test/CodeGen/ARM/indirectbr-2.ll b/test/CodeGen/ARM/indirectbr-2.ll
index 084f520a8ee5..0c41da658009 100644
--- a/test/CodeGen/ARM/indirectbr-2.ll
+++ b/test/CodeGen/ARM/indirectbr-2.ll
@@ -8,7 +8,7 @@
; The indirect branch has the two destinations as successors. The lone PHI
; statement shouldn't be implicitly defined.
-; CHECK: func:
+; CHECK-LABEL: func:
; CHECK: Ltmp1: @ Block address taken
; CHECK-NOT: @ implicit-def: R0
; CHECK: @ 4-byte Reload
diff --git a/test/CodeGen/ARM/indirectbr-3.ll b/test/CodeGen/ARM/indirectbr-3.ll
new file mode 100644
index 000000000000..5a9c45902edc
--- /dev/null
+++ b/test/CodeGen/ARM/indirectbr-3.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-ios | FileCheck %s
+
+; If ARMBaseInstrInfo::AnalyzeBlocks returns the wrong value, which was possible
+; for blocks with indirect branches, the IfConverter could end up deleting
+; blocks that were the destinations of indirect branches, leaving branches to
+; nowhere.
+; <rdar://problem/14464830>
+
+define i32 @preserve_blocks(i32 %x) {
+; preserve_blocks:
+; CHECK: Block address taken
+; CHECK: movs r0, #2
+; CHECK: movs r0, #1
+; CHECK-NOT: Address of block that was removed by CodeGen
+entry:
+ %c2 = icmp slt i32 %x, 3
+ %blockaddr = select i1 %c2, i8* blockaddress(@preserve_blocks, %ibt1), i8* blockaddress(@preserve_blocks, %ibt2)
+ %c1 = icmp eq i32 %x, 0
+ br i1 %c1, label %pre_ib, label %nextblock
+
+nextblock:
+ ret i32 3
+
+ibt1:
+ ret i32 2
+
+ibt2:
+ ret i32 1
+
+pre_ib:
+ indirectbr i8* %blockaddr, [ label %ibt1, label %ibt2 ]
+}
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index 341c33f84ff3..1aeeb916e489 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -1,14 +1,15 @@
; RUN: llc < %s -relocation-model=pic -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=ARM
; RUN: llc < %s -relocation-model=pic -mtriple=thumbv6-apple-darwin | FileCheck %s -check-prefix=THUMB
; RUN: llc < %s -relocation-model=static -mtriple=thumbv7-apple-darwin | FileCheck %s -check-prefix=THUMB2
+; RUN: llc < %s -relocation-model=static -mtriple=thumbv8-apple-darwin | FileCheck %s -check-prefix=THUMB2
@nextaddr = global i8* null ; <i8**> [#uses=2]
@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
define internal i32 @foo(i32 %i) nounwind {
-; ARM: foo:
-; THUMB: foo:
-; THUMB2: foo:
+; ARM-LABEL: foo:
+; THUMB-LABEL: foo:
+; THUMB2-LABEL: foo:
entry:
%0 = load i8** @nextaddr, align 4 ; <i8*> [#uses=2]
%1 = icmp eq i8* %0, null ; <i1> [#uses=1]
@@ -48,15 +49,18 @@ L2: ; preds = %L3, %bb2
L1: ; preds = %L2, %bb2
%res.3 = phi i32 [ %phitmp, %L2 ], [ 2, %bb2 ] ; <i32> [#uses=1]
+; ARM-LABEL: %L1
; ARM: ldr [[R1:r[0-9]+]], LCPI
; ARM: add [[R1b:r[0-9]+]], pc, [[R1]]
; ARM: str [[R1b]]
-; THUMB: ldr.n
+; THUMB-LABEL: %L1
+; THUMB: ldr
; THUMB: add
-; THUMB: ldr.n [[R2:r[0-9]+]], LCPI
+; THUMB: ldr [[R2:r[0-9]+]], LCPI
; THUMB: add [[R2]], pc
; THUMB: str [[R2]]
-; THUMB2: ldr.n [[R2:r[0-9]+]], LCPI
+; THUMB2-LABEL: %L1
+; THUMB2: ldr [[R2:r[0-9]+]], LCPI
; THUMB2-NEXT: str{{(.w)?}} [[R2]]
store i8* blockaddress(@foo, %L5), i8** @nextaddr, align 4
ret i32 %res.3
diff --git a/test/CodeGen/ARM/inlineasm-64bit.ll b/test/CodeGen/ARM/inlineasm-64bit.ll
index be5eb8157317..683a0c4b7d30 100644
--- a/test/CodeGen/ARM/inlineasm-64bit.ll
+++ b/test/CodeGen/ARM/inlineasm-64bit.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -O3 -mtriple=arm-linux-gnueabi | FileCheck %s
-
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s
; check if regs are passing correctly
define void @i64_write(i64* %p, i64 %val) nounwind {
-; CHECK: i64_write:
+; CHECK-LABEL: i64_write:
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
%1 = tail call i64 asm sideeffect "1: ldrexd $0, ${0:H}, [$2]\0A strexd $0, $3, ${3:H}, [$2]\0A teq $0, #0\0A bne 1b", "=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %val) nounwind
@@ -12,7 +12,7 @@ define void @i64_write(i64* %p, i64 %val) nounwind {
; check if register allocation can reuse the registers
define void @multi_writes(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind {
entry:
-; CHECK: multi_writes:
+; CHECK-LABEL: multi_writes:
; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
@@ -44,11 +44,63 @@ entry:
; check if callee-saved registers used by inline asm are saved/restored
define void @foo(i64* %p, i64 %i) nounwind {
-; CHECK:foo:
-; CHECK: push {{{r[4-9]|r10|r11}}
+; CHECK-LABEL:foo:
+; CHECK: {{push|push.w}} {{{r[4-9]|r10|r11}}
; CHECK: ldrexd [[REG1:(r[0-9]?[02468])]], {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
; CHECK: strexd [[REG1]], {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
-; CHECK: pop {{{r[4-9]|r10|r11}}
+; CHECK: {{pop|pop.w}} {{{r[4-9]|r10|r11}}
%1 = tail call { i64, i64 } asm sideeffect "@ atomic64_set\0A1: ldrexd $0, ${0:H}, [$3]\0Aldrexd $1, ${1:H}, [$3]\0A strexd $0, $4, ${4:H}, [$3]\0A teq $0, #0\0A bne 1b", "=&r,=&r,=*Qo,r,r,~{cc}"(i64* %p, i64* %p, i64 %i) nounwind
ret void
}
+
+; return *p;
+define i64 @ldrd_test(i64* %p) nounwind {
+; CHECK-LABEL: ldrd_test:
+ %1 = tail call i64 asm "ldrd $0, ${0:H}, [$1]", "=r,r"(i64* %p) nounwind
+ ret i64 %1
+}
+
+define i64 @QR_test(i64* %p) nounwind {
+; CHECK-LABEL: QR_test:
+; CHECK: ldrd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
+ %1 = tail call i64 asm "ldrd ${0:Q}, ${0:R}, [$1]", "=r,r"(i64* %p) nounwind
+ ret i64 %1
+}
+
+define i64 @defuse_test(i64 %p) nounwind {
+; CHECK-LABEL: defuse_test:
+; CHECK: add {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, #1
+ %1 = tail call i64 asm "add $0, ${0:H}, #1", "=r,0"(i64 %p) nounwind
+ ret i64 %1
+}
+
+; *p = (hi << 32) | lo;
+define void @strd_test(i64* %p, i32 %lo, i32 %hi) nounwind {
+; CHECK-LABEL: strd_test:
+; CHECK: strd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}
+ %1 = zext i32 %hi to i64
+ %2 = shl nuw i64 %1, 32
+ %3 = sext i32 %lo to i64
+ %4 = or i64 %2, %3
+ tail call void asm sideeffect "strd $0, ${0:H}, [$1]", "r,r"(i64 %4, i64* %p) nounwind
+ ret void
+}
+
+; Make sure we don't untie operands by mistake.
+define i64 @tied_64bit_test(i64 %in) nounwind {
+; CHECK-LABEL: tied_64bit_test:
+; CHECK: OUT([[OUTREG:r[0-9]+]]), IN([[OUTREG]])
+ %addr = alloca i64
+ call void asm "OUT($0), IN($1)", "=*rm,0"(i64* %addr, i64 %in)
+ ret i64 %in
+}
+
+; If we explicitly name a tied operand, then the code should look up the operand
+; we were tied to for information about register class and so on.
+define i64 @tied_64bit_lookback_test(i64 %in) nounwind {
+; CHECK-LABEL: tied_64bit_lookback_test:
+; CHECK: OUTLO([[LO:r[0-9]+]]) OUTHI([[HI:r[0-9]+]]) INLO([[LO]]) INHI([[HI]])
+ %vars = call {i64, i32, i64} asm "OUTLO(${2:Q}) OUTHI(${2:R}) INLO(${3:Q}) INHI(${3:R})", "=r,=r,=r,2"(i64 %in)
+ %res = extractvalue {i64, i32, i64} %vars, 2
+ ret i64 %res
+}
diff --git a/test/CodeGen/ARM/inlineasm4.ll b/test/CodeGen/ARM/inlineasm4.ll
index 9ed4b997a634..4a1bccaf61c5 100644
--- a/test/CodeGen/ARM/inlineasm4.ll
+++ b/test/CodeGen/ARM/inlineasm4.ll
@@ -4,7 +4,7 @@ define double @f(double %x) {
entry:
%0 = tail call double asm "mov ${0:R}, #4\0A", "=&r"()
ret double %0
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: mov r1, #4
}
@@ -12,6 +12,6 @@ define double @g(double %x) {
entry:
%0 = tail call double asm "mov ${0:Q}, #4\0A", "=&r"()
ret double %0
-; CHECK: g:
+; CHECK-LABEL: g:
; CHECK: mov r0, #4
}
diff --git a/test/CodeGen/ARM/interrupt-attr.ll b/test/CodeGen/ARM/interrupt-attr.ll
new file mode 100644
index 000000000000..217fd696237e
--- /dev/null
+++ b/test/CodeGen/ARM/interrupt-attr.ll
@@ -0,0 +1,130 @@
+; RUN: llc -mtriple=arm-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A %s
+; RUN: llc -mtriple=thumb-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A-THUMB %s
+; RUN: llc -mtriple=thumb-apple-darwin -mcpu=cortex-m3 -o - %s | FileCheck --check-prefix=CHECK-M %s
+
+declare arm_aapcscc void @bar()
+
+@bigvar = global [16 x i32] zeroinitializer
+
+define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
+ ; Must save all registers except banked sp and lr (we save lr anyway because
+ ; we actually need it at the end to execute the return ourselves).
+
+ ; Also need special function return setting pc and CPSR simultaneously.
+; CHECK-A-LABEL: irq_fn:
+; CHECK-A: push {r0, r1, r2, r3, r11, lr}
+; CHECK-A: add r11, sp, #16
+; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: bic sp, sp, #7
+; CHECK-A: bl bar
+; CHECK-A: sub sp, r11, #16
+; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: subs pc, lr, #4
+
+; CHECK-A-THUMB-LABEL: irq_fn:
+; CHECK-A-THUMB: push {r0, r1, r2, r3, r4, r7, lr}
+; CHECK-A-THUMB: mov r4, sp
+; CHECK-A-THUMB: add r7, sp, #20
+; CHECK-A-THUMB: bic r4, r4, #7
+; CHECK-A-THUMB: bl bar
+; CHECK-A-THUMB: sub.w r4, r7, #20
+; CHECK-A-THUMB: mov sp, r4
+; CHECK-A-THUMB: pop.w {r0, r1, r2, r3, r4, r7, lr}
+; CHECK-A-THUMB: subs pc, lr, #4
+
+ ; Normal AAPCS function (r0-r3 pushed onto stack by hardware, lr set to
+ ; appropriate sentinel so no special return needed).
+; CHECK-M: push {r4, r7, lr}
+; CHECK-M: add r7, sp, #4
+; CHECK-M: sub sp, #4
+; CHECK-M: mov r4, sp
+; CHECK-M: mov sp, r4
+; CHECK-M: blx _bar
+; CHECK-M: subs r4, r7, #4
+; CHECK-M: mov sp, r4
+; CHECK-M: pop {r4, r7, pc}
+
+ call arm_aapcscc void @bar()
+ ret void
+}
+
+define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
+; CHECK-A-LABEL: fiq_fn:
+; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
+ ; 32 to get past r0, r1, ..., r7
+; CHECK-A: add r11, sp, #32
+; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: bic sp, sp, #7
+; [...]
+ ; 32 must match above
+; CHECK-A: sub sp, r11, #32
+; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
+; CHECK-A: subs pc, lr, #4
+
+ %val = load volatile [16 x i32]* @bigvar
+ store volatile [16 x i32] %val, [16 x i32]* @bigvar
+ ret void
+}
+
+define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
+; CHECK-A-LABEL: swi_fn:
+; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-A: add r11, sp, #44
+; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: bic sp, sp, #7
+; [...]
+; CHECK-A: sub sp, r11, #44
+; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK-A: subs pc, lr, #0
+
+ %val = load volatile [16 x i32]* @bigvar
+ store volatile [16 x i32] %val, [16 x i32]* @bigvar
+ ret void
+}
+
+define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
+; CHECK-A-LABEL: undef_fn:
+; CHECK-A: push {r0, r1, r2, r3, r11, lr}
+; CHECK-A: add r11, sp, #16
+; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: bic sp, sp, #7
+; [...]
+; CHECK-A: sub sp, r11, #16
+; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: subs pc, lr, #0
+
+ call void @bar()
+ ret void
+}
+
+define arm_aapcscc void @abort_fn() alignstack(8) "interrupt"="ABORT" {
+; CHECK-A-LABEL: abort_fn:
+; CHECK-A: push {r0, r1, r2, r3, r11, lr}
+; CHECK-A: add r11, sp, #16
+; CHECK-A: sub sp, sp, #{{[0-9]+}}
+; CHECK-A: bic sp, sp, #7
+; [...]
+; CHECK-A: sub sp, r11, #16
+; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
+; CHECK-A: subs pc, lr, #4
+
+ call void @bar()
+ ret void
+}
+
+@var = global double 0.0
+
+; We don't save VFP regs, since it would be a massive overhead in the general
+; case.
+define arm_aapcscc void @floating_fn() alignstack(8) "interrupt"="IRQ" {
+; CHECK-A-LABEL: floating_fn:
+; CHECK-A-NOT: vpush
+; CHECK-A-NOT: vstr
+; CHECK-A-NOT: vstm
+; CHECK-A: vadd.f64 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+ %lhs = load volatile double* @var
+ %rhs = load volatile double* @var
+ %sum = fadd double %lhs, %rhs
+ store double %sum, double* @var
+ ret void
+}
diff --git a/test/CodeGen/ARM/intrinsics-crypto.ll b/test/CodeGen/ARM/intrinsics-crypto.ll
new file mode 100644
index 000000000000..c038fe6da84a
--- /dev/null
+++ b/test/CodeGen/ARM/intrinsics-crypto.ll
@@ -0,0 +1,57 @@
+; RUN: llc < %s -mtriple=armv8 -mattr=+crypto | FileCheck %s
+
+define arm_aapcs_vfpcc <16 x i8> @test_aesde(<16 x i8>* %a, <16 x i8> *%b) {
+ %tmp = load <16 x i8>* %a
+ %tmp2 = load <16 x i8>* %b
+ %tmp3 = call <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8> %tmp, <16 x i8> %tmp2)
+ ; CHECK: aesd.8 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp4 = call <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8> %tmp3, <16 x i8> %tmp2)
+ ; CHECK: aese.8 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8> %tmp4)
+ ; CHECK: aesimc.8 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8> %tmp5)
+ ; CHECK: aesmc.8 q{{[0-9]+}}, q{{[0-9]+}}
+ ret <16 x i8> %tmp6
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_sha(<4 x i32> *%a, <4 x i32> *%b, <4 x i32> *%c) {
+ %tmp = load <4 x i32>* %a
+ %tmp2 = load <4 x i32>* %b
+ %tmp3 = load <4 x i32>* %c
+ %res1 = call <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32> %tmp)
+ ; CHECK: sha1h.32 q{{[0-9]+}}, q{{[0-9]+}}
+ %res2 = call <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha1c.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res3 = call <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32> %res2, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha1m.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res4 = call <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32> %res3, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha1p.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res5 = call <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha1su0.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res6 = call <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32> %res5, <4 x i32> %res1)
+ ; CHECK: sha1su1.32 q{{[0-9]+}}, q{{[0-9]+}}
+ %res7 = call <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32> %res6, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha256h.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res8 = call <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32> %res7, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha256h2.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res9 = call <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32> %res8, <4 x i32> %tmp3, <4 x i32> %res1)
+ ; CHECK: sha256su1.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %res10 = call <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32> %res9, <4 x i32> %tmp3)
+ ; CHECK: sha256su0.32 q{{[0-9]+}}, q{{[0-9]+}}
+ ret <4 x i32> %res10
+}
+
+declare <16 x i8> @llvm.arm.neon.aesd.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aese.v16i8(<16 x i8>, <16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aesimc.v16i8(<16 x i8>)
+declare <16 x i8> @llvm.arm.neon.aesmc.v16i8(<16 x i8>)
+declare <4 x i32> @llvm.arm.neon.sha1h.v4i32(<4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1c.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1m.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1p.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1su0.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256h.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256h2.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256su1.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha256su0.v4i32(<4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.arm.neon.sha1su1.v4i32(<4 x i32>, <4 x i32>)
diff --git a/test/CodeGen/ARM/intrinsics-v8.ll b/test/CodeGen/ARM/intrinsics-v8.ll
new file mode 100644
index 000000000000..247bfc1e5884
--- /dev/null
+++ b/test/CodeGen/ARM/intrinsics-v8.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=armv8 -mattr=+db | FileCheck %s
+
+define void @test() {
+ ; CHECK: dmb sy
+ call void @llvm.arm.dmb(i32 15)
+ ; CHECK: dmb osh
+ call void @llvm.arm.dmb(i32 3)
+ ; CHECK: dsb sy
+ call void @llvm.arm.dsb(i32 15)
+ ; CHECK: dsb ishld
+ call void @llvm.arm.dsb(i32 9)
+ ; CHECK: sevl
+ tail call void @llvm.arm.sevl() nounwind
+ ret void
+}
+
+declare void @llvm.arm.dmb(i32)
+declare void @llvm.arm.dsb(i32)
+declare void @llvm.arm.sevl() nounwind
diff --git a/test/CodeGen/ARM/ldm.ll b/test/CodeGen/ARM/ldm.ll
index db78fd06ab2d..d5b805c721b7 100644
--- a/test/CodeGen/ARM/ldm.ll
+++ b/test/CodeGen/ARM/ldm.ll
@@ -4,9 +4,9 @@
@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
define i32 @t1() {
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: pop
-; V4T: t1:
+; V4T-LABEL: t1:
; V4T: pop
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
@@ -15,9 +15,9 @@ define i32 @t1() {
}
define i32 @t2() {
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: pop
-; V4T: t2:
+; V4T-LABEL: t2:
; V4T: pop
%tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
%tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
@@ -27,10 +27,10 @@ define i32 @t2() {
}
define i32 @t3() {
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: ldmib
; CHECK: pop
-; V4T: t3:
+; V4T-LABEL: t3:
; V4T: ldmib
; V4T: pop
; V4T-NEXT: bx lr
diff --git a/test/CodeGen/ARM/ldr.ll b/test/CodeGen/ARM/ldr.ll
index 011e61caea96..e4c695b87bec 100644
--- a/test/CodeGen/ARM/ldr.ll
+++ b/test/CodeGen/ARM/ldr.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i32 @f1(i32* %v) {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: ldr r0
entry:
%tmp = load i32* %v
@@ -9,7 +9,7 @@ entry:
}
define i32 @f2(i32* %v) {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: ldr r0
entry:
%tmp2 = getelementptr i32* %v, i32 1023
@@ -18,7 +18,7 @@ entry:
}
define i32 @f3(i32* %v) {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: mov
; CHECK: ldr r0
entry:
@@ -28,7 +28,7 @@ entry:
}
define i32 @f4(i32 %base) {
-; CHECK: f4:
+; CHECK-LABEL: f4:
; CHECK-NOT: mvn
; CHECK: ldr r0
entry:
@@ -39,7 +39,7 @@ entry:
}
define i32 @f5(i32 %base, i32 %offset) {
-; CHECK: f5:
+; CHECK-LABEL: f5:
; CHECK: ldr r0
entry:
%tmp1 = add i32 %base, %offset
@@ -49,7 +49,7 @@ entry:
}
define i32 @f6(i32 %base, i32 %offset) {
-; CHECK: f6:
+; CHECK-LABEL: f6:
; CHECK: ldr r0{{.*}}lsl{{.*}}
entry:
%tmp1 = shl i32 %offset, 2
@@ -60,7 +60,7 @@ entry:
}
define i32 @f7(i32 %base, i32 %offset) {
-; CHECK: f7:
+; CHECK-LABEL: f7:
; CHECK: ldr r0{{.*}}lsr{{.*}}
entry:
%tmp1 = lshr i32 %offset, 2
diff --git a/test/CodeGen/ARM/ldr_post.ll b/test/CodeGen/ARM/ldr_post.ll
index a6ca43448380..f5ff7dda5e04 100644
--- a/test/CodeGen/ARM/ldr_post.ll
+++ b/test/CodeGen/ARM/ldr_post.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: ldr {{.*, \[.*]}}, -r2
; CHECK-NOT: ldr
define i32 @test1(i32 %a, i32 %b, i32 %c) {
@@ -13,7 +13,7 @@ define i32 @test1(i32 %a, i32 %b, i32 %c) {
ret i32 %tmp5
}
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: ldr {{.*, \[.*\]}}, #-16
; CHECK-NOT: ldr
define i32 @test2(i32 %a, i32 %b) {
diff --git a/test/CodeGen/ARM/ldr_pre.ll b/test/CodeGen/ARM/ldr_pre.ll
index 6c40ad7326b6..82818272cf22 100644
--- a/test/CodeGen/ARM/ldr_pre.ll
+++ b/test/CodeGen/ARM/ldr_pre.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: ldr {{.*!}}
; CHECK-NOT: ldr
define i32* @test1(i32* %X, i32* %dest) {
@@ -11,7 +11,7 @@ define i32* @test1(i32* %X, i32* %dest) {
ret i32* %Y
}
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: ldr {{.*!}}
; CHECK-NOT: ldr
define i32 @test2(i32 %a, i32 %b, i32 %c) {
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index 73b546d021d5..864d18a88ae6 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -13,10 +13,10 @@
define i64 @t(i64 %a) nounwind readonly {
entry:
-; A8: t:
+; A8-LABEL: t:
; A8: ldrd r2, r3, [r2]
-; M3: t:
+; M3-LABEL: t:
; M3-NOT: ldrd
%0 = load i64** @b, align 4
@@ -67,3 +67,31 @@ bb: ; preds = %bb, %entry
return: ; preds = %bb, %entry
ret void
}
+
+; rdar://13978317
+; Pair of loads not formed when lifetime markers are set.
+%struct.Test = type { i32, i32, i32 }
+
+@TestVar = external global %struct.Test
+
+define void @Func1() nounwind ssp {
+; CHECK: @Func1
+entry:
+; A8: movw [[BASE:r[0-9]+]], :lower16:{{.*}}TestVar{{.*}}
+; A8: movt [[BASE]], :upper16:{{.*}}TestVar{{.*}}
+; A8: ldrd [[FIELD1:r[0-9]+]], [[FIELD2:r[0-9]+]], {{\[}}[[BASE]], #4]
+; A8-NEXT: add [[FIELD1]], [[FIELD2]]
+; A8-NEXT: str [[FIELD1]], {{\[}}[[BASE]]{{\]}}
+ %orig_blocks = alloca [256 x i16], align 2
+  %0 = bitcast [256 x i16]* %orig_blocks to i8*
+  call void @llvm.lifetime.start(i64 512, i8* %0) nounwind
+ %tmp1 = load i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 1), align 4
+ %tmp2 = load i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 2), align 4
+ %add = add nsw i32 %tmp2, %tmp1
+ store i32 %add, i32* getelementptr inbounds (%struct.Test* @TestVar, i32 0, i32 0), align 4
+ call void @llvm.lifetime.end(i64 512, i8* %0) nounwind
+ ret void
+}
+
+
+declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
diff --git a/test/CodeGen/ARM/ldst-f32-2-i32.ll b/test/CodeGen/ARM/ldst-f32-2-i32.ll
index 1c69e15bbbfb..61c459c7435e 100644
--- a/test/CodeGen/ARM/ldst-f32-2-i32.ll
+++ b/test/CodeGen/ARM/ldst-f32-2-i32.ll
@@ -3,7 +3,7 @@
; rdar://8944252
define void @t(i32 %width, float* nocapture %src, float* nocapture %dst, i32 %index) nounwind {
-; CHECK: t:
+; CHECK-LABEL: t:
entry:
%src6 = bitcast float* %src to i8*
%0 = icmp eq i32 %width, 0
diff --git a/test/CodeGen/ARM/ldstrex.ll b/test/CodeGen/ARM/ldstrex.ll
new file mode 100644
index 000000000000..5eaae53da994
--- /dev/null
+++ b/test/CodeGen/ARM/ldstrex.ll
@@ -0,0 +1,139 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin > %t
+; RUN: FileCheck %s < %t
+; RUN: FileCheck %s < %t --check-prefix=CHECK-T2ADDRMODE
+
+%0 = type { i32, i32 }
+
+; CHECK-LABEL: f0:
+; CHECK: ldrexd
+define i64 @f0(i8* %p) nounwind readonly {
+entry:
+ %ldrexd = tail call %0 @llvm.arm.ldrexd(i8* %p)
+ %0 = extractvalue %0 %ldrexd, 1
+ %1 = extractvalue %0 %ldrexd, 0
+ %2 = zext i32 %0 to i64
+ %3 = zext i32 %1 to i64
+ %shl = shl nuw i64 %2, 32
+ %4 = or i64 %shl, %3
+ ret i64 %4
+}
+
+; CHECK-LABEL: f1:
+; CHECK: strexd
+define i32 @f1(i8* %ptr, i64 %val) nounwind {
+entry:
+ %tmp4 = trunc i64 %val to i32
+ %tmp6 = lshr i64 %val, 32
+ %tmp7 = trunc i64 %tmp6 to i32
+ %strexd = tail call i32 @llvm.arm.strexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
+ ret i32 %strexd
+}
+
+declare %0 @llvm.arm.ldrexd(i8*) nounwind readonly
+declare i32 @llvm.arm.strexd(i32, i32, i8*) nounwind
+
+; CHECK-LABEL: test_load_i8:
+; CHECK: ldrexb r0, [r0]
+; CHECK-NOT: uxtb
+define i32 @test_load_i8(i8* %addr) {
+ %val = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
+ ret i32 %val
+}
+
+; CHECK-LABEL: test_load_i16:
+; CHECK: ldrexh r0, [r0]
+; CHECK-NOT: uxth
+define i32 @test_load_i16(i16* %addr) {
+ %val = call i32 @llvm.arm.ldrex.p0i16(i16* %addr)
+ ret i32 %val
+}
+
+; CHECK-LABEL: test_load_i32:
+; CHECK: ldrex r0, [r0]
+define i32 @test_load_i32(i32* %addr) {
+ %val = call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+ ret i32 %val
+}
+
+declare i32 @llvm.arm.ldrex.p0i8(i8*) nounwind readonly
+declare i32 @llvm.arm.ldrex.p0i16(i16*) nounwind readonly
+declare i32 @llvm.arm.ldrex.p0i32(i32*) nounwind readonly
+
+; CHECK-LABEL: test_store_i8:
+; CHECK-NOT: uxtb
+; CHECK: strexb r0, r1, [r2]
+define i32 @test_store_i8(i32, i8 %val, i8* %addr) {
+ %extval = zext i8 %val to i32
+ %res = call i32 @llvm.arm.strex.p0i8(i32 %extval, i8* %addr)
+ ret i32 %res
+}
+
+; CHECK-LABEL: test_store_i16:
+; CHECK-NOT: uxth
+; CHECK: strexh r0, r1, [r2]
+define i32 @test_store_i16(i32, i16 %val, i16* %addr) {
+ %extval = zext i16 %val to i32
+ %res = call i32 @llvm.arm.strex.p0i16(i32 %extval, i16* %addr)
+ ret i32 %res
+}
+
+; CHECK-LABEL: test_store_i32:
+; CHECK: strex r0, r1, [r2]
+define i32 @test_store_i32(i32, i32 %val, i32* %addr) {
+ %res = call i32 @llvm.arm.strex.p0i32(i32 %val, i32* %addr)
+ ret i32 %res
+}
+
+declare i32 @llvm.arm.strex.p0i8(i32, i8*) nounwind
+declare i32 @llvm.arm.strex.p0i16(i32, i16*) nounwind
+declare i32 @llvm.arm.strex.p0i32(i32, i32*) nounwind
+
+; CHECK-LABEL: test_clear:
+; CHECK: clrex
+define void @test_clear() {
+ call void @llvm.arm.clrex()
+ ret void
+}
+
+declare void @llvm.arm.clrex() nounwind
+
+@base = global i32* null
+
+define void @excl_addrmode() {
+; CHECK-T2ADDRMODE-LABEL: excl_addrmode:
+ %base1020 = load i32** @base
+ %offset1020 = getelementptr i32* %base1020, i32 255
+ call i32 @llvm.arm.ldrex.p0i32(i32* %offset1020)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1020)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
+
+ %base1024 = load i32** @base
+ %offset1024 = getelementptr i32* %base1024, i32 256
+ call i32 @llvm.arm.ldrex.p0i32(i32* %offset1024)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1024)
+; CHECK-T2ADDRMODE: add.w r[[ADDR:[0-9]+]], {{r[0-9]+}}, #1024
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ %base1 = load i32** @base
+ %addr8 = bitcast i32* %base1 to i8*
+ %offset1_8 = getelementptr i8* %addr8, i32 1
+ %offset1 = bitcast i8* %offset1_8 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %offset1)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1)
+; CHECK-T2ADDRMODE: adds r[[ADDR:[0-9]+]], #1
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ %local = alloca i8, i32 1024
+ %local32 = bitcast i8* %local to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32)
+; CHECK-T2ADDRMODE: mov r[[ADDR:[0-9]+]], sp
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ ret void
+}
diff --git a/test/CodeGen/ARM/ldstrexd.ll b/test/CodeGen/ARM/ldstrexd.ll
deleted file mode 100644
index 0c0911a86e72..000000000000
--- a/test/CodeGen/ARM/ldstrexd.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-
-%0 = type { i32, i32 }
-
-; CHECK: f0:
-; CHECK: ldrexd
-define i64 @f0(i8* %p) nounwind readonly {
-entry:
- %ldrexd = tail call %0 @llvm.arm.ldrexd(i8* %p)
- %0 = extractvalue %0 %ldrexd, 1
- %1 = extractvalue %0 %ldrexd, 0
- %2 = zext i32 %0 to i64
- %3 = zext i32 %1 to i64
- %shl = shl nuw i64 %2, 32
- %4 = or i64 %shl, %3
- ret i64 %4
-}
-
-; CHECK: f1:
-; CHECK: strexd
-define i32 @f1(i8* %ptr, i64 %val) nounwind {
-entry:
- %tmp4 = trunc i64 %val to i32
- %tmp6 = lshr i64 %val, 32
- %tmp7 = trunc i64 %tmp6 to i32
- %strexd = tail call i32 @llvm.arm.strexd(i32 %tmp4, i32 %tmp7, i8* %ptr)
- ret i32 %strexd
-}
-
-declare %0 @llvm.arm.ldrexd(i8*) nounwind readonly
-declare i32 @llvm.arm.strexd(i32, i32, i8*) nounwind
-
diff --git a/test/CodeGen/ARM/lit.local.cfg b/test/CodeGen/ARM/lit.local.cfg
index 4d75f581a1d2..8a3ba96497e7 100644
--- a/test/CodeGen/ARM/lit.local.cfg
+++ b/test/CodeGen/ARM/lit.local.cfg
@@ -1,5 +1,3 @@
-config.suffixes = ['.ll', '.c', '.cpp', '.test']
-
targets = set(config.root.targets_to_build.split())
if not 'ARM' in targets:
config.unsupported = True
diff --git a/test/CodeGen/ARM/load-address-masked.ll b/test/CodeGen/ARM/load-address-masked.ll
new file mode 100644
index 000000000000..65cc31104bc9
--- /dev/null
+++ b/test/CodeGen/ARM/load-address-masked.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=armv4t-unknown-linux-gnueabi -verify-machineinstrs | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
+target triple = "armv4t-unknown-linux-gnueabi"
+
+@a = global i32 0, align 4
+
+define i32 @foo() {
+entry:
+ ret i32 and (i32 ptrtoint (i32* @a to i32), i32 255)
+}
+
+; CHECK-LABEL: foo:
+; CHECK: ldrb r0, .LCPI0_0
diff --git a/test/CodeGen/ARM/load-global.ll b/test/CodeGen/ARM/load-global.ll
index 15a415df731d..00ca2e8b1b75 100644
--- a/test/CodeGen/ARM/load-global.ll
+++ b/test/CodeGen/ARM/load-global.ll
@@ -26,7 +26,7 @@ define i32 @test1() {
; PIC: .long L_G$non_lazy_ptr-(LPC0_0+8)
; PIC_T: _test1
-; PIC_T: ldr.n r0, LCPI0_0
+; PIC_T: ldr r0, LCPI0_0
; PIC_T: add r0, pc
; PIC_T: ldr r0, [r0]
; PIC_T: ldr r0, [r0]
diff --git a/test/CodeGen/ARM/load_i1_select.ll b/test/CodeGen/ARM/load_i1_select.ll
index bdd408164992..7a208ea41752 100644
--- a/test/CodeGen/ARM/load_i1_select.ll
+++ b/test/CodeGen/ARM/load_i1_select.ll
@@ -6,7 +6,7 @@ target triple = "thumbv7-apple-ios0.0.0"
; Codegen should only compare one bit of the loaded value.
; rdar://10887484
-; CHECK: foo:
+; CHECK-LABEL: foo:
; CHECK: ldrb r[[R0:[0-9]+]], [r0]
; CHECK: tst.w r[[R0]], #1
define void @foo(i8* %call, double* %p) nounwind {
diff --git a/test/CodeGen/ARM/long.ll b/test/CodeGen/ARM/long.ll
index 0f1c7be6a3d2..7fffc81797cb 100644
--- a/test/CodeGen/ARM/long.ll
+++ b/test/CodeGen/ARM/long.ll
@@ -1,33 +1,33 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i64 @f1() {
-; CHECK: f1:
+; CHECK-LABEL: f1:
entry:
ret i64 0
}
define i64 @f2() {
-; CHECK: f2:
+; CHECK-LABEL: f2:
entry:
ret i64 1
}
define i64 @f3() {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: mvn r0, #-2147483648
entry:
ret i64 2147483647
}
define i64 @f4() {
-; CHECK: f4:
+; CHECK-LABEL: f4:
; CHECK: mov r0, #-2147483648
entry:
ret i64 2147483648
}
define i64 @f5() {
-; CHECK: f5:
+; CHECK-LABEL: f5:
; CHECK: mvn r0, #0
; CHECK: mvn r1, #-2147483648
entry:
@@ -35,7 +35,7 @@ entry:
}
define i64 @f6(i64 %x, i64 %y) {
-; CHECK: f6:
+; CHECK-LABEL: f6:
; CHECK: adds
; CHECK: adc
entry:
@@ -44,7 +44,7 @@ entry:
}
define void @f7() {
-; CHECK: f7:
+; CHECK-LABEL: f7:
entry:
%tmp = call i64 @f8( ) ; <i64> [#uses=0]
ret void
@@ -53,7 +53,7 @@ entry:
declare i64 @f8()
define i64 @f9(i64 %a, i64 %b) {
-; CHECK: f9:
+; CHECK-LABEL: f9:
; CHECK: subs r
; CHECK: sbc
entry:
@@ -62,7 +62,7 @@ entry:
}
define i64 @f(i32 %a, i32 %b) {
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: smull
entry:
%tmp = sext i32 %a to i64 ; <i64> [#uses=1]
@@ -72,7 +72,7 @@ entry:
}
define i64 @g(i32 %a, i32 %b) {
-; CHECK: g:
+; CHECK-LABEL: g:
; CHECK: umull
entry:
%tmp = zext i32 %a to i64 ; <i64> [#uses=1]
@@ -82,7 +82,7 @@ entry:
}
define i64 @f10() {
-; CHECK: f10:
+; CHECK-LABEL: f10:
entry:
%a = alloca i64, align 8 ; <i64*> [#uses=1]
%retval = load i64* %a ; <i64> [#uses=1]
diff --git a/test/CodeGen/ARM/longMAC.ll b/test/CodeGen/ARM/longMAC.ll
index e4a00e9ac303..2cf91c32bc1a 100644
--- a/test/CodeGen/ARM/longMAC.ll
+++ b/test/CodeGen/ARM/longMAC.ll
@@ -2,7 +2,7 @@
; Check generated signed and unsigned multiply accumulate long.
define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
-;CHECK: MACLongTest1:
+;CHECK-LABEL: MACLongTest1:
;CHECK: umlal
%conv = zext i32 %a to i64
%conv1 = zext i32 %b to i64
@@ -12,7 +12,7 @@ define i64 @MACLongTest1(i32 %a, i32 %b, i64 %c) {
}
define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
-;CHECK: MACLongTest2:
+;CHECK-LABEL: MACLongTest2:
;CHECK: smlal
%conv = sext i32 %a to i64
%conv1 = sext i32 %b to i64
@@ -22,7 +22,7 @@ define i64 @MACLongTest2(i32 %a, i32 %b, i64 %c) {
}
define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
-;CHECK: MACLongTest3:
+;CHECK-LABEL: MACLongTest3:
;CHECK: umlal
%conv = zext i32 %b to i64
%conv1 = zext i32 %a to i64
@@ -33,7 +33,7 @@ define i64 @MACLongTest3(i32 %a, i32 %b, i32 %c) {
}
define i64 @MACLongTest4(i32 %a, i32 %b, i32 %c) {
-;CHECK: MACLongTest4:
+;CHECK-LABEL: MACLongTest4:
;CHECK: smlal
%conv = sext i32 %b to i64
%conv1 = sext i32 %a to i64
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index a99a7ec86c1e..3e986d802d81 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i64 @f0(i64 %A, i64 %B) {
-; CHECK: f0
+; CHECK-LABEL: f0:
; CHECK: lsrs r3, r3, #1
; CHECK-NEXT: rrx r2, r2
; CHECK-NEXT: subs r0, r0, r2
@@ -13,7 +13,7 @@ define i64 @f0(i64 %A, i64 %B) {
}
define i32 @f1(i64 %x, i64 %y) {
-; CHECK: f1
+; CHECK-LABEL: f1:
; CHECK: lsl{{.*}}r2
%a = shl i64 %x, %y
%b = trunc i64 %a to i32
@@ -21,7 +21,7 @@ define i32 @f1(i64 %x, i64 %y) {
}
define i32 @f2(i64 %x, i64 %y) {
-; CHECK: f2
+; CHECK-LABEL: f2:
; CHECK: lsr{{.*}}r2
; CHECK-NEXT: rsb r3, r2, #32
; CHECK-NEXT: sub r2, r2, #32
@@ -34,7 +34,7 @@ define i32 @f2(i64 %x, i64 %y) {
}
define i32 @f3(i64 %x, i64 %y) {
-; CHECK: f3
+; CHECK-LABEL: f3:
; CHECK: lsr{{.*}}r2
; CHECK-NEXT: rsb r3, r2, #32
; CHECK-NEXT: sub r2, r2, #32
diff --git a/test/CodeGen/ARM/lsr-icmp-imm.ll b/test/CodeGen/ARM/lsr-icmp-imm.ll
index 248c4bd1beea..103642b8b72f 100644
--- a/test/CodeGen/ARM/lsr-icmp-imm.ll
+++ b/test/CodeGen/ARM/lsr-icmp-imm.ll
@@ -4,7 +4,7 @@
; LSR should compare against the post-incremented induction variable.
; In this case, the immediate value is -2 which requires a cmn instruction.
;
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: %for.body
; CHECK: sub{{.*}}[[IV:r[0-9]+]], #2
; CHECK: cmn{{.*}}[[IV]], #2
diff --git a/test/CodeGen/ARM/lsr-unfolded-offset.ll b/test/CodeGen/ARM/lsr-unfolded-offset.ll
index 9b0f3e54e88a..26d4be2e06ff 100644
--- a/test/CodeGen/ARM/lsr-unfolded-offset.ll
+++ b/test/CodeGen/ARM/lsr-unfolded-offset.ll
@@ -7,8 +7,7 @@
; CHECK: sub sp, #{{40|32|28|24}}
; CHECK: %for.inc
-; CHECK: ldr{{(.w)?}} r{{.*}}, [sp, #
-; CHECK: ldr{{(.w)?}} r{{.*}}, [sp, #
+; CHECK-NOT: ldr
; CHECK: add
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
diff --git a/test/CodeGen/ARM/machine-cse-cmp.ll b/test/CodeGen/ARM/machine-cse-cmp.ll
index 03abd762a261..7e4b309fd9d1 100644
--- a/test/CodeGen/ARM/machine-cse-cmp.ll
+++ b/test/CodeGen/ARM/machine-cse-cmp.ll
@@ -6,7 +6,7 @@
define i32 @f1(i32 %cond1, i32 %x1, i32 %x2, i32 %x3) {
entry:
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: cmp
; CHECK: moveq
; CHECK-NOT: cmp
@@ -25,7 +25,7 @@ entry:
; rdar://10660865
define void @f2() nounwind ssp {
entry:
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: cmp
; CHECK: poplt
; CHECK-NOT: cmp
@@ -49,7 +49,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
; rdar://12462006
define i8* @f3(i8* %base, i32* nocapture %offset, i32 %size) nounwind {
entry:
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK-NOT: sub
; CHECK: cmp
; CHECK: blt
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
index 8656c5bbd72c..fc9b22614d6d 100644
--- a/test/CodeGen/ARM/machine-licm.ll
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -12,7 +12,7 @@
define void @t(i32* nocapture %vals, i32 %c) nounwind {
entry:
-; ARM: t:
+; ARM-LABEL: t:
; ARM: ldr [[REGISTER_1:r[0-9]+]], LCPI0_0
; Unfortunately currently ARM codegen doesn't cse the ldr from constantpool.
; The issue is it can be read by an "add pc" or a "ldr [pc]" so it's messy
@@ -23,14 +23,14 @@ entry:
; ARM: ldr r{{[0-9]+}}, [pc, [[REGISTER_1]]]
; ARM: ldr r{{[0-9]+}}, [r{{[0-9]+}}]
-; MOVT: t:
+; MOVT-LABEL: t:
; MOVT: movw [[REGISTER_2:r[0-9]+]], :lower16:(L_GV$non_lazy_ptr-(LPC0_0+8))
; MOVT: movt [[REGISTER_2]], :upper16:(L_GV$non_lazy_ptr-(LPC0_0+8))
; MOVT: LPC0_0:
; MOVT: ldr r{{[0-9]+}}, [pc, [[REGISTER_2]]]
; MOVT: ldr r{{[0-9]+}}, [r{{[0-9]+}}]
-; THUMB: t:
+; THUMB-LABEL: t:
%0 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
br i1 %0, label %return, label %bb.nph
@@ -40,7 +40,7 @@ bb.nph: ; preds = %entry
; ARM: .section
; THUMB: BB#1
-; THUMB: ldr.n r2, LCPI0_0
+; THUMB: ldr r2, LCPI0_0
; THUMB: add r2, pc
; THUMB: ldr r{{[0-9]+}}, [r2]
; THUMB: LBB0_2
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll
index d846e5cb268b..946c63ed40c8 100644
--- a/test/CodeGen/ARM/memcpy-inline.ll
+++ b/test/CodeGen/ARM/memcpy-inline.ll
@@ -15,7 +15,7 @@
define i32 @t0() {
entry:
-; CHECK: t0:
+; CHECK-LABEL: t0:
; CHECK: vldr [[REG1:d[0-9]+]],
; CHECK: vstr [[REG1]],
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.x* @dst, i32 0, i32 0), i8* getelementptr inbounds (%struct.x* @src, i32 0, i32 0), i32 11, i32 8, i1 false)
@@ -24,7 +24,7 @@ entry:
define void @t1(i8* nocapture %C) nounwind {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
; CHECK: adds r0, #15
@@ -37,7 +37,7 @@ entry:
define void @t2(i8* nocapture %C) nounwind {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: ldr [[REG2:r[0-9]+]], [r1, #32]
; CHECK: str [[REG2]], [r0, #32]
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
@@ -52,7 +52,7 @@ entry:
define void @t3(i8* nocapture %C) nounwind {
entry:
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: vld1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.8 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
; CHECK: adds r0, #16
@@ -65,7 +65,7 @@ entry:
define void @t4(i8* nocapture %C) nounwind {
entry:
-; CHECK: t4:
+; CHECK-LABEL: t4:
; CHECK: vld1.8 {[[REG3:d[0-9]+]], [[REG4:d[0-9]+]]}, [r1]
; CHECK: vst1.8 {[[REG3]], [[REG4]]}, [r0]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %C, i8* getelementptr inbounds ([18 x i8]* @.str4, i64 0, i64 0), i64 18, i32 1, i1 false)
@@ -74,7 +74,7 @@ entry:
define void @t5(i8* nocapture %C) nounwind {
entry:
-; CHECK: t5:
+; CHECK-LABEL: t5:
; CHECK: movs [[REG5:r[0-9]+]], #0
; CHECK: strb [[REG5]], [r0, #6]
; CHECK: movw [[REG6:r[0-9]+]], #21587
@@ -87,7 +87,7 @@ entry:
define void @t6() nounwind {
entry:
-; CHECK: t6:
+; CHECK-LABEL: t6:
; CHECK: vld1.8 {[[REG8:d[0-9]+]]}, [r0]
; CHECK: vstr [[REG8]], [r1]
; CHECK: adds r1, #6
diff --git a/test/CodeGen/ARM/memset-inline.ll b/test/CodeGen/ARM/memset-inline.ll
index ee8c36433885..4e86d05b0a1c 100644
--- a/test/CodeGen/ARM/memset-inline.ll
+++ b/test/CodeGen/ARM/memset-inline.ll
@@ -2,7 +2,7 @@
define void @t1(i8* nocapture %c) nounwind optsize {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: movs r1, #0
; CHECK: str r1, [r0]
; CHECK: str r1, [r0, #4]
@@ -13,7 +13,7 @@ entry:
define void @t2() nounwind ssp {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: add.w r1, r0, #10
; CHECK: vmov.i32 {{q[0-9]+}}, #0x0
; CHECK: vst1.16 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
index 4b15326008a4..5da335fa2030 100644
--- a/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=thumb -mcpu=swift -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; RUN: llc < %s -march=thumb -mcpu=swift -pre-RA-sched=source -join-globalcopies -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
;
; Loop counter copies should be eliminated.
; There is also a MUL here, but we don't care where it is scheduled.
@@ -28,3 +28,52 @@ for.end: ; preds = %for.body, %entry
%s.0.lcssa = phi i32 [ 0, %entry ], [ %mul, %for.body ]
ret i32 %s.0.lcssa
}
+
+
+; This case was a crasher in constrainLocalCopy.
+; The problem was the t2LDR_PRE defining both the global and local lrg.
+; CHECK-LABEL: *** Final schedule for BB#5 ***
+; CHECK: %[[R4:vreg[0-9]+]]<def>, %[[R1:vreg[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
+; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R1]]
+; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R4]]
+; CHECK-LABEL: MACHINEINSTRS
+%struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] }
+%union.rtunion_def = type { i64 }
+
+; Function Attrs: nounwind ssp
+declare hidden fastcc void @df_ref_record(i32* nocapture, %struct.rtx_def*, %struct.rtx_def**, %struct.rtx_def*, i32, i32) #0
+
+; Function Attrs: nounwind ssp
+define hidden fastcc void @df_def_record_1(i32* nocapture %df, %struct.rtx_def* %x, %struct.rtx_def* %insn) #0 {
+entry:
+ br label %while.cond
+
+while.cond: ; preds = %if.end28, %entry
+ %loc.0 = phi %struct.rtx_def** [ %rtx31, %if.end28 ], [ undef, %entry ]
+ %dst.0 = phi %struct.rtx_def* [ %0, %if.end28 ], [ undef, %entry ]
+ switch i32 undef, label %if.end47 [
+ i32 61, label %if.then46
+ i32 64, label %if.then24
+ i32 132, label %if.end28
+ i32 133, label %if.end28
+ ]
+
+if.then24: ; preds = %while.cond
+ br label %if.end28
+
+if.end28: ; preds = %if.then24, %while.cond, %while.cond
+ %dst.1 = phi %struct.rtx_def* [ undef, %if.then24 ], [ %dst.0, %while.cond ], [ %dst.0, %while.cond ]
+ %arrayidx30 = getelementptr inbounds %struct.rtx_def* %dst.1, i32 0, i32 1, i32 0
+ %rtx31 = bitcast %union.rtunion_def* %arrayidx30 to %struct.rtx_def**
+ %0 = load %struct.rtx_def** %rtx31, align 4
+ br label %while.cond
+
+if.then46: ; preds = %while.cond
+ tail call fastcc void @df_ref_record(i32* %df, %struct.rtx_def* %dst.0, %struct.rtx_def** %loc.0, %struct.rtx_def* %insn, i32 0, i32 undef)
+ unreachable
+
+if.end47: ; preds = %while.cond
+ ret void
+}
+
+attributes #0 = { nounwind ssp }
diff --git a/test/CodeGen/ARM/mls.ll b/test/CodeGen/ARM/mls.ll
index 066bf98de651..8f0d3a89a30a 100644
--- a/test/CodeGen/ARM/mls.ll
+++ b/test/CodeGen/ARM/mls.ll
@@ -14,15 +14,15 @@ define i32 @f2(i32 %a, i32 %b, i32 %c) {
ret i32 %tmp2
}
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: mls r0, r0, r1, r2
-; NO_MULOPS: f1:
+; NO_MULOPS-LABEL: f1:
; NO_MULOPS: mul r0, r0, r1
; NO_MULOPS-NEXT: sub r0, r2, r0
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: mul r0, r0, r1
; CHECK-NEXT: sub r0, r0, r2
-; NO_MULOPS: f2:
+; NO_MULOPS-LABEL: f2:
; NO_MULOPS: mul r0, r0, r1
; NO_MULOPS-NEXT: sub r0, r0, r2
diff --git a/test/CodeGen/ARM/movt.ll b/test/CodeGen/ARM/movt.ll
index e82aca0e9c69..25c1bfe32044 100644
--- a/test/CodeGen/ARM/movt.ll
+++ b/test/CodeGen/ARM/movt.ll
@@ -2,7 +2,7 @@
; rdar://7317664
define i32 @t(i32 %X) nounwind {
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: movt r0, #65535
entry:
%0 = or i32 %X, -65536
@@ -10,7 +10,7 @@ entry:
}
define i32 @t2(i32 %X) nounwind {
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: movt r0, #65534
entry:
%0 = or i32 %X, -131072
diff --git a/test/CodeGen/ARM/mul_const.ll b/test/CodeGen/ARM/mul_const.ll
index c50a23354678..482d8f2888ce 100644
--- a/test/CodeGen/ARM/mul_const.ll
+++ b/test/CodeGen/ARM/mul_const.ll
@@ -2,7 +2,7 @@
define i32 @t9(i32 %v) nounwind readnone {
entry:
-; CHECK: t9:
+; CHECK-LABEL: t9:
; CHECK: add r0, r0, r0, lsl #3
%0 = mul i32 %v, 9
ret i32 %0
@@ -10,7 +10,7 @@ entry:
define i32 @t7(i32 %v) nounwind readnone {
entry:
-; CHECK: t7:
+; CHECK-LABEL: t7:
; CHECK: rsb r0, r0, r0, lsl #3
%0 = mul i32 %v, 7
ret i32 %0
@@ -18,7 +18,7 @@ entry:
define i32 @t5(i32 %v) nounwind readnone {
entry:
-; CHECK: t5:
+; CHECK-LABEL: t5:
; CHECK: add r0, r0, r0, lsl #2
%0 = mul i32 %v, 5
ret i32 %0
@@ -26,7 +26,7 @@ entry:
define i32 @t3(i32 %v) nounwind readnone {
entry:
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: add r0, r0, r0, lsl #1
%0 = mul i32 %v, 3
ret i32 %0
@@ -34,7 +34,7 @@ entry:
define i32 @t12288(i32 %v) nounwind readnone {
entry:
-; CHECK: t12288:
+; CHECK-LABEL: t12288:
; CHECK: add r0, r0, r0, lsl #1
; CHECK: lsl{{.*}}#12
%0 = mul i32 %v, 12288
@@ -43,7 +43,7 @@ entry:
define i32 @tn9(i32 %v) nounwind readnone {
entry:
-; CHECK: tn9:
+; CHECK-LABEL: tn9:
; CHECK: add r0, r0, r0, lsl #3
; CHECK: rsb r0, r0, #0
%0 = mul i32 %v, -9
@@ -52,7 +52,7 @@ entry:
define i32 @tn7(i32 %v) nounwind readnone {
entry:
-; CHECK: tn7:
+; CHECK-LABEL: tn7:
; CHECK: sub r0, r0, r0, lsl #3
%0 = mul i32 %v, -7
ret i32 %0
@@ -60,7 +60,7 @@ entry:
define i32 @tn5(i32 %v) nounwind readnone {
entry:
-; CHECK: tn5:
+; CHECK-LABEL: tn5:
; CHECK: add r0, r0, r0, lsl #2
; CHECK: rsb r0, r0, #0
%0 = mul i32 %v, -5
@@ -69,7 +69,7 @@ entry:
define i32 @tn3(i32 %v) nounwind readnone {
entry:
-; CHECK: tn3:
+; CHECK-LABEL: tn3:
; CHECK: sub r0, r0, r0, lsl #2
%0 = mul i32 %v, -3
ret i32 %0
@@ -77,7 +77,7 @@ entry:
define i32 @tn12288(i32 %v) nounwind readnone {
entry:
-; CHECK: tn12288:
+; CHECK-LABEL: tn12288:
; CHECK: sub r0, r0, r0, lsl #2
; CHECK: lsl{{.*}}#12
%0 = mul i32 %v, -12288
diff --git a/test/CodeGen/ARM/mulhi.ll b/test/CodeGen/ARM/mulhi.ll
index 932004c5dd85..63705c502779 100644
--- a/test/CodeGen/ARM/mulhi.ll
+++ b/test/CodeGen/ARM/mulhi.ll
@@ -3,13 +3,13 @@
; RUN: llc < %s -march=thumb -mcpu=cortex-m3 | FileCheck %s -check-prefix=M3
define i32 @smulhi(i32 %x, i32 %y) nounwind {
-; V6: smulhi:
+; V6-LABEL: smulhi:
; V6: smmul
-; V4: smulhi:
+; V4-LABEL: smulhi:
; V4: smull
-; M3: smulhi:
+; M3-LABEL: smulhi:
; M3: smull
%tmp = sext i32 %x to i64 ; <i64> [#uses=1]
%tmp1 = sext i32 %y to i64 ; <i64> [#uses=1]
@@ -20,13 +20,13 @@ define i32 @smulhi(i32 %x, i32 %y) nounwind {
}
define i32 @umulhi(i32 %x, i32 %y) nounwind {
-; V6: umulhi:
+; V6-LABEL: umulhi:
; V6: umull
-; V4: umulhi:
+; V4-LABEL: umulhi:
; V4: umull
-; M3: umulhi:
+; M3-LABEL: umulhi:
; M3: umull
%tmp = zext i32 %x to i64 ; <i64> [#uses=1]
%tmp1 = zext i32 %y to i64 ; <i64> [#uses=1]
@@ -38,13 +38,13 @@ define i32 @umulhi(i32 %x, i32 %y) nounwind {
; rdar://r10152911
define i32 @t3(i32 %a) nounwind {
-; V6: t3:
+; V6-LABEL: t3:
; V6: smmla
-; V4: t3:
+; V4-LABEL: t3:
; V4: smull
-; M3: t3:
+; M3-LABEL: t3:
; M3-NOT: smmla
; M3: smull
entry:
diff --git a/test/CodeGen/ARM/mvn.ll b/test/CodeGen/ARM/mvn.ll
index 571c21a833ec..2c5ccd7442e0 100644
--- a/test/CodeGen/ARM/mvn.ll
+++ b/test/CodeGen/ARM/mvn.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | grep mvn | count 8
+; RUN: llc < %s -march=arm | grep mvn | count 9
define i32 @f1() {
entry:
diff --git a/test/CodeGen/ARM/neon-spfp.ll b/test/CodeGen/ARM/neon-spfp.ll
index c00f0d17c9f5..dd2e67fe7753 100644
--- a/test/CodeGen/ARM/neon-spfp.ll
+++ b/test/CodeGen/ARM/neon-spfp.ll
@@ -1,41 +1,41 @@
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 | FileCheck %s -check-prefix=LINUXA5
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=LINUXA8
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 | FileCheck %s -check-prefix=LINUXA9
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 | FileCheck %s -check-prefix=LINUXA15
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift | FileCheck %s -check-prefix=LINUXSWIFT
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 | FileCheck %s -check-prefix=CHECK-LINUXA5
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK-LINUXA8
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 | FileCheck %s -check-prefix=CHECK-LINUXA9
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 | FileCheck %s -check-prefix=CHECK-LINUXA15
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift | FileCheck %s -check-prefix=CHECK-LINUXSWIFT
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 --enable-unsafe-fp-math | FileCheck %s -check-prefix=UNSAFEA5
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=UNSAFEA8
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 --enable-unsafe-fp-math | FileCheck %s -check-prefix=UNSAFEA9
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 --enable-unsafe-fp-math | FileCheck %s -check-prefix=UNSAFEA15
-; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift --enable-unsafe-fp-math | FileCheck %s -check-prefix=UNSAFESWIFT
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a5 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA5
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a8 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA8
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a9 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA9
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=cortex-a15 --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFEA15
+; RUN: llc < %s -mtriple armv7a-none-linux-gnueabihf -mcpu=swift --enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK-UNSAFESWIFT
-; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a5 | FileCheck %s -check-prefix=DARWINA5
-; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=DARWINA8
-; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a9 | FileCheck %s -check-prefix=DARWINA9
-; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a15 | FileCheck %s -check-prefix=DARWINA15
-; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=swift | FileCheck %s -check-prefix=DARWINSWIFT
+; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a5 | FileCheck %s -check-prefix=CHECK-DARWINA5
+; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a8 | FileCheck %s -check-prefix=CHECK-DARWINA8
+; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a9 | FileCheck %s -check-prefix=CHECK-DARWINA9
+; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=cortex-a15 | FileCheck %s -check-prefix=CHECK-DARWINA15
+; RUN: llc < %s -mtriple armv7a-none-darwin -mcpu=swift | FileCheck %s -check-prefix=CHECK-DARWINSWIFT
; This test makes sure we're not lowering VMUL.f32 D* (aka. NEON) for single-prec. FP ops, since
; NEON is not fully IEEE 754 compliant, unless unsafe-math is selected.
@.str = private unnamed_addr constant [12 x i8] c"S317\09%.5g \0A\00", align 1
-; CHECK-LINUXA5: main:
-; CHECK-LINUXA8: main:
-; CHECK-LINUXA9: main:
-; CHECK-LINUXA15: main:
-; CHECK-LINUXSWIFT: main:
-; CHECK-UNSAFEA5: main:
-; CHECK-UNSAFEA8: main:
-; CHECK-UNSAFEA9: main:
-; CHECK-UNSAFEA15: main:
-; CHECK-UNSAFESWIFT: main:
-; CHECK-DARWINA5: main:
-; CHECK-DARWINA8: main:
-; CHECK-DARWINA9: main:
-; CHECK-DARWINA15: main:
-; CHECK-DARWINSWIFT: main:
+; CHECK-LINUXA5-LABEL: main:
+; CHECK-LINUXA8-LABEL: main:
+; CHECK-LINUXA9-LABEL: main:
+; CHECK-LINUXA15-LABEL: main:
+; CHECK-LINUXSWIFT-LABEL: main:
+; CHECK-UNSAFEA5-LABEL: main:
+; CHECK-UNSAFEA8-LABEL: main:
+; CHECK-UNSAFEA9-LABEL: main:
+; CHECK-UNSAFEA15-LABEL: main:
+; CHECK-UNSAFESWIFT-LABEL: main:
+; CHECK-DARWINA5-LABEL: main:
+; CHECK-DARWINA8-LABEL: main:
+; CHECK-DARWINA9-LABEL: main:
+; CHECK-DARWINA15-LABEL: main:
+; CHECK-DARWINSWIFT-LABEL: main:
define i32 @main() {
entry:
br label %for.body
diff --git a/test/CodeGen/ARM/neon_minmax.ll b/test/CodeGen/ARM/neon_minmax.ll
index 0a7c8b2b6aae..2e45919e7790 100644
--- a/test/CodeGen/ARM/neon_minmax.ll
+++ b/test/CodeGen/ARM/neon_minmax.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mcpu=swift | FileCheck %s
define float @fmin_ole(float %x) nounwind {
-;CHECK: fmin_ole:
+;CHECK-LABEL: fmin_ole:
;CHECK: vmin.f32
%cond = fcmp ole float 1.0, %x
%min1 = select i1 %cond, float 1.0, float %x
@@ -9,7 +9,7 @@ define float @fmin_ole(float %x) nounwind {
}
define float @fmin_ole_zero(float %x) nounwind {
-;CHECK: fmin_ole_zero:
+;CHECK-LABEL: fmin_ole_zero:
;CHECK-NOT: vmin.f32
%cond = fcmp ole float 0.0, %x
%min1 = select i1 %cond, float 0.0, float %x
@@ -17,7 +17,7 @@ define float @fmin_ole_zero(float %x) nounwind {
}
define float @fmin_ult(float %x) nounwind {
-;CHECK: fmin_ult:
+;CHECK-LABEL: fmin_ult:
;CHECK: vmin.f32
%cond = fcmp ult float %x, 1.0
%min1 = select i1 %cond, float %x, float 1.0
@@ -25,7 +25,7 @@ define float @fmin_ult(float %x) nounwind {
}
define float @fmax_ogt(float %x) nounwind {
-;CHECK: fmax_ogt:
+;CHECK-LABEL: fmax_ogt:
;CHECK: vmax.f32
%cond = fcmp ogt float 1.0, %x
%max1 = select i1 %cond, float 1.0, float %x
@@ -33,7 +33,7 @@ define float @fmax_ogt(float %x) nounwind {
}
define float @fmax_uge(float %x) nounwind {
-;CHECK: fmax_uge:
+;CHECK-LABEL: fmax_uge:
;CHECK: vmax.f32
%cond = fcmp uge float %x, 1.0
%max1 = select i1 %cond, float %x, float 1.0
@@ -41,7 +41,7 @@ define float @fmax_uge(float %x) nounwind {
}
define float @fmax_uge_zero(float %x) nounwind {
-;CHECK: fmax_uge_zero:
+;CHECK-LABEL: fmax_uge_zero:
;CHECK-NOT: vmax.f32
%cond = fcmp uge float %x, 0.0
%max1 = select i1 %cond, float %x, float 0.0
@@ -49,7 +49,7 @@ define float @fmax_uge_zero(float %x) nounwind {
}
define float @fmax_olt_reverse(float %x) nounwind {
-;CHECK: fmax_olt_reverse:
+;CHECK-LABEL: fmax_olt_reverse:
;CHECK: vmax.f32
%cond = fcmp olt float %x, 1.0
%max1 = select i1 %cond, float 1.0, float %x
@@ -57,7 +57,7 @@ define float @fmax_olt_reverse(float %x) nounwind {
}
define float @fmax_ule_reverse(float %x) nounwind {
-;CHECK: fmax_ule_reverse:
+;CHECK-LABEL: fmax_ule_reverse:
;CHECK: vmax.f32
%cond = fcmp ult float 1.0, %x
%max1 = select i1 %cond, float %x, float 1.0
@@ -65,7 +65,7 @@ define float @fmax_ule_reverse(float %x) nounwind {
}
define float @fmin_oge_reverse(float %x) nounwind {
-;CHECK: fmin_oge_reverse:
+;CHECK-LABEL: fmin_oge_reverse:
;CHECK: vmin.f32
%cond = fcmp oge float %x, 1.0
%min1 = select i1 %cond, float 1.0, float %x
@@ -73,7 +73,7 @@ define float @fmin_oge_reverse(float %x) nounwind {
}
define float @fmin_ugt_reverse(float %x) nounwind {
-;CHECK: fmin_ugt_reverse:
+;CHECK-LABEL: fmin_ugt_reverse:
;CHECK: vmin.f32
%cond = fcmp ugt float 1.0, %x
%min1 = select i1 %cond, float %x, float 1.0
diff --git a/test/CodeGen/ARM/neon_spill.ll b/test/CodeGen/ARM/neon_spill.ll
index 277bd05ba3b6..d286d16486c1 100644
--- a/test/CodeGen/ARM/neon_spill.ll
+++ b/test/CodeGen/ARM/neon_spill.ll
@@ -24,7 +24,7 @@ declare arm_aapcs_vfpcc %2** @func4()
define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
call void @llvm.arm.neon.vst4.v4i32(i8* undef, <4 x i32> <i32 0, i32 1065353216, i32 1073741824, i32 1077936128>, <4 x i32> <i32 1082130432, i32 1084227584, i32 1086324736, i32 1088421888>, <4 x i32> <i32 1090519040, i32 1091567616, i32 1092616192, i32 1093664768>, <4 x i32> <i32 1094713344, i32 1095761920, i32 1096810496, i32 1097859072>, i32 16) nounwind
%2 = call arm_aapcs_vfpcc %0** @func2() nounwind
- %3 = load %0** %2, align 4, !tbaa !0
+ %3 = load %0** %2, align 4
store float 0.000000e+00, float* undef, align 4
%4 = call arm_aapcs_vfpcc %2* @func3(%2* undef, %2* undef, i32 2956) nounwind
call arm_aapcs_vfpcc void @func1(%0* %3, float* undef, float* undef, %2* undef)
@@ -35,11 +35,11 @@ define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
%6 = call arm_aapcs_vfpcc %2** @func4() nounwind
%7 = call arm_aapcs_vfpcc %2* @func3(%2* undef, %2* undef, i32 2971) nounwind
%8 = fadd float undef, -1.000000e+05
- store float %8, float* undef, align 16, !tbaa !3
+ store float %8, float* undef, align 16
%9 = call arm_aapcs_vfpcc i32 @rand() nounwind
%10 = fmul float undef, 2.000000e+05
%11 = fadd float %10, -1.000000e+05
- store float %11, float* undef, align 4, !tbaa !3
+ store float %11, float* undef, align 4
call void @llvm.arm.neon.vst4.v4i32(i8* undef, <4 x i32> <i32 0, i32 1065353216, i32 1073741824, i32 1077936128>, <4 x i32> <i32 1082130432, i32 1084227584, i32 1086324736, i32 1088421888>, <4 x i32> <i32 1090519040, i32 1091567616, i32 1092616192, i32 1093664768>, <4 x i32> <i32 1094713344, i32 1095761920, i32 1096810496, i32 1097859072>, i32 16) nounwind
ret void
}
@@ -47,8 +47,3 @@ define arm_aapcs_vfpcc void @foo(%3* nocapture) nounwind align 2 {
declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
declare arm_aapcs_vfpcc i32 @rand()
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"float", metadata !1}
diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll
index bf2770b15b01..76b604423986 100644
--- a/test/CodeGen/ARM/neon_vabs.ll
+++ b/test/CodeGen/ARM/neon_vabs.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <4 x i32> @test1(<4 x i32> %a) nounwind {
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: vabs.s32 q
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -10,7 +10,7 @@ define <4 x i32> @test1(<4 x i32> %a) nounwind {
}
define <4 x i32> @test2(<4 x i32> %a) nounwind {
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: vabs.s32 q
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sge <4 x i32> %a, zeroinitializer
@@ -19,7 +19,7 @@ define <4 x i32> @test2(<4 x i32> %a) nounwind {
}
define <8 x i16> @test3(<8 x i16> %a) nounwind {
-; CHECK: test3:
+; CHECK-LABEL: test3:
; CHECK: vabs.s16 q
%tmp1neg = sub <8 x i16> zeroinitializer, %a
%b = icmp sgt <8 x i16> %a, zeroinitializer
@@ -28,7 +28,7 @@ define <8 x i16> @test3(<8 x i16> %a) nounwind {
}
define <16 x i8> @test4(<16 x i8> %a) nounwind {
-; CHECK: test4:
+; CHECK-LABEL: test4:
; CHECK: vabs.s8 q
%tmp1neg = sub <16 x i8> zeroinitializer, %a
%b = icmp slt <16 x i8> %a, zeroinitializer
@@ -37,7 +37,7 @@ define <16 x i8> @test4(<16 x i8> %a) nounwind {
}
define <4 x i32> @test5(<4 x i32> %a) nounwind {
-; CHECK: test5:
+; CHECK-LABEL: test5:
; CHECK: vabs.s32 q
%tmp1neg = sub <4 x i32> zeroinitializer, %a
%b = icmp sle <4 x i32> %a, zeroinitializer
@@ -46,7 +46,7 @@ define <4 x i32> @test5(<4 x i32> %a) nounwind {
}
define <2 x i32> @test6(<2 x i32> %a) nounwind {
-; CHECK: test6:
+; CHECK-LABEL: test6:
; CHECK: vabs.s32 d
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sgt <2 x i32> %a, <i32 -1, i32 -1>
@@ -55,7 +55,7 @@ define <2 x i32> @test6(<2 x i32> %a) nounwind {
}
define <2 x i32> @test7(<2 x i32> %a) nounwind {
-; CHECK: test7:
+; CHECK-LABEL: test7:
; CHECK: vabs.s32 d
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sge <2 x i32> %a, zeroinitializer
@@ -64,7 +64,7 @@ define <2 x i32> @test7(<2 x i32> %a) nounwind {
}
define <4 x i16> @test8(<4 x i16> %a) nounwind {
-; CHECK: test8:
+; CHECK-LABEL: test8:
; CHECK: vabs.s16 d
%tmp1neg = sub <4 x i16> zeroinitializer, %a
%b = icmp sgt <4 x i16> %a, zeroinitializer
@@ -73,7 +73,7 @@ define <4 x i16> @test8(<4 x i16> %a) nounwind {
}
define <8 x i8> @test9(<8 x i8> %a) nounwind {
-; CHECK: test9:
+; CHECK-LABEL: test9:
; CHECK: vabs.s8 d
%tmp1neg = sub <8 x i8> zeroinitializer, %a
%b = icmp slt <8 x i8> %a, zeroinitializer
@@ -82,7 +82,7 @@ define <8 x i8> @test9(<8 x i8> %a) nounwind {
}
define <2 x i32> @test10(<2 x i32> %a) nounwind {
-; CHECK: test10:
+; CHECK-LABEL: test10:
; CHECK: vabs.s32 d
%tmp1neg = sub <2 x i32> zeroinitializer, %a
%b = icmp sle <2 x i32> %a, zeroinitializer
diff --git a/test/CodeGen/ARM/no-fpu.ll b/test/CodeGen/ARM/no-fpu.ll
new file mode 100644
index 000000000000..fff4bccb80e9
--- /dev/null
+++ b/test/CodeGen/ARM/no-fpu.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon,-vfp2 | FileCheck --check-prefix=NONEON-NOVFP %s
+; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon | FileCheck --check-prefix=NONEON %s
+; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-vfp2 | FileCheck --check-prefix=NOVFP %s
+; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon,+vfp2 | FileCheck --check-prefix=NONEON-VFP %s
+
+; Check that no NEON instructions are selected when the feature is disabled.
+define void @neonop(i64* nocapture readonly %a, i64* nocapture %b) #0 {
+ %1 = bitcast i64* %a to <2 x i64>*
+ %wide.load = load <2 x i64>* %1, align 8
+ ; NONEON-NOVFP-NOT: vld1.64
+ ; NONEON-NOT: vld1.64
+ %add = add <2 x i64> %wide.load, %wide.load
+ ; NONEON-NOVFP-NOT: vadd.i64
+ ; NONEON-NOT: vadd.i64
+ %2 = bitcast i64* %b to <2 x i64>*
+ store <2 x i64> %add, <2 x i64>* %2, align 8
+ ; NONEON-NOVFP-NOT: vst1.64
+ ; NONEON-NOT: vst1.64
+ ret void
+}
+
+; Likewise with VFP instructions.
+define double @fpmult(double %a, double %b) {
+ %res = fmul double %a, %b
+ ; NONEON-NOVFP-NOT: vmov
+ ; NONEON-NOVFP-NOT: vmul.f64
+ ; NOVFP-NOT: vmov
+ ; NOVFP-NOT: vmul.f64
+ ; NONEON-VFP: vmov
+ ; NONEON-VFP: vmul.f64
+ ret double %res
+}
+
diff --git a/test/CodeGen/ARM/noreturn.ll b/test/CodeGen/ARM/noreturn.ll
new file mode 100644
index 000000000000..4c876cec9c10
--- /dev/null
+++ b/test/CodeGen/ARM/noreturn.ll
@@ -0,0 +1,50 @@
+; RUN: llc -O3 -o - %s | FileCheck %s
+; Test case from PR16882.
+target triple = "thumbv7s-apple-ios"
+
+define i32 @test1() {
+; CHECK-LABEL: @test1
+; CHECK-NOT: push
+entry:
+ tail call void @overflow() #0
+ unreachable
+}
+
+; Function Attrs: noreturn nounwind
+declare void @overflow() #0
+
+define i32 @test2(i32 %x, i32 %y) {
+; CHECK-LABEL: @test2
+; CHECK-NOT: push
+; CHECK-NOT: pop
+entry:
+ %conv = sext i32 %x to i64
+ %conv1 = sext i32 %y to i64
+ %mul = mul nsw i64 %conv1, %conv
+ %conv2 = trunc i64 %mul to i32
+ %conv3 = sext i32 %conv2 to i64
+ %cmp = icmp eq i64 %mul, %conv3
+ br i1 %cmp, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ tail call void @overflow() #0
+ unreachable
+
+if.end: ; preds = %entry
+ ret i32 %conv2
+}
+
+; Test case for PR17825.
+define i32 @test3() {
+; CHECK-LABEL: @test3
+; CHECK: push
+entry:
+ tail call void @overflow_with_unwind() #1
+ unreachable
+}
+
+; Function Attrs: noreturn
+declare void @overflow_with_unwind() #1
+
+attributes #0 = { noreturn nounwind }
+attributes #1 = { noreturn }
diff --git a/test/CodeGen/ARM/optselect-regclass.ll b/test/CodeGen/ARM/optselect-regclass.ll
new file mode 100644
index 000000000000..1aa452089646
--- /dev/null
+++ b/test/CodeGen/ARM/optselect-regclass.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -march=arm -mcpu=swift -verify-machineinstrs
+%union.opcode.0.2.5.8.15.28 = type { i32 }
+
+@opcode = external global %union.opcode.0.2.5.8.15.28, align 4
+@operands = external hidden global [50 x i8], align 4
+@.str86 = external hidden unnamed_addr constant [13 x i8], align 1
+
+; Function Attrs: nounwind ssp
+define void @xfr() {
+entry:
+ %bf.load4 = load i32* getelementptr inbounds (%union.opcode.0.2.5.8.15.28* @opcode, i32 0, i32 0), align 4
+ %bf.clear10 = and i32 %bf.load4, 65535
+ %and11 = and i32 %bf.load4, 32768
+ %tobool12 = icmp ne i32 %and11, 0
+ %cond13 = select i1 %tobool12, i32 1073676288, i32 0
+ %or = or i32 %cond13, %bf.clear10
+ %shl = shl nuw i32 %or, 2
+ %add = add i32 0, %shl
+ tail call void (i8*, i32, i32, i8*, ...)* @__sprintf_chk(i8* getelementptr inbounds ([50 x i8]* @operands, i32 0, i32 0), i32 0, i32 50, i8* getelementptr inbounds ([13 x i8]* @.str86, i32 0, i32 0), i32 undef, i32 undef, i32 %add)
+ ret void
+}
+
+declare void @__sprintf_chk(i8*, i32, i32, i8*, ...)
diff --git a/test/CodeGen/ARM/pack.ll b/test/CodeGen/ARM/pack.ll
index 90151767b919..fbc115518f88 100644
--- a/test/CodeGen/ARM/pack.ll
+++ b/test/CodeGen/ARM/pack.ll
@@ -78,11 +78,34 @@ define i32 @test7(i32 %X, i32 %Y) {
ret i32 %tmp57
}
+; Arithmetic and logical right shifts do not have the same semantics when
+; shifting by more than 16 in this context.
+
; CHECK: test8
-; CHECK: pkhtb r0, r0, r1, asr #22
+; CHECK-NOT: pkhtb r0, r0, r1, asr #22
define i32 @test8(i32 %X, i32 %Y) {
%tmp1 = and i32 %X, -65536
%tmp3 = lshr i32 %Y, 22
%tmp57 = or i32 %tmp3, %tmp1
ret i32 %tmp57
}
+
+; CHECK-LABEL: test9:
+; CHECK: pkhtb r0, r0, r1, asr #16
+define i32 @test9(i32 %src1, i32 %src2) {
+entry:
+ %tmp = and i32 %src1, -65536
+ %tmp2 = lshr i32 %src2, 16
+ %tmp3 = or i32 %tmp, %tmp2
+ ret i32 %tmp3
+}
+
+; CHECK-LABEL: test10:
+; CHECK: pkhtb r0, r0, r1, asr #17
+define i32 @test10(i32 %src1, i32 %src2) {
+entry:
+ %tmp = and i32 %src1, -65536
+ %tmp2 = ashr i32 %src2, 17
+ %tmp3 = or i32 %tmp, %tmp2
+ ret i32 %tmp3
+}
diff --git a/test/CodeGen/ARM/peephole-bitcast.ll b/test/CodeGen/ARM/peephole-bitcast.ll
index e72d51f06d4c..3c6a187d99a0 100644
--- a/test/CodeGen/ARM/peephole-bitcast.ll
+++ b/test/CodeGen/ARM/peephole-bitcast.ll
@@ -10,7 +10,7 @@
define void @t(float %x) nounwind ssp {
entry:
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK-NOT: vmov
; CHECK: bl
%0 = bitcast float %x to i32
diff --git a/test/CodeGen/ARM/pic.ll b/test/CodeGen/ARM/pic.ll
new file mode 100644
index 000000000000..9fc7a63bd687
--- /dev/null
+++ b/test/CodeGen/ARM/pic.ll
@@ -0,0 +1,23 @@
+; Check the function call in the PIC relocation model.
+
+; If the relocation model is PIC, then the "bl" instruction for the function
+; call to the external function should come with a PLT fixup type.
+
+; RUN: llc < %s -mtriple=armv7-unknown-linux-gnueabi \
+; RUN: -relocation-model=pic -fast-isel -verify-machineinstrs \
+; RUN: | FileCheck %s
+
+define void @test() {
+entry:
+
+ %0 = call i32 @get()
+; CHECK: bl get(PLT)
+
+ call void @put(i32 %0)
+; CHECK: bl put(PLT)
+
+ ret void
+}
+
+declare i32 @get()
+declare void @put(i32)
diff --git a/test/CodeGen/ARM/popcnt.ll b/test/CodeGen/ARM/popcnt.ll
index 0b9c9467c206..bdf793d91b0a 100644
--- a/test/CodeGen/ARM/popcnt.ll
+++ b/test/CodeGen/ARM/popcnt.ll
@@ -2,7 +2,7 @@
; Implement ctpop with vcnt
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
-;CHECK: vcnt8:
+;CHECK-LABEL: vcnt8:
;CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
@@ -10,7 +10,7 @@ define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
}
define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
-;CHECK: vcntQ8:
+;CHECK-LABEL: vcntQ8:
;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
@@ -18,7 +18,7 @@ define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
}
define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
-; CHECK: vcnt16:
+; CHECK-LABEL: vcnt16:
; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -30,7 +30,7 @@ define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
}
define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
-; CHECK: vcntQ16:
+; CHECK-LABEL: vcntQ16:
; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
@@ -42,7 +42,7 @@ define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
}
define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
-; CHECK: vcnt32:
+; CHECK-LABEL: vcnt32:
; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
@@ -57,7 +57,7 @@ define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
}
define <4 x i32> @vcntQ32(<4 x i32>* %A) nounwind {
-; CHECK: vcntQ32:
+; CHECK-LABEL: vcntQ32:
; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
@@ -79,7 +79,7 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) nounwind readnone
declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readnone
define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
-;CHECK: vclz8:
+;CHECK-LABEL: vclz8:
;CHECK: vclz.i8 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
@@ -87,7 +87,7 @@ define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
-;CHECK: vclz16:
+;CHECK-LABEL: vclz16:
;CHECK: vclz.i16 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
@@ -95,7 +95,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
-;CHECK: vclz32:
+;CHECK-LABEL: vclz32:
;CHECK: vclz.i32 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
@@ -103,7 +103,7 @@ define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
-;CHECK: vclzQ8:
+;CHECK-LABEL: vclzQ8:
;CHECK: vclz.i8 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
@@ -111,7 +111,7 @@ define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
-;CHECK: vclzQ16:
+;CHECK-LABEL: vclzQ16:
;CHECK: vclz.i16 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
@@ -119,7 +119,7 @@ define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
-;CHECK: vclzQ32:
+;CHECK-LABEL: vclzQ32:
;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
@@ -135,7 +135,7 @@ declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
-;CHECK: vclss8:
+;CHECK-LABEL: vclss8:
;CHECK: vcls.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
@@ -143,7 +143,7 @@ define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
-;CHECK: vclss16:
+;CHECK-LABEL: vclss16:
;CHECK: vcls.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
@@ -151,7 +151,7 @@ define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
-;CHECK: vclss32:
+;CHECK-LABEL: vclss32:
;CHECK: vcls.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
@@ -159,7 +159,7 @@ define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vclsQs8:
+;CHECK-LABEL: vclsQs8:
;CHECK: vcls.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
@@ -167,7 +167,7 @@ define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vclsQs16:
+;CHECK-LABEL: vclsQs16:
;CHECK: vcls.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
@@ -175,7 +175,7 @@ define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vclsQs32:
+;CHECK-LABEL: vclsQs32:
;CHECK: vcls.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
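The recurring change in the hunks above (and throughout this patch) is the switch from plain ';CHECK: <function>:' lines to ';CHECK-LABEL: <function>:'. FileCheck treats CHECK-LABEL matches as hard boundaries: it splits the input at each labelled line and resolves all other directives only within their own block, so a pattern intended for one function can no longer match output that belongs to a neighbouring function, and a mismatch is reported against the right function. A minimal hand-written sketch of the idiom, not taken from the patch (the exact instructions in the CHECK lines are illustrative assumptions):

; RUN: llc < %s -march=arm | FileCheck %s

define i32 @first(i32 %a) {
; CHECK-LABEL: first:
; CHECK: add r0, r0, #1
  %r = add i32 %a, 1
  ret i32 %r
}

define i32 @second(i32 %a) {
; CHECK-LABEL: second:
; CHECK: sub r0, r0, #1
  %r = sub i32 %a, 1
  ret i32 %r
}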
diff --git a/test/CodeGen/ARM/prefetch-thumb.ll b/test/CodeGen/ARM/prefetch-thumb.ll
new file mode 100644
index 000000000000..e6f6ae8d18b2
--- /dev/null
+++ b/test/CodeGen/ARM/prefetch-thumb.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=thumb -mattr=+v7 | FileCheck %s -check-prefix=THUMB2
+; TODO: This test case will be merged back into prefetch.ll when the ARM mode issue is solved.
+
+declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
+
+define void @t6() {
+entry:
+;ARM: t6:
+;ARM: pld [sp]
+;ARM: pld [sp, #50]
+
+;THUMB2: t6:
+;THUMB2: pld [sp]
+;THUMB2: pld [sp, #50]
+
+%red = alloca [100 x i8], align 1
+%0 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 0
+%1 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 50
+call void @llvm.prefetch(i8* %0, i32 0, i32 3, i32 1)
+call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
+ret void
+}
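For reference, the four operands of the llvm.prefetch intrinsic used above are the address, a read/write flag (0 = read, 1 = write), a temporal-locality hint from 0 (none) to 3 (keep in all cache levels), and the cache type (1 = data cache, 0 = instruction cache), so the calls in t6 request data-cache read prefetches with maximum locality. A minimal sketch with the operands spelled out, not part of the patch:

declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind

define void @warm(i8* %p) {
entry:
  ; prefetch %p for reading (0), maximum locality (3), into the data cache (1)
  call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)
  ret void
}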
diff --git a/test/CodeGen/ARM/prefetch.ll b/test/CodeGen/ARM/prefetch.ll
index 9c8ff2b40962..5badb3114814 100644
--- a/test/CodeGen/ARM/prefetch.ll
+++ b/test/CodeGen/ARM/prefetch.ll
@@ -6,15 +6,15 @@
define void @t1(i8* %ptr) nounwind {
entry:
-; ARM: t1:
+; ARM-LABEL: t1:
; ARM-NOT: pldw [r0]
; ARM: pld [r0]
-; ARM-MP: t1:
+; ARM-MP-LABEL: t1:
; ARM-MP: pldw [r0]
; ARM-MP: pld [r0]
-; THUMB2: t1:
+; THUMB2-LABEL: t1:
; THUMB2-NOT: pldw [r0]
; THUMB2: pld [r0]
tail call void @llvm.prefetch( i8* %ptr, i32 1, i32 3, i32 1 )
@@ -24,10 +24,10 @@ entry:
define void @t2(i8* %ptr) nounwind {
entry:
-; ARM: t2:
+; ARM-LABEL: t2:
; ARM: pld [r0, #1023]
-; THUMB2: t2:
+; THUMB2-LABEL: t2:
; THUMB2: pld [r0, #1023]
%tmp = getelementptr i8* %ptr, i32 1023
tail call void @llvm.prefetch( i8* %tmp, i32 0, i32 3, i32 1 )
@@ -36,10 +36,10 @@ entry:
define void @t3(i32 %base, i32 %offset) nounwind {
entry:
-; ARM: t3:
+; ARM-LABEL: t3:
; ARM: pld [r0, r1, lsr #2]
-; THUMB2: t3:
+; THUMB2-LABEL: t3:
; THUMB2: lsrs r1, r1, #2
; THUMB2: pld [r0, r1]
%tmp1 = lshr i32 %offset, 2
@@ -51,10 +51,10 @@ entry:
define void @t4(i32 %base, i32 %offset) nounwind {
entry:
-; ARM: t4:
+; ARM-LABEL: t4:
; ARM: pld [r0, r1, lsl #2]
-; THUMB2: t4:
+; THUMB2-LABEL: t4:
; THUMB2: pld [r0, r1, lsl #2]
%tmp1 = shl i32 %offset, 2
%tmp2 = add i32 %base, %tmp1
@@ -67,10 +67,10 @@ declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
define void @t5(i8* %ptr) nounwind {
entry:
-; ARM: t5:
+; ARM-LABEL: t5:
; ARM: pli [r0]
-; THUMB2: t5:
+; THUMB2-LABEL: t5:
; THUMB2: pli [r0]
tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3, i32 0 )
ret void
diff --git a/test/CodeGen/ARM/private.ll b/test/CodeGen/ARM/private.ll
index 94578d82fddc..e48c292db466 100644
--- a/test/CodeGen/ARM/private.ll
+++ b/test/CodeGen/ARM/private.ll
@@ -2,7 +2,7 @@
;
; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
; CHECK: .Lfoo:
-; CHECK: bar:
+; CHECK-LABEL: bar:
; CHECK: bl .Lfoo
; CHECK: .long .Lbaz
; CHECK: .Lbaz:
diff --git a/test/CodeGen/ARM/readcyclecounter.ll b/test/CodeGen/ARM/readcyclecounter.ll
new file mode 100644
index 000000000000..db47ad355d09
--- /dev/null
+++ b/test/CodeGen/ARM/readcyclecounter.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mtriple=armv7-none-linux-gnueabi < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-none-linux-gnueabi -mattr=-perfmon < %s | FileCheck %s --check-prefix=CHECK-NO-PERFMON
+; RUN: llc -mtriple=armv6-none-linux-gnueabi < %s | FileCheck %s --check-prefix=CHECK-NO-PERFMON
+
+; The performance monitor we're looking for is an ARMv7 extension. It should be
+; possible to disable it, but it is realistically present on at least every
+; v7-A processor (though not on v6, at least by default).
+
+declare i64 @llvm.readcyclecounter()
+
+define i64 @get_count() {
+ %val = call i64 @llvm.readcyclecounter()
+ ret i64 %val
+
+ ; As usual, exact registers only sort of matter, but the cycle-count had better
+ ; end up in r0 in the end.
+
+; CHECK: mrc p15, #0, r0, c9, c13, #0
+; CHECK: {{movs?}} r1, #0
+
+; CHECK-NO-PERFMON: {{movs?}} r0, #0
+; CHECK-NO-PERFMON: {{movs?}} r1, #0
+}
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index fd2083cf9f41..25484f484853 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -11,7 +11,7 @@
define void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
; CHECK: vld1.16
; CHECK-NOT: vmov d
; CHECK: vmovl.s16
@@ -44,7 +44,7 @@ entry:
define void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
; CHECK: vld1.16
; CHECK-NOT: vmov
; CHECK: vmul.i16
@@ -73,7 +73,7 @@ entry:
}
define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
-; CHECK: t3:
+; CHECK-LABEL: t3:
; CHECK: vld3.8
; CHECK: vmul.i8
; CHECK: vmov r
@@ -92,7 +92,7 @@ define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
define void @t4(i32* %in, i32* %out) nounwind {
entry:
-; CHECK: t4:
+; CHECK-LABEL: t4:
; CHECK: vld2.32
; CHECK-NOT: vmov
; CHECK: vld2.32
@@ -135,7 +135,7 @@ return2:
}
define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
-; CHECK: t5:
+; CHECK-LABEL: t5:
; CHECK: vld1.32
; How can FileCheck match Q and D registers? We need a lisp interpreter.
; CHECK: vorr {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
@@ -153,7 +153,7 @@ define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
}
define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
-; CHECK: t6:
+; CHECK-LABEL: t6:
; CHECK: vldr
; CHECK: vorr d[[D0:[0-9]+]], d[[D1:[0-9]+]]
; CHECK-NEXT: vld2.8 {d[[D1]][1], d[[D0]][1]}
@@ -167,7 +167,7 @@ define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
define void @t7(i32* %iptr, i32* %optr) nounwind {
entry:
-; CHECK: t7:
+; CHECK-LABEL: t7:
; CHECK: vld2.32
; CHECK: vst2.32
; CHECK: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}},
@@ -189,7 +189,7 @@ entry:
; PR7156
define arm_aapcs_vfpcc i32 @t8() nounwind {
-; CHECK: t8:
+; CHECK-LABEL: t8:
; CHECK: vrsqrte.f32 q8, q8
bb.nph55.bb.nph55.split_crit_edge:
br label %bb3
@@ -238,11 +238,10 @@ bb14: ; preds = %bb6
; PR7157
define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
-; CHECK: t9:
-; CHECK: vldr
-; CHECK-NOT: vmov d{{.*}}, d16
-; CHECK: vmov.i32 d17
+; CHECK-LABEL: t9:
+; CHECK: vmov.i32 d16, #0x0
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT: vorr d17, d16, d16
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
%3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
%4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
@@ -270,7 +269,7 @@ define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
; PR7162
define arm_aapcs_vfpcc i32 @t10() nounwind {
entry:
-; CHECK: t10:
+; CHECK-LABEL: t10:
; CHECK: vmov.i32 q[[Q0:[0-9]+]], #0x3f000000
; CHECK: vmul.f32 q8, q8, d[[DREG:[0-1]+]]
; CHECK: vadd.f32 q8, q8, q8
diff --git a/test/CodeGen/ARM/ret_sret_vector.ll b/test/CodeGen/ARM/ret_sret_vector.ll
index 9bb3519555e8..f9c46262b6a6 100644
--- a/test/CodeGen/ARM/ret_sret_vector.ll
+++ b/test/CodeGen/ARM/ret_sret_vector.ll
@@ -6,7 +6,7 @@ target triple = "thumbv7-apple-ios3.0.0"
define <4 x double> @PR14337(<4 x double> %a, <4 x double> %b) {
%foo = fadd <4 x double> %a, %b
ret <4 x double> %foo
-; CHECK: PR14337:
+; CHECK-LABEL: PR14337:
; CHECK: vst1.64
; CHECK: vst1.64
}
diff --git a/test/CodeGen/ARM/returned-ext.ll b/test/CodeGen/ARM/returned-ext.ll
index 670b12f249d4..d2cdeb096a88 100644
--- a/test/CodeGen/ARM/returned-ext.ll
+++ b/test/CodeGen/ARM/returned-ext.ll
@@ -10,13 +10,13 @@ declare zeroext i16 @bothzext16(i16 zeroext returned %x)
; The zeroext param attribute below is meant to have no effect
define i16 @test_identity(i16 zeroext %x) {
entry:
-; CHECKELF: test_identity:
+; CHECKELF-LABEL: test_identity:
; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
; CHECKELF: bl identity16
; CHECKELF: uxth r0, r0
; CHECKELF: bl identity32
; CHECKELF: mov r0, [[SAVEX]]
-; CHECKT2D: test_identity:
+; CHECKT2D-LABEL: test_identity:
; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
; CHECKT2D: blx _identity16
; CHECKT2D: uxth r0, r0
@@ -32,7 +32,7 @@ entry:
; x is not considered equal to %call (see SelectionDAGBuilder.cpp)
define i16 @test_matched_ret(i16 %x) {
entry:
-; CHECKELF: test_matched_ret:
+; CHECKELF-LABEL: test_matched_ret:
; This shouldn't be required
; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
@@ -44,7 +44,7 @@ entry:
; This shouldn't be required
; CHECKELF: mov r0, [[SAVEX]]
-; CHECKT2D: test_matched_ret:
+; CHECKT2D-LABEL: test_matched_ret:
; This shouldn't be required
; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
@@ -64,13 +64,13 @@ entry:
define i16 @test_mismatched_ret(i16 %x) {
entry:
-; CHECKELF: test_mismatched_ret:
+; CHECKELF-LABEL: test_mismatched_ret:
; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
; CHECKELF: bl retzext16
; CHECKELF: sxth r0, {{r[0-9]+}}
; CHECKELF: bl identity32
; CHECKELF: mov r0, [[SAVEX]]
-; CHECKT2D: test_mismatched_ret:
+; CHECKT2D-LABEL: test_mismatched_ret:
; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
; CHECKT2D: blx _retzext16
; CHECKT2D: sxth r0, {{r[0-9]+}}
@@ -84,13 +84,13 @@ entry:
define i16 @test_matched_paramext(i16 %x) {
entry:
-; CHECKELF: test_matched_paramext:
+; CHECKELF-LABEL: test_matched_paramext:
; CHECKELF: uxth r0, r0
; CHECKELF: bl paramzext16
; CHECKELF: uxth r0, r0
; CHECKELF: bl identity32
; CHECKELF: b paramzext16
-; CHECKT2D: test_matched_paramext:
+; CHECKT2D-LABEL: test_matched_paramext:
; CHECKT2D: uxth r0, r0
; CHECKT2D: blx _paramzext16
; CHECKT2D: uxth r0, r0
@@ -113,11 +113,11 @@ entry:
; optimization, don't bother checking: just verify that the calls are made
; in the correct order as a basic sanity check
-; CHECKELF: test_matched_paramext2:
+; CHECKELF-LABEL: test_matched_paramext2:
; CHECKELF: bl paramzext16
; CHECKELF: bl identity32
; CHECKELF: b paramzext16
-; CHECKT2D: test_matched_paramext2:
+; CHECKT2D-LABEL: test_matched_paramext2:
; CHECKT2D: blx _paramzext16
; CHECKT2D: blx _identity32
; CHECKT2D: b.w _paramzext16
@@ -133,7 +133,7 @@ entry:
define i16 @test_matched_bothext(i16 %x) {
entry:
-; CHECKELF: test_matched_bothext:
+; CHECKELF-LABEL: test_matched_bothext:
; CHECKELF: uxth r0, r0
; CHECKELF: bl bothzext16
; CHECKELF-NOT: uxth r0, r0
@@ -141,7 +141,7 @@ entry:
; FIXME: Tail call should be OK here
; CHECKELF: bl identity32
-; CHECKT2D: test_matched_bothext:
+; CHECKT2D-LABEL: test_matched_bothext:
; CHECKT2D: uxth r0, r0
; CHECKT2D: blx _bothzext16
; CHECKT2D-NOT: uxth r0, r0
@@ -157,14 +157,14 @@ entry:
define i16 @test_mismatched_bothext(i16 %x) {
entry:
-; CHECKELF: test_mismatched_bothext:
+; CHECKELF-LABEL: test_mismatched_bothext:
; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
; CHECKELF: uxth r0, {{r[0-9]+}}
; CHECKELF: bl bothzext16
; CHECKELF: sxth r0, [[SAVEX]]
; CHECKELF: bl identity32
; CHECKELF: mov r0, [[SAVEX]]
-; CHECKT2D: test_mismatched_bothext:
+; CHECKT2D-LABEL: test_mismatched_bothext:
; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
; CHECKT2D: uxth r0, {{r[0-9]+}}
; CHECKT2D: blx _bothzext16
diff --git a/test/CodeGen/ARM/returned-trunc-tail-calls.ll b/test/CodeGen/ARM/returned-trunc-tail-calls.ll
new file mode 100644
index 000000000000..59467271a7a7
--- /dev/null
+++ b/test/CodeGen/ARM/returned-trunc-tail-calls.ll
@@ -0,0 +1,111 @@
+; RUN: llc < %s -mtriple=armv7 -arm-tail-calls | FileCheck %s
+
+declare i16 @ret16(i16 returned)
+declare i32 @ret32(i32 returned)
+
+define i32 @test1(i32 %val) {
+; CHECK-LABEL: test1:
+; CHECK: bl {{_?}}ret16
+ %in = trunc i32 %val to i16
+ tail call i16 @ret16(i16 returned %in)
+ ret i32 %val
+}
+
+define i16 @test2(i32 %val) {
+; CHECK-LABEL: test2:
+; CHECK: b {{_?}}ret16
+ %in = trunc i32 %val to i16
+ tail call i16 @ret16(i16 returned %in)
+ ret i16 %in
+}
+
+declare {i32, i8} @take_i32_i8({i32, i8} returned)
+define { i8, i8 } @test_nocommon_value({i32, i32} %in) {
+; CHECK-LABEL: test_nocommon_value:
+; CHECK: b {{_?}}take_i32_i8
+
+ %first = extractvalue {i32, i32} %in, 0
+ %first.trunc = trunc i32 %first to i8
+
+ %second = extractvalue {i32, i32} %in, 1
+ %second.trunc = trunc i32 %second to i8
+
+ %tmp = insertvalue {i32, i8} undef, i32 %first, 0
+ %callval = insertvalue {i32, i8} %tmp, i8 %second.trunc, 1
+ tail call {i32, i8} @take_i32_i8({i32, i8} returned %callval)
+
+ %restmp = insertvalue {i8, i8} undef, i8 %first.trunc, 0
+ %res = insertvalue {i8, i8} %restmp, i8 %second.trunc, 1
+ ret {i8, i8} %res
+}
+
+declare {i32, {i32, i32}} @give_i32_i32_i32()
+define {{i32, i32}, i32} @test_structs_different_shape() {
+; CHECK-LABEL: test_structs_different_shape:
+; CHECK: b {{_?}}give_i32_i32_i32
+ %val = tail call {i32, {i32, i32}} @give_i32_i32_i32()
+
+ %first = extractvalue {i32, {i32, i32}} %val, 0
+ %second = extractvalue {i32, {i32, i32}} %val, 1, 0
+ %third = extractvalue {i32, {i32, i32}} %val, 1, 1
+
+ %restmp = insertvalue {{i32, i32}, i32} undef, i32 %first, 0, 0
+ %reseventmper = insertvalue {{i32, i32}, i32} %restmp, i32 %second, 0, 1
+ %res = insertvalue {{i32, i32}, i32} %reseventmper, i32 %third, 1
+
+ ret {{i32, i32}, i32} %res
+}
+
+define i32 @test_undef_asymmetry() {
+; CHECK: test_undef_asymmetry:
+; CHECK: bl {{_?}}ret32
+; CHECK-NOT: jmp
+ tail call i32 @ret32(i32 returned undef)
+ ret i32 2
+}
+
+define {{}, {{}, i32, {}}, [1 x i32]} @evil_empty_aggregates() {
+; CHECK-LABEL: evil_empty_aggregates:
+; CHECK: b {{_?}}give_i32_i32_i32
+ %agg = tail call {i32, {i32, i32}} @give_i32_i32_i32()
+
+ %first = extractvalue {i32, {i32, i32}} %agg, 0
+ %second = extractvalue {i32, {i32, i32}} %agg, 1, 0
+
+ %restmp = insertvalue {{}, {{}, i32, {}}, [1 x i32]} undef, i32 %first, 1, 1
+ %res = insertvalue {{}, {{}, i32, {}}, [1 x i32]} %restmp, i32 %second, 2, 0
+ ret {{}, {{}, i32, {}}, [1 x i32]} %res
+}
+
+define i32 @structure_is_unimportant() {
+; CHECK-LABEL: structure_is_unimportant:
+; CHECK: b {{_?}}give_i32_i32_i32
+ %val = tail call {i32, {i32, i32}} @give_i32_i32_i32()
+
+ %res = extractvalue {i32, {i32, i32}} %val, 0
+ ret i32 %res
+}
+
+declare i64 @give_i64()
+define i64 @direct_i64_ok() {
+; CHECK-LABEL: direct_i64_ok:
+; CHECK: b {{_?}}give_i64
+ %val = tail call i64 @give_i64()
+ ret i64 %val
+}
+
+declare {i64, i32} @give_i64_i32()
+define {i32, i32} @trunc_i64_not_ok() {
+; CHECK-LABEL: trunc_i64_not_ok:
+; CHECK: bl {{_?}}give_i64_i32
+ %agg = tail call {i64, i32} @give_i64_i32()
+
+ %first = extractvalue {i64, i32} %agg, 0
+ %second = extractvalue {i64, i32} %agg, 1
+ %first.trunc = trunc i64 %first to i32
+
+ %tmp = insertvalue {i32, i32} undef, i32 %first.trunc, 0
+ %ret = insertvalue {i32, i32} %tmp, i32 %second, 1
+
+ ret {i32, i32} %ret
+}
diff --git a/test/CodeGen/ARM/rev.ll b/test/CodeGen/ARM/rev.ll
index 6bb67431198a..6c380aee3d93 100644
--- a/test/CodeGen/ARM/rev.ll
+++ b/test/CodeGen/ARM/rev.ll
@@ -32,7 +32,7 @@ define i32 @test2(i32 %X) nounwind {
; rdar://9147637
define i32 @test3(i16 zeroext %a) nounwind {
entry:
-; CHECK: test3:
+; CHECK-LABEL: test3:
; CHECK: revsh r0, r0
%0 = tail call i16 @llvm.bswap.i16(i16 %a)
%1 = sext i16 %0 to i32
@@ -43,7 +43,7 @@ declare i16 @llvm.bswap.i16(i16) nounwind readnone
define i32 @test4(i16 zeroext %a) nounwind {
entry:
-; CHECK: test4:
+; CHECK-LABEL: test4:
; CHECK: revsh r0, r0
%conv = zext i16 %a to i32
%shr9 = lshr i16 %a, 8
diff --git a/test/CodeGen/ARM/sbfx.ll b/test/CodeGen/ARM/sbfx.ll
index d29693e4cf92..36fbd1939c55 100644
--- a/test/CodeGen/ARM/sbfx.ll
+++ b/test/CodeGen/ARM/sbfx.ll
@@ -2,7 +2,7 @@
define i32 @f1(i32 %a) {
entry:
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: sbfx r0, r0, #0, #20
%tmp = shl i32 %a, 12
%tmp2 = ashr i32 %tmp, 12
@@ -11,7 +11,7 @@ entry:
define i32 @f2(i32 %a) {
entry:
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: bfc r0, #20, #12
%tmp = shl i32 %a, 12
%tmp2 = lshr i32 %tmp, 12
@@ -20,7 +20,7 @@ entry:
define i32 @f3(i32 %a) {
entry:
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: sbfx r0, r0, #5, #3
%tmp = shl i32 %a, 24
%tmp2 = ashr i32 %tmp, 29
@@ -29,7 +29,7 @@ entry:
define i32 @f4(i32 %a) {
entry:
-; CHECK: f4:
+; CHECK-LABEL: f4:
; CHECK: ubfx r0, r0, #5, #3
%tmp = shl i32 %a, 24
%tmp2 = lshr i32 %tmp, 29
@@ -38,7 +38,7 @@ entry:
define i32 @f5(i32 %a) {
entry:
-; CHECK: f5:
+; CHECK-LABEL: f5:
; CHECK-NOT: sbfx
; CHECK: bx
%tmp = shl i32 %a, 3
diff --git a/test/CodeGen/ARM/section-name.ll b/test/CodeGen/ARM/section-name.ll
new file mode 100644
index 000000000000..a0aad4733bc8
--- /dev/null
+++ b/test/CodeGen/ARM/section-name.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -mtriple=arm-unknown-linux-gnueabi | FileCheck %s
+
+; CHECK: .text
+; CHECK: .globl test1
+; CHECK: .type test1,%function
+define void @test1() {
+entry:
+ ret void
+}
+
+; CHECK: .section .test2,"ax",%progbits
+; CHECK: .globl test2
+; CHECK: .type test2,%function
+define void @test2() section ".test2" {
+entry:
+ ret void
+}
+
+; CHECK: .section .text.test3,"axG",%progbits,test3,comdat
+; CHECK: .weak test3
+; CHECK: .type test3,%function
+define linkonce_odr void @test3() {
+entry:
+ ret void
+}
diff --git a/test/CodeGen/ARM/select-imm.ll b/test/CodeGen/ARM/select-imm.ll
index c9ac66acbfd8..6f4bfb81d51b 100644
--- a/test/CodeGen/ARM/select-imm.ll
+++ b/test/CodeGen/ARM/select-imm.ll
@@ -4,18 +4,18 @@
define i32 @t1(i32 %c) nounwind readnone {
entry:
-; ARM: t1:
+; ARM-LABEL: t1:
; ARM: mov [[R1:r[0-9]+]], #101
; ARM: orr [[R1b:r[0-9]+]], [[R1]], #256
-; ARM: movgt r0, #123
+; ARM: movgt {{r[0-1]}}, #123
-; ARMT2: t1:
-; ARMT2: movw r0, #357
-; ARMT2: movgt r0, #123
+; ARMT2-LABEL: t1:
+; ARMT2: movw [[R:r[0-1]]], #357
+; ARMT2: movwgt [[R]], #123
-; THUMB2: t1:
-; THUMB2: movw r0, #357
-; THUMB2: movgt r0, #123
+; THUMB2-LABEL: t1:
+; THUMB2: movw [[R:r[0-1]]], #357
+; THUMB2: movgt [[R]], #123
%0 = icmp sgt i32 %c, 1
%1 = select i1 %0, i32 123, i32 357
@@ -24,18 +24,18 @@ entry:
define i32 @t2(i32 %c) nounwind readnone {
entry:
-; ARM: t2:
-; ARM: mov r0, #123
-; ARM: movgt r0, #101
-; ARM: orrgt r0, r0, #256
+; ARM-LABEL: t2:
+; ARM: mov [[R:r[0-9]+]], #101
+; ARM: orr [[R]], [[R]], #256
+; ARM: movle [[R]], #123
-; ARMT2: t2:
-; ARMT2: mov r0, #123
-; ARMT2: movwgt r0, #357
+; ARMT2-LABEL: t2:
+; ARMT2: mov [[R:r[0-1]]], #123
+; ARMT2: movwgt [[R]], #357
-; THUMB2: t2:
-; THUMB2: mov{{(s|\.w)}} r0, #123
-; THUMB2: movwgt r0, #357
+; THUMB2-LABEL: t2:
+; THUMB2: mov{{(s|\.w)}} [[R:r[0-1]]], #123
+; THUMB2: movwgt [[R]], #357
%0 = icmp sgt i32 %c, 1
%1 = select i1 %0, i32 357, i32 123
@@ -44,17 +44,17 @@ entry:
define i32 @t3(i32 %a) nounwind readnone {
entry:
-; ARM: t3:
-; ARM: mov r0, #0
-; ARM: moveq r0, #1
+; ARM-LABEL: t3:
+; ARM: mov [[R:r[0-1]]], #0
+; ARM: moveq [[R]], #1
-; ARMT2: t3:
-; ARMT2: mov r0, #0
-; ARMT2: moveq r0, #1
+; ARMT2-LABEL: t3:
+; ARMT2: mov [[R:r[0-1]]], #0
+; ARMT2: movweq [[R]], #1
-; THUMB2: t3:
-; THUMB2: mov{{(s|\.w)}} r0, #0
-; THUMB2: moveq r0, #1
+; THUMB2-LABEL: t3:
+; THUMB2: mov{{(s|\.w)}} [[R:r[0-1]]], #0
+; THUMB2: moveq [[R]], #1
%0 = icmp eq i32 %a, 160
%1 = zext i1 %0 to i32
ret i32 %1
@@ -62,15 +62,15 @@ entry:
define i32 @t4(i32 %a, i32 %b, i32 %x) nounwind {
entry:
-; ARM: t4:
+; ARM-LABEL: t4:
; ARM: ldr
; ARM: mov{{lt|ge}}
-; ARMT2: t4:
+; ARMT2-LABEL: t4:
; ARMT2: movwlt [[R0:r[0-9]+]], #65365
; ARMT2: movtlt [[R0]], #65365
-; THUMB2: t4:
+; THUMB2-LABEL: t4:
; THUMB2: mvnlt [[R0:r[0-9]+]], #11141290
%0 = icmp slt i32 %a, %b
%1 = select i1 %0, i32 4283826005, i32 %x
@@ -80,13 +80,13 @@ entry:
; rdar://9758317
define i32 @t5(i32 %a) nounwind {
entry:
-; ARM: t5:
+; ARM-LABEL: t5:
; ARM-NOT: mov
; ARM: cmp r0, #1
; ARM-NOT: mov
; ARM: movne r0, #0
-; THUMB2: t5:
+; THUMB2-LABEL: t5:
; THUMB2-NOT: mov
; THUMB2: cmp r0, #1
; THUMB2: it ne
@@ -98,12 +98,12 @@ entry:
define i32 @t6(i32 %a) nounwind {
entry:
-; ARM: t6:
+; ARM-LABEL: t6:
; ARM-NOT: mov
; ARM: cmp r0, #0
; ARM: movne r0, #1
-; THUMB2: t6:
+; THUMB2-LABEL: t6:
; THUMB2-NOT: mov
; THUMB2: cmp r0, #0
; THUMB2: it ne
diff --git a/test/CodeGen/ARM/select-undef.ll b/test/CodeGen/ARM/select-undef.ll
new file mode 100644
index 000000000000..23f7eb8b352f
--- /dev/null
+++ b/test/CodeGen/ARM/select-undef.ll
@@ -0,0 +1,7 @@
+; RUN: llc < %s -march=arm -mcpu=swift -verify-machineinstrs
+define i32 @func(i32 %arg0, i32 %arg1) {
+entry:
+ %cmp = icmp slt i32 %arg0, 10
+ %v = select i1 %cmp, i32 undef, i32 %arg1
+ ret i32 %v
+}
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index 62708ed53d05..ed006d643f87 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -mattr=+neon,+thumb2 -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=CHECK-NEON
define i32 @f1(i32 %a.s) {
-;CHECK: f1:
+;CHECK-LABEL: f1:
;CHECK: moveq
entry:
%tmp = icmp eq i32 %a.s, 4
@@ -12,7 +12,7 @@ entry:
}
define i32 @f2(i32 %a.s) {
-;CHECK: f2:
+;CHECK-LABEL: f2:
;CHECK: movgt
entry:
%tmp = icmp sgt i32 %a.s, 4
@@ -21,7 +21,7 @@ entry:
}
define i32 @f3(i32 %a.s, i32 %b.s) {
-;CHECK: f3:
+;CHECK-LABEL: f3:
;CHECK: movlt
entry:
%tmp = icmp slt i32 %a.s, %b.s
@@ -30,7 +30,7 @@ entry:
}
define i32 @f4(i32 %a.s, i32 %b.s) {
-;CHECK: f4:
+;CHECK-LABEL: f4:
;CHECK: movle
entry:
%tmp = icmp sle i32 %a.s, %b.s
@@ -39,7 +39,7 @@ entry:
}
define i32 @f5(i32 %a.u, i32 %b.u) {
-;CHECK: f5:
+;CHECK-LABEL: f5:
;CHECK: movls
entry:
%tmp = icmp ule i32 %a.u, %b.u
@@ -48,7 +48,7 @@ entry:
}
define i32 @f6(i32 %a.u, i32 %b.u) {
-;CHECK: f6:
+;CHECK-LABEL: f6:
;CHECK: movhi
entry:
%tmp = icmp ugt i32 %a.u, %b.u
@@ -57,10 +57,10 @@ entry:
}
define double @f7(double %a, double %b) {
-;CHECK: f7:
+;CHECK-LABEL: f7:
;CHECK: movlt
-;CHECK: movlt
-;CHECK-VFP: f7:
+;CHECK: movge
+;CHECK-VFP-LABEL: f7:
;CHECK-VFP: vmovmi
%tmp = fcmp olt double %a, 1.234e+00
%tmp1 = select i1 %tmp, double -1.000e+00, double %b
@@ -75,7 +75,7 @@ define double @f7(double %a, double %b) {
; into the constant pool based on the value of the "icmp". If we have one "it"
; block generated, odds are good that we have close to the ideal code for this:
;
-; CHECK-NEON: _f8:
+; CHECK-NEON-LABEL: f8:
; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
; CHECK-NEON-NEXT: cmp r0, [[R3]]
@@ -94,7 +94,7 @@ define arm_apcscc float @f8(i32 %a) nounwind {
; Glue values can only have a single use, but the following test exposed a
; case where a SELECT was lowered with 2 uses of a comparison, causing the
; scheduler to assert.
-; CHECK-VFP: f9:
+; CHECK-VFP-LABEL: f9:
declare i8* @objc_msgSend(i8*, i8*, ...)
define void @f9() optsize {
@@ -113,7 +113,7 @@ entry:
ret void
}
-; CHECK: f10
+; CHECK-LABEL: f10:
define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
; CHECK-NOT: floatsisf
%1 = icmp eq i32 %a, %b
@@ -122,7 +122,7 @@ define float @f10(i32 %a, i32 %b) nounwind uwtable readnone ssp {
ret float %3
}
-; CHECK: f11
+; CHECK-LABEL: f11:
define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
; CHECK-NOT: floatsisf
%1 = icmp eq i32 %a, %b
@@ -130,7 +130,7 @@ define float @f11(i32 %a, i32 %b) nounwind uwtable readnone ssp {
ret float %2
}
-; CHECK: f12
+; CHECK-LABEL: f12:
define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
; CHECK-NOT: floatunsisf
%1 = icmp eq i32 %a, %b
diff --git a/test/CodeGen/ARM/select_xform.ll b/test/CodeGen/ARM/select_xform.ll
index 750780891261..e13504a42a16 100644
--- a/test/CodeGen/ARM/select_xform.ll
+++ b/test/CodeGen/ARM/select_xform.ll
@@ -3,11 +3,11 @@
; rdar://8662825
define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
-; ARM: t1:
+; ARM-LABEL: t1:
; ARM: suble r1, r1, #-2147483647
; ARM: mov r0, r1
-; T2: t1:
+; T2-LABEL: t1:
; T2: mvn r0, #-2147483648
; T2: addle r1, r0
; T2: mov r0, r1
@@ -18,11 +18,11 @@ define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
}
define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
-; ARM: t2:
+; ARM-LABEL: t2:
; ARM: suble r1, r1, #10
; ARM: mov r0, r1
-; T2: t2:
+; T2-LABEL: t2:
; T2: suble r1, #10
; T2: mov r0, r1
%tmp1 = icmp sgt i32 %c, 10
@@ -32,11 +32,11 @@ define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
}
define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
-; ARM: t3:
+; ARM-LABEL: t3:
; ARM: andge r3, r3, r2
; ARM: mov r0, r3
-; T2: t3:
+; T2-LABEL: t3:
; T2: andge r3, r2
; T2: mov r0, r3
%cond = icmp slt i32 %a, %b
@@ -46,11 +46,11 @@ define i32 @t3(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
}
define i32 @t4(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
-; ARM: t4:
+; ARM-LABEL: t4:
; ARM: orrge r3, r3, r2
; ARM: mov r0, r3
-; T2: t4:
+; T2-LABEL: t4:
; T2: orrge r3, r2
; T2: mov r0, r3
%cond = icmp slt i32 %a, %b
@@ -61,11 +61,11 @@ define i32 @t4(i32 %a, i32 %b, i32 %x, i32 %y) nounwind {
define i32 @t5(i32 %a, i32 %b, i32 %c) nounwind {
entry:
-; ARM: t5:
+; ARM-LABEL: t5:
; ARM-NOT: moveq
; ARM: orreq r2, r2, #1
-; T2: t5:
+; T2-LABEL: t5:
; T2-NOT: moveq
; T2: orreq r2, r2, #1
%tmp1 = icmp eq i32 %a, %b
@@ -75,11 +75,11 @@ entry:
}
define i32 @t6(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
-; ARM: t6:
+; ARM-LABEL: t6:
; ARM-NOT: movge
; ARM: eorlt r3, r3, r2
-; T2: t6:
+; T2-LABEL: t6:
; T2-NOT: movge
; T2: eorlt r3, r2
%cond = icmp slt i32 %a, %b
@@ -90,11 +90,11 @@ define i32 @t6(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
define i32 @t7(i32 %a, i32 %b, i32 %c) nounwind {
entry:
-; ARM: t7:
+; ARM-LABEL: t7:
; ARM-NOT: lsleq
; ARM: andeq r2, r2, r2, lsl #1
-; T2: t7:
+; T2-LABEL: t7:
; T2-NOT: lsleq.w
; T2: andeq.w r2, r2, r2, lsl #1
%tmp1 = shl i32 %c, 1
@@ -106,11 +106,11 @@ entry:
; Fold ORRri into movcc.
define i32 @t8(i32 %a, i32 %b) nounwind {
-; ARM: t8:
+; ARM-LABEL: t8:
; ARM: cmp r0, r1
; ARM: orrge r0, r1, #1
-; T2: t8:
+; T2-LABEL: t8:
; T2: cmp r0, r1
; T2: orrge r0, r1, #1
%x = or i32 %b, 1
@@ -121,11 +121,11 @@ define i32 @t8(i32 %a, i32 %b) nounwind {
; Fold ANDrr into movcc.
define i32 @t9(i32 %a, i32 %b, i32 %c) nounwind {
-; ARM: t9:
+; ARM-LABEL: t9:
; ARM: cmp r0, r1
; ARM: andge r0, r1, r2
-; T2: t9:
+; T2-LABEL: t9:
; T2: cmp r0, r1
; T2: andge.w r0, r1, r2
%x = and i32 %b, %c
@@ -136,11 +136,11 @@ define i32 @t9(i32 %a, i32 %b, i32 %c) nounwind {
; Fold EORrs into movcc.
define i32 @t10(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
-; ARM: t10:
+; ARM-LABEL: t10:
; ARM: cmp r0, r1
; ARM: eorge r0, r1, r2, lsl #7
-; T2: t10:
+; T2-LABEL: t10:
; T2: cmp r0, r1
; T2: eorge.w r0, r1, r2, lsl #7
%s = shl i32 %c, 7
@@ -152,11 +152,11 @@ define i32 @t10(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; Fold ORRri into movcc, reversing the condition.
define i32 @t11(i32 %a, i32 %b) nounwind {
-; ARM: t11:
+; ARM-LABEL: t11:
; ARM: cmp r0, r1
; ARM: orrlt r0, r1, #1
-; T2: t11:
+; T2-LABEL: t11:
; T2: cmp r0, r1
; T2: orrlt r0, r1, #1
%x = or i32 %b, 1
@@ -167,11 +167,11 @@ define i32 @t11(i32 %a, i32 %b) nounwind {
; Fold ADDri12 into movcc
define i32 @t12(i32 %a, i32 %b) nounwind {
-; ARM: t12:
+; ARM-LABEL: t12:
; ARM: cmp r0, r1
; ARM: addge r0, r1,
-; T2: t12:
+; T2-LABEL: t12:
; T2: cmp r0, r1
; T2: addwge r0, r1, #3000
%x = add i32 %b, 3000
diff --git a/test/CodeGen/ARM/setcc-sentinals.ll b/test/CodeGen/ARM/setcc-sentinals.ll
new file mode 100644
index 000000000000..8878f9bf22df
--- /dev/null
+++ b/test/CodeGen/ARM/setcc-sentinals.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mcpu=cortex-a8 -march=arm -asm-verbose=false | FileCheck %s
+
+define zeroext i1 @test0(i32 %x) nounwind {
+; CHECK-LABEL: test0:
+; CHECK-NEXT: add [[REG:(r[0-9]+)|(lr)]], r0, #1
+; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: cmp [[REG]], #1
+; CHECK-NEXT: movwhi r0, #1
+; CHECK-NEXT: bx lr
+ %cmp1 = icmp ne i32 %x, -1
+ %not.cmp = icmp ne i32 %x, 0
+ %.cmp1 = and i1 %cmp1, %not.cmp
+ ret i1 %.cmp1
+}
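The expected code above relies on a standard fold: for a 32-bit value x, the conjunction (x != -1) && (x != 0) is equivalent to the single unsigned comparison (x + 1) >u 1, since the add wraps -1 to 0 and maps 0 to 1, exactly the two values rejected by the unsigned test; hence the add/cmp #1/movwhi sequence in the CHECK-NEXT lines. A hand-written sketch of the combined form in IR (an illustration, not from the patch):

define zeroext i1 @combined(i32 %x) {
  %add = add i32 %x, 1
  %cmp = icmp ugt i32 %add, 1     ; true iff %x is neither -1 nor 0
  ret i1 %cmp
}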
diff --git a/test/CodeGen/ARM/shifter_operand.ll b/test/CodeGen/ARM/shifter_operand.ll
index eb971ff72e74..f14adcae663c 100644
--- a/test/CodeGen/ARM/shifter_operand.ll
+++ b/test/CodeGen/ARM/shifter_operand.ll
@@ -4,10 +4,10 @@
define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
-; A8: test1:
+; A8-LABEL: test1:
; A8: add r0, r0, r1, lsl r2
-; A9: test1:
+; A9-LABEL: test1:
; A9: add r0, r0, r1, lsl r2
%shift.upgrd.1 = zext i8 %sh to i32
%A = shl i32 %Y, %shift.upgrd.1
@@ -16,10 +16,10 @@ define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
}
define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
-; A8: test2:
+; A8-LABEL: test2:
; A8: bic r0, r0, r1, asr r2
-; A9: test2:
+; A9-LABEL: test2:
; A9: bic r0, r0, r1, asr r2
%shift.upgrd.2 = zext i8 %sh to i32
%A = ashr i32 %Y, %shift.upgrd.2
@@ -30,12 +30,12 @@ define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
define i32 @test3(i32 %base, i32 %base2, i32 %offset) {
entry:
-; A8: test3:
+; A8-LABEL: test3:
; A8: ldr r0, [r0, r2, lsl #2]
; A8: ldr r1, [r1, r2, lsl #2]
; lsl #2 is free
-; A9: test3:
+; A9-LABEL: test3:
; A9: ldr r0, [r0, r2, lsl #2]
; A9: ldr r1, [r1, r2, lsl #2]
%tmp1 = shl i32 %offset, 2
@@ -53,13 +53,13 @@ declare i8* @malloc(...)
define fastcc void @test4(i16 %addr) nounwind {
entry:
-; A8: test4:
+; A8-LABEL: test4:
; A8: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]
; A8-NOT: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]!
; A8: str [[REG]], [r0, r1, lsl #2]
; A8-NOT: str [[REG]], [r0]
-; A9: test4:
+; A9-LABEL: test4:
; A9: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]
; A9-NOT: ldr [[REG:r[0-9]+]], [r0, r1, lsl #2]!
; A9: str [[REG]], [r0, r1, lsl #2]
diff --git a/test/CodeGen/ARM/sincos.ll b/test/CodeGen/ARM/sincos.ll
new file mode 100644
index 000000000000..30b2664e3726
--- /dev/null
+++ b/test/CodeGen/ARM/sincos.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mtriple=armv7-apple-ios6 -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT
+; RUN: llc < %s -mtriple=armv7-apple-ios7 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS
+
+; Combine sin / cos into a single call.
+; rdar://12856873
+
+define float @test1(float %x) nounwind {
+entry:
+; SINCOS-LABEL: test1:
+; SINCOS: bl ___sincosf_stret
+
+; NOOPT-LABEL: test1:
+; NOOPT: bl _sinf
+; NOOPT: bl _cosf
+ %call = tail call float @sinf(float %x) nounwind readnone
+ %call1 = tail call float @cosf(float %x) nounwind readnone
+ %add = fadd float %call, %call1
+ ret float %add
+}
+
+define double @test2(double %x) nounwind {
+entry:
+; SINCOS-LABEL: test2:
+; SINCOS: bl ___sincos_stret
+
+; NOOPT-LABEL: test2:
+; NOOPT: bl _sin
+; NOOPT: bl _cos
+ %call = tail call double @sin(double %x) nounwind readnone
+ %call1 = tail call double @cos(double %x) nounwind readnone
+ %add = fadd double %call, %call1
+ ret double %add
+}
+
+declare float @sinf(float) readonly
+declare double @sin(double) readonly
+declare float @cosf(float) readonly
+declare double @cos(double) readonly
diff --git a/test/CodeGen/ARM/spill-q.ll b/test/CodeGen/ARM/spill-q.ll
index e93cdbc10a46..b9246635e408 100644
--- a/test/CodeGen/ARM/spill-q.ll
+++ b/test/CodeGen/ARM/spill-q.ll
@@ -10,7 +10,7 @@
declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
define void @aaa(%quuz* %this, i8* %block) {
-; CHECK: aaa:
+; CHECK-LABEL: aaa:
; CHECK: bic {{.*}}, #15
; CHECK: vst1.64 {{.*}}sp:128
; CHECK: vld1.64 {{.*}}sp:128
diff --git a/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll b/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll
new file mode 100644
index 000000000000..f5cda14861af
--- /dev/null
+++ b/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll
@@ -0,0 +1,32 @@
+; RUN: llc -O3 -mcpu=swift -mtriple=armv7s-apple-ios6.0.0 %s -o /dev/null
+; rdar://14811848
+
+; Make sure that we do not emit the BMOVPCB_CALL instruction for now, or, if we
+; fix the assumptions in its implementation, that we do not crash when doing so.
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+target triple = "armv7s-apple-ios6.0.0"
+
+@main.title = private unnamed_addr constant [15 x i8] c"foo and stuff\0A\00", align 1
+@.str = private unnamed_addr constant [3 x i8] c"%s\00", align 1
+
+; Function Attrs: nounwind optsize ssp
+define i32 @main() #0 {
+entry:
+ %title = alloca [15 x i8], align 1
+ %0 = getelementptr inbounds [15 x i8]* %title, i32 0, i32 0
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* getelementptr inbounds ([15 x i8]* @main.title, i32 0, i32 0), i32 15, i32 1, i1 false)
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8* %0) #3
+ ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1) #1
+
+; Function Attrs: nounwind optsize
+declare i32 @printf(i8* nocapture readonly, ...) #2
+
+attributes #0 = { nounwind optsize ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind optsize }
diff --git a/test/CodeGen/ARM/str_post.ll b/test/CodeGen/ARM/str_post.ll
index 97916f169b0f..32e3b856c03c 100644
--- a/test/CodeGen/ARM/str_post.ll
+++ b/test/CodeGen/ARM/str_post.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
define i16 @test1(i32* %X, i16* %A) {
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK: strh {{.*}}[{{.*}}], #-4
%Y = load i32* %X ; <i32> [#uses=1]
%tmp1 = trunc i32 %Y to i16 ; <i16> [#uses=1]
@@ -12,7 +12,7 @@ define i16 @test1(i32* %X, i16* %A) {
}
define i32 @test2(i32* %X, i32* %A) {
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK: str {{.*}}[{{.*}}],
%Y = load i32* %X ; <i32> [#uses=1]
store i32 %Y, i32* %A
diff --git a/test/CodeGen/ARM/struct-byval-frame-index.ll b/test/CodeGen/ARM/struct-byval-frame-index.ll
new file mode 100644
index 000000000000..465ee1218fda
--- /dev/null
+++ b/test/CodeGen/ARM/struct-byval-frame-index.ll
@@ -0,0 +1,219 @@
+; RUN: llc < %s -mcpu=cortex-a15 -verify-machineinstrs | FileCheck %s
+
+; Check that a spill right after a function call with a large byval struct is
+; correctly generated.
+; PR16393
+
+; CHECK: set_stored_macroblock_parameters
+; CHECK: str r{{.*}}, [sp, [[SLOT:#[0-9]+]]] @ 4-byte Spill
+; CHECK: bl RestoreMVBlock8x8
+; CHECK: bl RestoreMVBlock8x8
+; CHECK: bl RestoreMVBlock8x8
+; CHECK: ldr r{{.*}}, [sp, [[SLOT]]] @ 4-byte Reload
+
+target triple = "armv7l-unknown-linux-gnueabihf"
+
+%structA = type { double, [16 x [16 x i16]], [16 x [16 x i16]], [16 x [16 x i16]], i32****, i32***, i32, i16, [4 x i32], [4 x i32], i8**, [16 x i8], [16 x i8], i32, i64, i32, i16******, i16******, [2 x [4 x [4 x i8]]], i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%structB = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i8**, i32, i32***, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [9 x [16 x [16 x i16]]], [5 x [16 x [16 x i16]]], [9 x [8 x [8 x i16]]], [2 x [4 x [16 x [16 x i16]]]], [16 x [16 x i16]], [16 x [16 x i32]], i32****, i32***, i32***, i32***, i32****, i32****, %structC*, %structD*, %structK*, i32*, i32*, i32, i32, i32, i32, [4 x [4 x i32]], i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i16******, i16******, i16******, i16******, [15 x i16], i32, i32, i32, i32, i32, i32, i32, i32, [6 x [32 x i32]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1 x i32], i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %structL*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, double**, double***, i32***, double**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [2 x i32], i32, i32, i16, i32, i32, i32, i32, i32 }
+%structC = type { i32, i32, [100 x %structD*], i32, float, float, float }
+%structD = type { i32, i32, i32, i32, i32, i32, %structE*, %structH*, %structJ*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (i32)*, [3 x [2 x i32]] }
+%structE = type { %structF*, %structG, %structG }
+%structF = type { i32, i32, i8, i32, i32, i8, i8, i32, i32, i8*, i32 }
+%structG = type { i32, i32, i32, i32, i32, i8*, i32*, i32, i32 }
+%structH = type { [3 x [11 x %structI]], [2 x [9 x %structI]], [2 x [10 x %structI]], [2 x [6 x %structI]], [4 x %structI], [4 x %structI], [3 x %structI] }
+%structI = type { i16, i8, i32 }
+%structJ = type { [2 x %structI], [4 x %structI], [3 x [4 x %structI]], [10 x [4 x %structI]], [10 x [15 x %structI]], [10 x [15 x %structI]], [10 x [5 x %structI]], [10 x [5 x %structI]], [10 x [15 x %structI]], [10 x [15 x %structI]] }
+%structK = type { i32, i32, i32, [2 x i32], i32, [8 x i32], %structK*, %structK*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%structL = type { i32, i32, i32, i32, i32, %structL* }
+%structM = type { i32, i32, i32, i32, i32, i32, [6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16**, i16****, i16****, i16*****, i16***, i8*, i8***, i64***, i64***, i16****, i8**, i8**, %structM*, %structM*, %structM*, i32, i32, i32, i32, i32, i32, i32 }
+%structN = type { i32, [16 x [16 x i32]], [16 x [16 x i32]], [16 x [16 x i32]], [3 x [16 x [16 x i32]]], [4 x i16], [4 x i8], [4 x i8], [4 x i8], [16 x [16 x i16]], [16 x [16 x i16]], [16 x [16 x i32]] }
+
+@cofAC = external global i32****, align 4
+@cofDC = external global i32***, align 4
+@rdopt = external global %structA*, align 4
+@img = external global %structB*
+@enc_picture = external global %structM*
+@si_frame_indicator = external global i32, align 4
+@sp2_frame_indicator = external global i32, align 4
+@lrec = external global i32**, align 4
+@tr8x8 = external global %structN, align 4
+@best_mode = external global i16, align 2
+@best_c_imode = external global i32, align 4
+@best_i16offset = external global i32, align 4
+@bi_pred_me = external global i16, align 2
+@b8mode = external global [4 x i32], align 4
+@b8pdir = external global [4 x i32], align 4
+@b4_intra_pred_modes = external global [16 x i8], align 1
+@b8_intra_pred_modes8x8 = external global [16 x i8], align 1
+@b4_ipredmode = external global [16 x i8], align 1
+@b8_ipredmode8x8 = external global [4 x [4 x i8]], align 1
+@rec_mbY = external global [16 x [16 x i16]], align 2
+@lrec_rec = external global [16 x [16 x i32]], align 4
+@rec_mbU = external global [16 x [16 x i16]], align 2
+@rec_mbV = external global [16 x [16 x i16]], align 2
+@lrec_rec_U = external global [16 x [16 x i32]], align 4
+@lrec_uv = external global i32***, align 4
+@lrec_rec_V = external global [16 x [16 x i32]], align 4
+@cbp = external global i32, align 4
+@cbp_blk = external global i64, align 8
+@luma_transform_size_8x8_flag = external global i32, align 4
+@frefframe = external global [4 x [4 x i8]], align 1
+@brefframe = external global [4 x [4 x i8]], align 1
+
+; Function Attrs: nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) #0
+
+; Function Attrs: nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) #0
+
+; Function Attrs: nounwind
+declare void @SetMotionVectorsMB(%structK* nocapture, i32) #1
+
+; Function Attrs: nounwind
+define void @set_stored_macroblock_parameters() #1 {
+entry:
+ %0 = load %structB** @img, align 4
+ %1 = load i32* undef, align 4
+ %mb_data = getelementptr inbounds %structB* %0, i32 0, i32 61
+ %2 = load %structK** %mb_data, align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ br i1 undef, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ br i1 undef, label %for.body20, label %if.end
+
+for.body20: ; preds = %for.end
+ unreachable
+
+if.end: ; preds = %for.end
+ br i1 undef, label %if.end40, label %for.cond31.preheader
+
+for.cond31.preheader: ; preds = %if.end
+ unreachable
+
+if.end40: ; preds = %if.end
+ br i1 undef, label %if.end43, label %if.then42
+
+if.then42: ; preds = %if.end40
+ br label %if.end43
+
+if.end43: ; preds = %if.then42, %if.end40
+ br i1 undef, label %if.end164, label %for.cond47.preheader
+
+for.cond47.preheader: ; preds = %if.end43
+ br i1 undef, label %for.body119, label %if.end164
+
+for.body119: ; preds = %for.body119, %for.cond47.preheader
+ br i1 undef, label %for.body119, label %if.end164
+
+if.end164: ; preds = %for.body119, %for.cond47.preheader, %if.end43
+ store i32*** null, i32**** @cofDC, align 4
+ %mb_type = getelementptr inbounds %structK* %2, i32 %1, i32 8
+ br i1 undef, label %if.end230, label %if.then169
+
+if.then169: ; preds = %if.end164
+ br i1 undef, label %for.cond185.preheader, label %for.cond210.preheader
+
+for.cond185.preheader: ; preds = %if.then169
+ unreachable
+
+for.cond210.preheader: ; preds = %if.then169
+ unreachable
+
+if.end230: ; preds = %if.end164
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* bitcast ([4 x i32]* @b8mode to i8*), i32 16, i32 4, i1 false)
+ %b8pdir = getelementptr inbounds %structK* %2, i32 %1, i32 15
+ %3 = bitcast [4 x i32]* %b8pdir to i8*
+ tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* bitcast ([4 x i32]* @b8pdir to i8*), i32 16, i32 4, i1 false)
+ br i1 undef, label %if.end236, label %if.then233
+
+if.then233: ; preds = %if.end230
+ unreachable
+
+if.end236: ; preds = %if.end230
+ %cmp242 = icmp ne i16 undef, 8
+ %4 = load i32* @luma_transform_size_8x8_flag, align 4
+ %tobool245 = icmp ne i32 %4, 0
+ %or.cond812 = or i1 %cmp242, %tobool245
+ br i1 %or.cond812, label %if.end249, label %land.lhs.true246
+
+land.lhs.true246: ; preds = %if.end236
+ br i1 undef, label %if.end249, label %if.then248
+
+if.then248: ; preds = %land.lhs.true246
+ tail call void asm sideeffect "", "~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11}"() nounwind
+ tail call void @RestoreMVBlock8x8(i32 1, i32 0, %structN* byval @tr8x8, i32 0) #0
+ tail call void @RestoreMVBlock8x8(i32 1, i32 2, %structN* byval @tr8x8, i32 0) #0
+ tail call void @RestoreMVBlock8x8(i32 1, i32 3, %structN* byval @tr8x8, i32 0) #0
+ br label %if.end249
+
+if.end249: ; preds = %if.then248, %land.lhs.true246, %if.end236
+ %5 = load i32* @luma_transform_size_8x8_flag, align 4
+ %6 = load %structA** @rdopt, align 4
+ %luma_transform_size_8x8_flag264 = getelementptr inbounds %structA* %6, i32 0, i32 21
+ store i32 %5, i32* %luma_transform_size_8x8_flag264, align 4
+ %7 = load i32* undef, align 4
+ %add281 = add nsw i32 %7, 0
+ br label %for.body285
+
+for.body285: ; preds = %for.inc503, %if.end249
+ %8 = phi %structB* [ undef, %if.end249 ], [ %.pre1155, %for.inc503 ]
+ %i.21103 = phi i32 [ 0, %if.end249 ], [ %inc504, %for.inc503 ]
+ %block_x286 = getelementptr inbounds %structB* %8, i32 0, i32 37
+ %9 = load i32* %block_x286, align 4
+ %add287 = add nsw i32 %9, %i.21103
+ %shr289 = ashr i32 %i.21103, 1
+ %add290 = add nsw i32 %shr289, 0
+ %arrayidx292 = getelementptr inbounds %structK* %2, i32 %1, i32 15, i32 %add290
+ %10 = load %structM** @enc_picture, align 4
+ %ref_idx = getelementptr inbounds %structM* %10, i32 0, i32 35
+ %11 = load i8**** %ref_idx, align 4
+ %12 = load i8*** %11, align 4
+ %arrayidx313 = getelementptr inbounds i8** %12, i32 %add281
+ %13 = load i8** %arrayidx313, align 4
+ %arrayidx314 = getelementptr inbounds i8* %13, i32 %add287
+ store i8 -1, i8* %arrayidx314, align 1
+ %14 = load %structB** @img, align 4
+ %MbaffFrameFlag327 = getelementptr inbounds %structB* %14, i32 0, i32 100
+ %15 = load i32* %MbaffFrameFlag327, align 4
+ %tobool328 = icmp eq i32 %15, 0
+ br i1 %tobool328, label %if.end454, label %if.then329
+
+if.then329: ; preds = %for.body285
+ %16 = load %structA** @rdopt, align 4
+ br label %if.end454
+
+if.end454: ; preds = %if.then329, %for.body285
+ %17 = load i32* %arrayidx292, align 4
+ %cmp457 = icmp eq i32 %17, 0
+ br i1 %cmp457, label %if.then475, label %lor.lhs.false459
+
+lor.lhs.false459: ; preds = %if.end454
+ %18 = load i32* %mb_type, align 4
+ switch i32 %18, label %for.inc503 [
+ i32 9, label %if.then475
+ i32 10, label %if.then475
+ i32 13, label %if.then475
+ i32 14, label %if.then475
+ ]
+
+if.then475: ; preds = %lor.lhs.false459, %lor.lhs.false459, %lor.lhs.false459, %lor.lhs.false459, %if.end454
+ store i16 0, i16* undef, align 2
+ br label %for.inc503
+
+for.inc503: ; preds = %if.then475, %lor.lhs.false459
+ %inc504 = add nsw i32 %i.21103, 1
+ %.pre1155 = load %structB** @img, align 4
+ br label %for.body285
+}
+
+; Function Attrs: nounwind
+declare void @update_offset_params(i32, i32) #1
+
+; Function Attrs: nounwind
+declare void @RestoreMVBlock8x8(i32, i32, %structN* byval nocapture, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/struct_byval.ll b/test/CodeGen/ARM/struct_byval.ll
index e9541c278803..130925a0c237 100644
--- a/test/CodeGen/ARM/struct_byval.ll
+++ b/test/CodeGen/ARM/struct_byval.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=armv7-apple-ios6.0 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-ios6.0 | FileCheck %s -check-prefix=THUMB
; rdar://9877866
%struct.SmallStruct = type { i32, [8 x i32], [37 x i8] }
@@ -6,10 +7,14 @@
define i32 @f() nounwind ssp {
entry:
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: ldr
; CHECK: str
; CHECK-NOT:bne
+; THUMB-LABEL: f:
+; THUMB: ldr
+; THUMB: str
+; THUMB-NOT:bne
%st = alloca %struct.SmallStruct, align 4
%call = call i32 @e1(%struct.SmallStruct* byval %st)
ret i32 0
@@ -18,11 +23,16 @@ entry:
; Generate a loop for large struct byval
define i32 @g() nounwind ssp {
entry:
-; CHECK: g:
+; CHECK-LABEL: g:
; CHECK: ldr
; CHECK: sub
; CHECK: str
; CHECK: bne
+; THUMB-LABEL: g:
+; THUMB: ldr
+; THUMB: sub
+; THUMB: str
+; THUMB: bne
%st = alloca %struct.LargeStruct, align 4
%call = call i32 @e2(%struct.LargeStruct* byval %st)
ret i32 0
@@ -31,11 +41,16 @@ entry:
; Generate a loop using NEON instructions
define i32 @h() nounwind ssp {
entry:
-; CHECK: h:
+; CHECK-LABEL: h:
; CHECK: vld1
; CHECK: sub
; CHECK: vst1
; CHECK: bne
+; THUMB-LABEL: h:
+; THUMB: vld1
+; THUMB: sub
+; THUMB: vst1
+; THUMB: bne
%st = alloca %struct.LargeStruct, align 16
%call = call i32 @e3(%struct.LargeStruct* byval align 16 %st)
ret i32 0
@@ -49,8 +64,10 @@ declare i32 @e3(%struct.LargeStruct* nocapture byval align 16 %in) nounwind
; We can't do tail call since address of s is passed to the callee and part of
; s is in caller's local frame.
define void @f3(%struct.SmallStruct* nocapture byval %s) nounwind optsize {
-; CHECK: f3
+; CHECK-LABEL: f3
; CHECK: bl _consumestruct
+; THUMB-LABEL: f3
+; THUMB: blx _consumestruct
entry:
%0 = bitcast %struct.SmallStruct* %s to i8*
tail call void @consumestruct(i8* %0, i32 80) optsize
@@ -58,8 +75,10 @@ entry:
}
define void @f4(%struct.SmallStruct* nocapture byval %s) nounwind optsize {
-; CHECK: f4
+; CHECK-LABEL: f4
; CHECK: bl _consumestruct
+; THUMB-LABEL: f4
+; THUMB: blx _consumestruct
entry:
%addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0
%0 = bitcast i32* %addr to i8*
@@ -69,8 +88,10 @@ entry:
; We can do tail call here since s is in the incoming argument area.
define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize {
-; CHECK: f5
+; CHECK-LABEL: f5
; CHECK: b _consumestruct
+; THUMB-LABEL: f5
+; THUMB: b.w _consumestruct
entry:
%0 = bitcast %struct.SmallStruct* %s to i8*
tail call void @consumestruct(i8* %0, i32 80) optsize
@@ -78,8 +99,10 @@ entry:
}
define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize {
-; CHECK: f6
+; CHECK-LABEL: f6
; CHECK: b _consumestruct
+; THUMB-LABEL: f6
+; THUMB: b.w _consumestruct
entry:
%addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0
%0 = bitcast i32* %addr to i8*
@@ -88,3 +111,19 @@ entry:
}
declare void @consumestruct(i8* nocapture %structp, i32 %structsize) nounwind
+
+; PR17309
+%struct.I.8 = type { [10 x i32], [3 x i8] }
+
+declare void @use_I(%struct.I.8* byval)
+define void @test_I_16() {
+; CHECK-LABEL: test_I_16
+; CHECK: ldrb
+; CHECK: strb
+; THUMB-LABEL: test_I_16
+; THUMB: ldrb
+; THUMB: strb
+entry:
+ call void @use_I(%struct.I.8* byval align 16 undef)
+ ret void
+}
diff --git a/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll
new file mode 100644
index 000000000000..189926941eb2
--- /dev/null
+++ b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll
@@ -0,0 +1,1523 @@
+;RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+neon -verify-machineinstrs -filetype=obj | llvm-objdump -triple armv7-none-linux-gnueabi -disassemble - | FileCheck %s --check-prefix=ARM
+;RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi -mattr=+neon -verify-machineinstrs -filetype=obj | llvm-objdump -triple thumbv7-none-linux-gnueabi -disassemble - | FileCheck %s --check-prefix=THUMB2
+;RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=-neon -verify-machineinstrs -filetype=obj | llvm-objdump -triple armv7-none-linux-gnueabi -disassemble - | FileCheck %s --check-prefix=NO_NEON
+;We want to have both positive and negative checks for thumb1. These checks
+;are not easy to do in a single pass, so we generate the output once to a
+;temp file and run FileCheck twice with different prefixes.
+;RUN: llc < %s -mtriple=thumbv5-none-linux-gnueabi -verify-machineinstrs -filetype=obj | llvm-objdump -triple thumbv5-none-linux-gnueabi -disassemble - > %t
+;RUN: cat %t | FileCheck %s --check-prefix=THUMB1
+;RUN: cat %t | FileCheck %s --check-prefix=T1POST
+
+;This file contains auto-generated tests for the lowering of passing structs
+;byval in the ARM backend. We have tests for both packed and unpacked
+;structs at varying alignments. Each test is run for arm, thumb2 and thumb1.
+;We check for the strings in the generated object code using llvm-objdump
+;because it provides better assurance that we are generating instructions
+;for the correct architecture. Otherwise we could accidentally generate an
+;ARM instruction for THUMB1 and would not detect it, because the assembly
+;code representation is the same but the object code would be generated
+;incorrectly. For each test we check for the label, a load instruction of the
+;correct form, a branch if it will be generated with a loop, and the leftover
+;cleanup if the number of bytes does not divide evenly by the store size.
+
+%struct.A = type <{ [ 10 x i32 ] }> ; 40 bytes
+declare void @use_A(%struct.A* byval)
+%struct.B = type <{ [ 10 x i32 ], i8 }> ; 41 bytes
+declare void @use_B(%struct.B* byval)
+%struct.C = type <{ [ 10 x i32 ], [ 3 x i8 ] }> ; 43 bytes
+declare void @use_C(%struct.C* byval)
+%struct.D = type <{ [ 100 x i32 ] }> ; 400 bytes
+declare void @use_D(%struct.D* byval)
+%struct.E = type <{ [ 100 x i32 ], i8 }> ; 401 bytes
+declare void @use_E(%struct.E* byval)
+%struct.F = type <{ [ 100 x i32 ], [ 3 x i8 ] }> ; 403 bytes
+declare void @use_F(%struct.F* byval)
+%struct.G = type { [ 10 x i32 ] } ; 40 bytes
+declare void @use_G(%struct.G* byval)
+%struct.H = type { [ 10 x i32 ], i8 } ; 41 bytes
+declare void @use_H(%struct.H* byval)
+%struct.I = type { [ 10 x i32 ], [ 3 x i8 ] } ; 43 bytes
+declare void @use_I(%struct.I* byval)
+%struct.J = type { [ 100 x i32 ] } ; 400 bytes
+declare void @use_J(%struct.J* byval)
+%struct.K = type { [ 100 x i32 ], i8 } ; 401 bytes
+declare void @use_K(%struct.K* byval)
+%struct.L = type { [ 100 x i32 ], [ 3 x i8 ] } ; 403 bytes
+declare void @use_L(%struct.L* byval)
+
+;ARM-LABEL: test_A_1:
+;THUMB2-LABEL: test_A_1:
+;NO_NEON-LABEL:test_A_1:
+;THUMB1-LABEL: test_A_1:
+;T1POST-LABEL: test_A_1:
+ define void @test_A_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.A, align 1
+ call void @use_A(%struct.A* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_A_2:
+;THUMB2-LABEL: test_A_2:
+;NO_NEON-LABEL:test_A_2:
+;THUMB1-LABEL: test_A_2:
+;T1POST-LABEL: test_A_2:
+ define void @test_A_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.A, align 2
+ call void @use_A(%struct.A* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_A_4:
+;THUMB2-LABEL: test_A_4:
+;NO_NEON-LABEL:test_A_4:
+;THUMB1-LABEL: test_A_4:
+;T1POST-LABEL: test_A_4:
+ define void @test_A_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.A, align 4
+ call void @use_A(%struct.A* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_A_8:
+;THUMB2-LABEL: test_A_8:
+;NO_NEON-LABEL:test_A_8:
+;THUMB1-LABEL: test_A_8:
+;T1POST-LABEL: test_A_8:
+ define void @test_A_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.A, align 8
+ call void @use_A(%struct.A* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_A_16:
+;THUMB2-LABEL: test_A_16:
+;NO_NEON-LABEL:test_A_16:
+;THUMB1-LABEL: test_A_16:
+;T1POST-LABEL: test_A_16:
+ define void @test_A_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.A, align 16
+ call void @use_A(%struct.A* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_B_1:
+;THUMB2-LABEL: test_B_1:
+;NO_NEON-LABEL:test_B_1:
+;THUMB1-LABEL: test_B_1:
+;T1POST-LABEL: test_B_1:
+ define void @test_B_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.B, align 1
+ call void @use_B(%struct.B* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_B_2:
+;THUMB2-LABEL: test_B_2:
+;NO_NEON-LABEL:test_B_2:
+;THUMB1-LABEL: test_B_2:
+;T1POST-LABEL: test_B_2:
+ define void @test_B_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.B, align 2
+ call void @use_B(%struct.B* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_B_4:
+;THUMB2-LABEL: test_B_4:
+;NO_NEON-LABEL:test_B_4:
+;THUMB1-LABEL: test_B_4:
+;T1POST-LABEL: test_B_4:
+ define void @test_B_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.B, align 4
+ call void @use_B(%struct.B* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_B_8:
+;THUMB2-LABEL: test_B_8:
+;NO_NEON-LABEL:test_B_8:
+;THUMB1-LABEL: test_B_8:
+;T1POST-LABEL: test_B_8:
+ define void @test_B_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.B, align 8
+ call void @use_B(%struct.B* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_B_16:
+;THUMB2-LABEL: test_B_16:
+;NO_NEON-LABEL:test_B_16:
+;THUMB1-LABEL: test_B_16:
+;T1POST-LABEL: test_B_16:
+ define void @test_B_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.B, align 16
+ call void @use_B(%struct.B* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_C_1:
+;THUMB2-LABEL: test_C_1:
+;NO_NEON-LABEL:test_C_1:
+;THUMB1-LABEL: test_C_1:
+;T1POST-LABEL: test_C_1:
+ define void @test_C_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.C, align 1
+ call void @use_C(%struct.C* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_C_2:
+;THUMB2-LABEL: test_C_2:
+;NO_NEON-LABEL:test_C_2:
+;THUMB1-LABEL: test_C_2:
+;T1POST-LABEL: test_C_2:
+ define void @test_C_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.C, align 2
+ call void @use_C(%struct.C* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_C_4:
+;THUMB2-LABEL: test_C_4:
+;NO_NEON-LABEL:test_C_4:
+;THUMB1-LABEL: test_C_4:
+;T1POST-LABEL: test_C_4:
+ define void @test_C_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.C, align 4
+ call void @use_C(%struct.C* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_C_8:
+;THUMB2-LABEL: test_C_8:
+;NO_NEON-LABEL:test_C_8:
+;THUMB1-LABEL: test_C_8:
+;T1POST-LABEL: test_C_8:
+ define void @test_C_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.C, align 8
+ call void @use_C(%struct.C* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_C_16:
+;THUMB2-LABEL: test_C_16:
+;NO_NEON-LABEL:test_C_16:
+;THUMB1-LABEL: test_C_16:
+;T1POST-LABEL: test_C_16:
+ define void @test_C_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.C, align 16
+ call void @use_C(%struct.C* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_D_1:
+;THUMB2-LABEL: test_D_1:
+;NO_NEON-LABEL:test_D_1:
+;THUMB1-LABEL: test_D_1:
+;T1POST-LABEL: test_D_1:
+ define void @test_D_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.D, align 1
+ call void @use_D(%struct.D* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_D_2:
+;THUMB2-LABEL: test_D_2:
+;NO_NEON-LABEL:test_D_2:
+;THUMB1-LABEL: test_D_2:
+;T1POST-LABEL: test_D_2:
+ define void @test_D_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.D, align 2
+ call void @use_D(%struct.D* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_D_4:
+;THUMB2-LABEL: test_D_4:
+;NO_NEON-LABEL:test_D_4:
+;THUMB1-LABEL: test_D_4:
+;T1POST-LABEL: test_D_4:
+ define void @test_D_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.D, align 4
+ call void @use_D(%struct.D* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_D_8:
+;THUMB2-LABEL: test_D_8:
+;NO_NEON-LABEL:test_D_8:
+;THUMB1-LABEL: test_D_8:
+;T1POST-LABEL: test_D_8:
+ define void @test_D_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.D, align 8
+ call void @use_D(%struct.D* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_D_16:
+;THUMB2-LABEL: test_D_16:
+;NO_NEON-LABEL:test_D_16:
+;THUMB1-LABEL: test_D_16:
+;T1POST-LABEL: test_D_16:
+ define void @test_D_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.D, align 16
+ call void @use_D(%struct.D* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_E_1:
+;THUMB2-LABEL: test_E_1:
+;NO_NEON-LABEL:test_E_1:
+;THUMB1-LABEL: test_E_1:
+;T1POST-LABEL: test_E_1:
+ define void @test_E_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.E, align 1
+ call void @use_E(%struct.E* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_E_2:
+;THUMB2-LABEL: test_E_2:
+;NO_NEON-LABEL:test_E_2:
+;THUMB1-LABEL: test_E_2:
+;T1POST-LABEL: test_E_2:
+ define void @test_E_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.E, align 2
+ call void @use_E(%struct.E* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_E_4:
+;THUMB2-LABEL: test_E_4:
+;NO_NEON-LABEL:test_E_4:
+;THUMB1-LABEL: test_E_4:
+;T1POST-LABEL: test_E_4:
+ define void @test_E_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.E, align 4
+ call void @use_E(%struct.E* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_E_8:
+;THUMB2-LABEL: test_E_8:
+;NO_NEON-LABEL:test_E_8:
+;THUMB1-LABEL: test_E_8:
+;T1POST-LABEL: test_E_8:
+ define void @test_E_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.E, align 8
+ call void @use_E(%struct.E* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_E_16:
+;THUMB2-LABEL: test_E_16:
+;NO_NEON-LABEL:test_E_16:
+;THUMB1-LABEL: test_E_16:
+;T1POST-LABEL: test_E_16:
+ define void @test_E_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.E, align 16
+ call void @use_E(%struct.E* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_F_1:
+;THUMB2-LABEL: test_F_1:
+;NO_NEON-LABEL:test_F_1:
+;THUMB1-LABEL: test_F_1:
+;T1POST-LABEL: test_F_1:
+ define void @test_F_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.F, align 1
+ call void @use_F(%struct.F* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_F_2:
+;THUMB2-LABEL: test_F_2:
+;NO_NEON-LABEL:test_F_2:
+;THUMB1-LABEL: test_F_2:
+;T1POST-LABEL: test_F_2:
+ define void @test_F_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.F, align 2
+ call void @use_F(%struct.F* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_F_4:
+;THUMB2-LABEL: test_F_4:
+;NO_NEON-LABEL:test_F_4:
+;THUMB1-LABEL: test_F_4:
+;T1POST-LABEL: test_F_4:
+ define void @test_F_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.F, align 4
+ call void @use_F(%struct.F* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_F_8:
+;THUMB2-LABEL: test_F_8:
+;NO_NEON-LABEL:test_F_8:
+;THUMB1-LABEL: test_F_8:
+;T1POST-LABEL: test_F_8:
+ define void @test_F_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.F, align 8
+ call void @use_F(%struct.F* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_F_16:
+;THUMB2-LABEL: test_F_16:
+;NO_NEON-LABEL:test_F_16:
+;THUMB1-LABEL: test_F_16:
+;T1POST-LABEL: test_F_16:
+ define void @test_F_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.F, align 16
+ call void @use_F(%struct.F* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_G_1:
+;THUMB2-LABEL: test_G_1:
+;NO_NEON-LABEL:test_G_1:
+;THUMB1-LABEL: test_G_1:
+;T1POST-LABEL: test_G_1:
+ define void @test_G_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.G, align 1
+ call void @use_G(%struct.G* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_G_2:
+;THUMB2-LABEL: test_G_2:
+;NO_NEON-LABEL:test_G_2:
+;THUMB1-LABEL: test_G_2:
+;T1POST-LABEL: test_G_2:
+ define void @test_G_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.G, align 2
+ call void @use_G(%struct.G* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_G_4:
+;THUMB2-LABEL: test_G_4:
+;NO_NEON-LABEL:test_G_4:
+;THUMB1-LABEL: test_G_4:
+;T1POST-LABEL: test_G_4:
+ define void @test_G_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.G, align 4
+ call void @use_G(%struct.G* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_G_8:
+;THUMB2-LABEL: test_G_8:
+;NO_NEON-LABEL:test_G_8:
+;THUMB1-LABEL: test_G_8:
+;T1POST-LABEL: test_G_8:
+ define void @test_G_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.G, align 8
+ call void @use_G(%struct.G* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_G_16:
+;THUMB2-LABEL: test_G_16:
+;NO_NEON-LABEL:test_G_16:
+;THUMB1-LABEL: test_G_16:
+;T1POST-LABEL: test_G_16:
+ define void @test_G_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.G, align 16
+ call void @use_G(%struct.G* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_H_1:
+;THUMB2-LABEL: test_H_1:
+;NO_NEON-LABEL:test_H_1:
+;THUMB1-LABEL: test_H_1:
+;T1POST-LABEL: test_H_1:
+ define void @test_H_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.H, align 1
+ call void @use_H(%struct.H* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_H_2:
+;THUMB2-LABEL: test_H_2:
+;NO_NEON-LABEL:test_H_2:
+;THUMB1-LABEL: test_H_2:
+;T1POST-LABEL: test_H_2:
+ define void @test_H_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.H, align 2
+ call void @use_H(%struct.H* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_H_4:
+;THUMB2-LABEL: test_H_4:
+;NO_NEON-LABEL:test_H_4:
+;THUMB1-LABEL: test_H_4:
+;T1POST-LABEL: test_H_4:
+ define void @test_H_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.H, align 4
+ call void @use_H(%struct.H* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_H_8:
+;THUMB2-LABEL: test_H_8:
+;NO_NEON-LABEL:test_H_8:
+;THUMB1-LABEL: test_H_8:
+;T1POST-LABEL: test_H_8:
+ define void @test_H_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.H, align 8
+ call void @use_H(%struct.H* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_H_16:
+;THUMB2-LABEL: test_H_16:
+;NO_NEON-LABEL:test_H_16:
+;THUMB1-LABEL: test_H_16:
+;T1POST-LABEL: test_H_16:
+ define void @test_H_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.H, align 16
+ call void @use_H(%struct.H* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_I_1:
+;THUMB2-LABEL: test_I_1:
+;NO_NEON-LABEL:test_I_1:
+;THUMB1-LABEL: test_I_1:
+;T1POST-LABEL: test_I_1:
+ define void @test_I_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.I, align 1
+ call void @use_I(%struct.I* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_I_2:
+;THUMB2-LABEL: test_I_2:
+;NO_NEON-LABEL:test_I_2:
+;THUMB1-LABEL: test_I_2:
+;T1POST-LABEL: test_I_2:
+ define void @test_I_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.I, align 2
+ call void @use_I(%struct.I* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_I_4:
+;THUMB2-LABEL: test_I_4:
+;NO_NEON-LABEL:test_I_4:
+;THUMB1-LABEL: test_I_4:
+;T1POST-LABEL: test_I_4:
+ define void @test_I_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.I, align 4
+ call void @use_I(%struct.I* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_I_8:
+;THUMB2-LABEL: test_I_8:
+;NO_NEON-LABEL:test_I_8:
+;THUMB1-LABEL: test_I_8:
+;T1POST-LABEL: test_I_8:
+ define void @test_I_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.I, align 8
+ call void @use_I(%struct.I* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_I_16:
+;THUMB2-LABEL: test_I_16:
+;NO_NEON-LABEL:test_I_16:
+;THUMB1-LABEL: test_I_16:
+;T1POST-LABEL: test_I_16:
+ define void @test_I_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.I, align 16
+ call void @use_I(%struct.I* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_J_1:
+;THUMB2-LABEL: test_J_1:
+;NO_NEON-LABEL:test_J_1:
+;THUMB1-LABEL: test_J_1:
+;T1POST-LABEL: test_J_1:
+ define void @test_J_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.J, align 1
+ call void @use_J(%struct.J* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_J_2:
+;THUMB2-LABEL: test_J_2:
+;NO_NEON-LABEL:test_J_2:
+;THUMB1-LABEL: test_J_2:
+;T1POST-LABEL: test_J_2:
+ define void @test_J_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.J, align 2
+ call void @use_J(%struct.J* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_J_4:
+;THUMB2-LABEL: test_J_4:
+;NO_NEON-LABEL:test_J_4:
+;THUMB1-LABEL: test_J_4:
+;T1POST-LABEL: test_J_4:
+ define void @test_J_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.J, align 4
+ call void @use_J(%struct.J* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_J_8:
+;THUMB2-LABEL: test_J_8:
+;NO_NEON-LABEL:test_J_8:
+;THUMB1-LABEL: test_J_8:
+;T1POST-LABEL: test_J_8:
+ define void @test_J_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.J, align 8
+ call void @use_J(%struct.J* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_J_16:
+;THUMB2-LABEL: test_J_16:
+;NO_NEON-LABEL:test_J_16:
+;THUMB1-LABEL: test_J_16:
+;T1POST-LABEL: test_J_16:
+ define void @test_J_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.J, align 16
+ call void @use_J(%struct.J* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_K_1:
+;THUMB2-LABEL: test_K_1:
+;NO_NEON-LABEL:test_K_1:
+;THUMB1-LABEL: test_K_1:
+;T1POST-LABEL: test_K_1:
+ define void @test_K_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.K, align 1
+ call void @use_K(%struct.K* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_K_2:
+;THUMB2-LABEL: test_K_2:
+;NO_NEON-LABEL:test_K_2:
+;THUMB1-LABEL: test_K_2:
+;T1POST-LABEL: test_K_2:
+ define void @test_K_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.K, align 2
+ call void @use_K(%struct.K* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_K_4:
+;THUMB2-LABEL: test_K_4:
+;NO_NEON-LABEL:test_K_4:
+;THUMB1-LABEL: test_K_4:
+;T1POST-LABEL: test_K_4:
+ define void @test_K_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.K, align 4
+ call void @use_K(%struct.K* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_K_8:
+;THUMB2-LABEL: test_K_8:
+;NO_NEON-LABEL:test_K_8:
+;THUMB1-LABEL: test_K_8:
+;T1POST-LABEL: test_K_8:
+ define void @test_K_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.K, align 8
+ call void @use_K(%struct.K* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_K_16:
+;THUMB2-LABEL: test_K_16:
+;NO_NEON-LABEL:test_K_16:
+;THUMB1-LABEL: test_K_16:
+;T1POST-LABEL: test_K_16:
+ define void @test_K_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.K, align 16
+ call void @use_K(%struct.K* byval align 16 %a)
+ ret void
+ }
+;ARM-LABEL: test_L_1:
+;THUMB2-LABEL: test_L_1:
+;NO_NEON-LABEL:test_L_1:
+;THUMB1-LABEL: test_L_1:
+;T1POST-LABEL: test_L_1:
+ define void @test_L_1() {
+;ARM: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;ARM: bne
+
+;THUMB2: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;THUMB2: bne
+
+;NO_NEON: ldrb r{{[0-9]+}}, [{{.*}}], #1
+;NO_NEON: bne
+
+;THUMB1: ldrb r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #1
+;THUMB1: bne
+
+;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1
+ entry:
+ %a = alloca %struct.L, align 1
+ call void @use_L(%struct.L* byval align 1 %a)
+ ret void
+ }
+;ARM-LABEL: test_L_2:
+;THUMB2-LABEL: test_L_2:
+;NO_NEON-LABEL:test_L_2:
+;THUMB1-LABEL: test_L_2:
+;T1POST-LABEL: test_L_2:
+ define void @test_L_2() {
+;ARM: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;ARM: bne
+
+;THUMB2: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;THUMB2: bne
+
+;NO_NEON: ldrh r{{[0-9]+}}, [{{.*}}], #2
+;NO_NEON: bne
+
+;THUMB1: ldrh r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #2
+;THUMB1: bne
+
+;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2
+ entry:
+ %a = alloca %struct.L, align 2
+ call void @use_L(%struct.L* byval align 2 %a)
+ ret void
+ }
+;ARM-LABEL: test_L_4:
+;THUMB2-LABEL: test_L_4:
+;NO_NEON-LABEL:test_L_4:
+;THUMB1-LABEL: test_L_4:
+;T1POST-LABEL: test_L_4:
+ define void @test_L_4() {
+;ARM: ldr r{{[0-9]+}}, [{{.*}}], #4
+;ARM: bne
+
+;THUMB2: ldr r{{[0-9]+}}, [{{.*}}], #4
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4
+ entry:
+ %a = alloca %struct.L, align 4
+ call void @use_L(%struct.L* byval align 4 %a)
+ ret void
+ }
+;ARM-LABEL: test_L_8:
+;THUMB2-LABEL: test_L_8:
+;NO_NEON-LABEL:test_L_8:
+;THUMB1-LABEL: test_L_8:
+;T1POST-LABEL: test_L_8:
+ define void @test_L_8() {
+;ARM: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.L, align 8
+ call void @use_L(%struct.L* byval align 8 %a)
+ ret void
+ }
+;ARM-LABEL: test_L_16:
+;THUMB2-LABEL: test_L_16:
+;NO_NEON-LABEL:test_L_16:
+;THUMB1-LABEL: test_L_16:
+;T1POST-LABEL: test_L_16:
+ define void @test_L_16() {
+;ARM: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;ARM: bne
+
+;THUMB2: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+;THUMB2: bne
+
+;NO_NEON: ldr r{{[0-9]+}}, [{{.*}}], #4
+;NO_NEON: bne
+;NO_NEON-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+
+;THUMB1: ldr r{{[0-9]+}}, {{\[}}[[BASE:r[0-9]+]]{{\]}}
+;THUMB1: adds [[BASE]], #4
+;THUMB1: bne
+
+;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]!
+ entry:
+ %a = alloca %struct.L, align 16
+ call void @use_L(%struct.L* byval align 16 %a)
+ ret void
+ }
diff --git a/test/CodeGen/ARM/sub-cmp-peephole.ll b/test/CodeGen/ARM/sub-cmp-peephole.ll
index 2961b94d2c1e..19727dabf09e 100644
--- a/test/CodeGen/ARM/sub-cmp-peephole.ll
+++ b/test/CodeGen/ARM/sub-cmp-peephole.ll
@@ -1,8 +1,11 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s --check-prefix=V7
+; RUN: llc < %s -mtriple=armv8-none-linux-gnueabi | FileCheck %s -check-prefix=V8
+
define i32 @f(i32 %a, i32 %b) nounwind ssp {
entry:
-; CHECK: f:
+; CHECK-LABEL: f:
; CHECK: subs
; CHECK-NOT: cmp
%cmp = icmp sgt i32 %a, %b
@@ -13,7 +16,7 @@ entry:
define i32 @g(i32 %a, i32 %b) nounwind ssp {
entry:
-; CHECK: g:
+; CHECK-LABEL: g:
; CHECK: subs
; CHECK-NOT: cmp
%cmp = icmp slt i32 %a, %b
@@ -24,7 +27,7 @@ entry:
define i32 @h(i32 %a, i32 %b) nounwind ssp {
entry:
-; CHECK: h:
+; CHECK-LABEL: h:
; CHECK: subs
; CHECK-NOT: cmp
%cmp = icmp sgt i32 %a, 3
@@ -36,7 +39,7 @@ entry:
; rdar://11725965
define i32 @i(i32 %a, i32 %b) nounwind readnone ssp {
entry:
-; CHECK: i:
+; CHECK-LABEL: i:
; CHECK: subs
; CHECK-NOT: cmp
%cmp = icmp ult i32 %a, %b
@@ -48,7 +51,7 @@ entry:
; a swapped sub.
define i32 @j(i32 %a, i32 %b) nounwind {
entry:
-; CHECK: j:
+; CHECK-LABEL: j:
; CHECK: sub
; CHECK: cmp
%cmp = icmp eq i32 %b, %a
@@ -84,3 +87,60 @@ land.lhs.true: ; preds = %num2long.exit
if.end11: ; preds = %num2long.exit
ret i32 23
}
+
+define float @float_sel(i32 %a, i32 %b, float %x, float %y) {
+entry:
+; CHECK-LABEL: float_sel:
+; CHECK-NOT: cmp
+; V8-LABEL: float_sel:
+; V8-NOT: cmp
+; V8: vseleq.f32
+ %sub = sub i32 %a, %b
+ %cmp = icmp eq i32 %sub, 0
+ %ret = select i1 %cmp, float %x, float %y
+ ret float %ret
+}
+
+define double @double_sel(i32 %a, i32 %b, double %x, double %y) {
+entry:
+; CHECK-LABEL: double_sel:
+; CHECK-NOT: cmp
+; V8-LABEL: double_sel:
+; V8-NOT: cmp
+; V8: vseleq.f64
+ %sub = sub i32 %a, %b
+ %cmp = icmp eq i32 %sub, 0
+ %ret = select i1 %cmp, double %x, double %y
+ ret double %ret
+}
+
+@t = common global i32 0
+define double @double_sub(i32 %a, i32 %b, double %x, double %y) {
+entry:
+; CHECK-LABEL: double_sub:
+; CHECK: subs
+; CHECK-NOT: cmp
+; V8-LABEL: double_sub:
+; V8: vsel
+ %cmp = icmp sgt i32 %a, %b
+ %sub = sub i32 %a, %b
+ store i32 %sub, i32* @t
+ %ret = select i1 %cmp, double %x, double %y
+ ret double %ret
+}
+
+define double @double_sub_swap(i32 %a, i32 %b, double %x, double %y) {
+entry:
+; V7-LABEL: double_sub_swap:
+; V7-NOT: cmp
+; V7: subs
+; V8-LABEL: double_sub_swap:
+; V8-NOT: subs
+; V8: cmp
+; V8: vsel
+ %cmp = icmp sgt i32 %a, %b
+ %sub = sub i32 %b, %a
+ %ret = select i1 %cmp, double %x, double %y
+ store i32 %sub, i32* @t
+ ret double %ret
+}
diff --git a/test/CodeGen/ARM/swift-atomics.ll b/test/CodeGen/ARM/swift-atomics.ll
new file mode 100644
index 000000000000..1d7181557100
--- /dev/null
+++ b/test/CodeGen/ARM/swift-atomics.ll
@@ -0,0 +1,45 @@
+; RUN: llc -mtriple=armv7-apple-ios6.0 -mcpu=swift < %s | FileCheck %s
+; RUN: llc -mtriple=armv7-apple-ios6.0 < %s | FileCheck %s --check-prefix=CHECK-STRICT-ATOMIC
+
+; Release operations only need the store barrier provided by a "dmb ishst".
+
+define void @test_store_release(i32* %p, i32 %v) {
+; CHECK-LABEL: test_store_release:
+; CHECK: dmb ishst
+; CHECK: str
+
+; CHECK-STRICT-ATOMIC: dmb {{ish$}}
+ store atomic i32 %v, i32* %p release, align 4
+ ret void
+}
+
+; However, if sequential consistency is needed, *something* must ensure that a
+; release followed by an acquire does not get reordered. In that case a
+; "dmb ishst" is not adequate.
+define i32 @test_seq_cst(i32* %p, i32 %v) {
+; CHECK-LABEL: test_seq_cst:
+; CHECK: dmb ishst
+; CHECK: str
+; CHECK: dmb {{ish$}}
+; CHECK: ldr
+; CHECK: dmb {{ish$}}
+
+; CHECK-STRICT-ATOMIC: dmb {{ish$}}
+; CHECK-STRICT-ATOMIC: dmb {{ish$}}
+
+ store atomic i32 %v, i32* %p seq_cst, align 4
+ %val = load atomic i32* %p seq_cst, align 4
+ ret i32 %val
+}
+
+; Also, pure acquire operations should definitely not have an ishst barrier.
+
+define i32 @test_acq(i32* %addr) {
+; CHECK-LABEL: test_acq:
+; CHECK: ldr
+; CHECK: dmb {{ish$}}
+
+; CHECK-STRICT-ATOMIC: dmb {{ish$}}
+ %val = load atomic i32* %addr acquire, align 4
+ ret i32 %val
+}
diff --git a/test/CodeGen/ARM/swift-vldm.ll b/test/CodeGen/ARM/swift-vldm.ll
new file mode 100644
index 000000000000..67ae00ad7db8
--- /dev/null
+++ b/test/CodeGen/ARM/swift-vldm.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck %s
+
+; Check that we avoid producing vldm instructions using d registers that
+; begin in the most-significant half of a q register. These require more
+; micro-ops on swift and so aren't worth combining.
+
+; CHECK-LABEL: test_vldm
+; CHECK: vldmia r{{[0-9]+}}, {d2, d3, d4}
+; CHECK-NOT: vldmia r{{[0-9]+}}, {d1, d2, d3, d4}
+
+declare fastcc void @force_register(double %d0, double %d1, double %d2, double %d3, double %d4)
+
+define void @test_vldm(double* %x, double * %y) {
+entry:
+ %addr1 = getelementptr double * %x, i32 1
+ %addr2 = getelementptr double * %x, i32 2
+ %addr3 = getelementptr double * %x, i32 3
+ %d0 = load double * %y
+ %d1 = load double * %x
+ %d2 = load double * %addr1
+ %d3 = load double * %addr2
+ %d4 = load double * %addr3
+ ; We are trying to force x[0-3] into registers d1 to d4 so that we can test
+ ; that we don't form a "vldmia rX, {d1, d2, d3, d4}".
+ ; We are relying on the calling convention and that register allocation
+ ; properly coalesces registers.
+ call fastcc void @force_register(double %d0, double %d1, double %d2, double %d3, double %d4)
+ ret void
+}
diff --git a/test/CodeGen/ARM/tail-dup.ll b/test/CodeGen/ARM/tail-dup.ll
index eb4d0bab929e..d654056eaf3d 100644
--- a/test/CodeGen/ARM/tail-dup.ll
+++ b/test/CodeGen/ARM/tail-dup.ll
@@ -2,7 +2,7 @@
; We should be able to tail-duplicate the basic block containing the indirectbr
; into all of its predecessors.
-; CHECK: fn:
+; CHECK-LABEL: fn:
; CHECK: mov pc
; CHECK: mov pc
; CHECK: mov pc
diff --git a/test/CodeGen/ARM/tail-opts.ll b/test/CodeGen/ARM/tail-opts.ll
index 220b0f173739..37e9a4af3be5 100644
--- a/test/CodeGen/ARM/tail-opts.ll
+++ b/test/CodeGen/ARM/tail-opts.ll
@@ -14,7 +14,7 @@ declare i8* @choose(i8*, i8*)
; BranchFolding should tail-duplicate the indirect jump to avoid
; redundant branching.
-; CHECK: tail_duplicate_me:
+; CHECK-LABEL: tail_duplicate_me:
; CHECK: qux
; CHECK: movw r{{[0-9]+}}, :lower16:_GHJK
; CHECK: movt r{{[0-9]+}}, :upper16:_GHJK
diff --git a/test/CodeGen/ARM/test-sharedidx.ll b/test/CodeGen/ARM/test-sharedidx.ll
index 93340c300cd4..9203f166ffa9 100644
--- a/test/CodeGen/ARM/test-sharedidx.ll
+++ b/test/CodeGen/ARM/test-sharedidx.ll
@@ -14,7 +14,7 @@
; rdar://10674430
define void @sharedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c, i32 %s, i32 %len) nounwind ssp {
entry:
-; CHECK: sharedidx:
+; CHECK-LABEL: sharedidx:
%cmp8 = icmp eq i32 %len, 0
br i1 %cmp8, label %for.end, label %for.body
diff --git a/test/CodeGen/ARM/this-return.ll b/test/CodeGen/ARM/this-return.ll
index f06e4a4f8ddc..cb42de69f0aa 100644
--- a/test/CodeGen/ARM/this-return.ll
+++ b/test/CodeGen/ARM/this-return.ll
@@ -17,12 +17,12 @@ declare %struct.B* @B_ctor_complete_nothisret(%struct.B*, i32)
define %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x) {
entry:
-; CHECKELF: C_ctor_base:
+; CHECKELF-LABEL: C_ctor_base:
; CHECKELF-NOT: mov {{r[0-9]+}}, r0
; CHECKELF: bl A_ctor_base
; CHECKELF-NOT: mov r0, {{r[0-9]+}}
; CHECKELF: b B_ctor_base
-; CHECKT2D: C_ctor_base:
+; CHECKT2D-LABEL: C_ctor_base:
; CHECKT2D-NOT: mov {{r[0-9]+}}, r0
; CHECKT2D: blx _A_ctor_base
; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
@@ -36,12 +36,12 @@ entry:
define %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x) {
entry:
-; CHECKELF: C_ctor_base_nothisret:
+; CHECKELF-LABEL: C_ctor_base_nothisret:
; CHECKELF: mov [[SAVETHIS:r[0-9]+]], r0
; CHECKELF: bl A_ctor_base_nothisret
; CHECKELF: mov r0, [[SAVETHIS]]
; CHECKELF-NOT: b B_ctor_base_nothisret
-; CHECKT2D: C_ctor_base_nothisret:
+; CHECKT2D-LABEL: C_ctor_base_nothisret:
; CHECKT2D: mov [[SAVETHIS:r[0-9]+]], r0
; CHECKT2D: blx _A_ctor_base_nothisret
; CHECKT2D: mov r0, [[SAVETHIS]]
@@ -55,9 +55,9 @@ entry:
define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
entry:
-; CHECKELF: C_ctor_complete:
+; CHECKELF-LABEL: C_ctor_complete:
; CHECKELF: b C_ctor_base
-; CHECKT2D: C_ctor_complete:
+; CHECKT2D-LABEL: C_ctor_complete:
; CHECKT2D: b.w _C_ctor_base
%call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
ret %struct.C* %this
@@ -65,9 +65,9 @@ entry:
define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
entry:
-; CHECKELF: C_ctor_complete_nothisret:
+; CHECKELF-LABEL: C_ctor_complete_nothisret:
; CHECKELF-NOT: b C_ctor_base_nothisret
-; CHECKT2D: C_ctor_complete_nothisret:
+; CHECKT2D-LABEL: C_ctor_complete_nothisret:
; CHECKT2D-NOT: b.w _C_ctor_base_nothisret
%call = tail call %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x)
ret %struct.C* %this
@@ -75,12 +75,12 @@ entry:
define %struct.D* @D_ctor_base(%struct.D* %this, i32 %x) {
entry:
-; CHECKELF: D_ctor_base:
+; CHECKELF-LABEL: D_ctor_base:
; CHECKELF-NOT: mov {{r[0-9]+}}, r0
; CHECKELF: bl B_ctor_complete
; CHECKELF-NOT: mov r0, {{r[0-9]+}}
; CHECKELF: b B_ctor_complete
-; CHECKT2D: D_ctor_base:
+; CHECKT2D-LABEL: D_ctor_base:
; CHECKT2D-NOT: mov {{r[0-9]+}}, r0
; CHECKT2D: blx _B_ctor_complete
; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
@@ -93,9 +93,9 @@ entry:
define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
entry:
-; CHECKELF: E_ctor_base:
+; CHECKELF-LABEL: E_ctor_base:
; CHECKELF-NOT: b B_ctor_complete
-; CHECKT2D: E_ctor_base:
+; CHECKT2D-LABEL: E_ctor_base:
; CHECKT2D-NOT: b.w _B_ctor_complete
%b = getelementptr inbounds %struct.E* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
diff --git a/test/CodeGen/ARM/thumb1-varalloc.ll b/test/CodeGen/ARM/thumb1-varalloc.ll
index aa88ae0c1a86..e07e8aab77aa 100644
--- a/test/CodeGen/ARM/thumb1-varalloc.ll
+++ b/test/CodeGen/ARM/thumb1-varalloc.ll
@@ -39,4 +39,4 @@ bb3:
}
declare noalias i8* @strdup(i8* nocapture) nounwind
-declare i32 @_called_func(i8*, i32*) nounwind \ No newline at end of file
+declare i32 @_called_func(i8*, i32*) nounwind
diff --git a/test/CodeGen/ARM/thumb2-it-block.ll b/test/CodeGen/ARM/thumb2-it-block.ll
index a25352c0f03d..47c5dccd6fee 100644
--- a/test/CodeGen/ARM/thumb2-it-block.ll
+++ b/test/CodeGen/ARM/thumb2-it-block.ll
@@ -1,14 +1,15 @@
; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8 | FileCheck %s
; PR11107
define i32 @test(i32 %a, i32 %b) {
entry:
; CHECK: cmp
; CHECK-NEXT: it mi
-; CHECK-NEXT: rsbmi
+; CHECK-NEXT: rsb{{s?}}mi
; CHECK-NEXT: cmp
; CHECK-NEXT: it mi
-; CHECK-NEXT: rsbmi
+; CHECK-NEXT: rsb{{s?}}mi
%cmp1 = icmp slt i32 %a, 0
%sub1 = sub nsw i32 0, %a
%abs1 = select i1 %cmp1, i32 %sub1, i32 %a
diff --git a/test/CodeGen/ARM/tls-models.ll b/test/CodeGen/ARM/tls-models.ll
index a5f3c9005af0..ccc9032313b8 100644
--- a/test/CodeGen/ARM/tls-models.ll
+++ b/test/CodeGen/ARM/tls-models.ll
@@ -21,9 +21,9 @@ entry:
ret i32* @external_gd
; Non-PIC code can use initial-exec, PIC code has to use general dynamic.
- ; CHECK-NONPIC: f1:
+ ; CHECK-NONPIC-LABEL: f1:
; CHECK-NONPIC: external_gd(gottpoff)
- ; CHECK-PIC: f1:
+ ; CHECK-PIC-LABEL: f1:
; CHECK-PIC: external_gd(tlsgd)
}
@@ -33,9 +33,9 @@ entry:
; Non-PIC code can use local exec, PIC code can use local dynamic,
; but that is not implemented, so it falls back to general dynamic.
- ; CHECK-NONPIC: f2:
+ ; CHECK-NONPIC-LABEL: f2:
; CHECK-NONPIC: internal_gd(tpoff)
- ; CHECK-PIC: f2:
+ ; CHECK-PIC-LABEL: f2:
; CHECK-PIC: internal_gd(tlsgd)
}
@@ -48,9 +48,9 @@ entry:
; Non-PIC code can use initial exec, PIC should use local dynamic,
; but that is not implemented, so it falls back to general dynamic.
- ; CHECK-NONPIC: f3:
+ ; CHECK-NONPIC-LABEL: f3:
; CHECK-NONPIC: external_ld(gottpoff)
- ; CHECK-PIC: f3:
+ ; CHECK-PIC-LABEL: f3:
; CHECK-PIC: external_ld(tlsgd)
}
@@ -60,9 +60,9 @@ entry:
; Non-PIC code can use local exec, PIC code can use local dynamic,
; but that is not implemented, so it falls back to general dynamic.
- ; CHECK-NONPIC: f4:
+ ; CHECK-NONPIC-LABEL: f4:
; CHECK-NONPIC: internal_ld(tpoff)
- ; CHECK-PIC: f4:
+ ; CHECK-PIC-LABEL: f4:
; CHECK-PIC: internal_ld(tlsgd)
}
@@ -74,9 +74,9 @@ entry:
ret i32* @external_ie
; Non-PIC and PIC code will use initial exec as specified.
- ; CHECK-NONPIC: f5:
+ ; CHECK-NONPIC-LABEL: f5:
; CHECK-NONPIC: external_ie(gottpoff)
- ; CHECK-PIC: f5:
+ ; CHECK-PIC-LABEL: f5:
; CHECK-PIC: external_ie(gottpoff)
}
@@ -85,9 +85,9 @@ entry:
ret i32* @internal_ie
; Non-PIC code can use local exec, PIC code use initial exec as specified.
- ; CHECK-NONPIC: f6:
+ ; CHECK-NONPIC-LABEL: f6:
; CHECK-NONPIC: internal_ie(tpoff)
- ; CHECK-PIC: f6:
+ ; CHECK-PIC-LABEL: f6:
; CHECK-PIC: internal_ie(gottpoff)
}
@@ -99,9 +99,9 @@ entry:
ret i32* @external_le
; Non-PIC and PIC code will use local exec as specified.
- ; CHECK-NONPIC: f7:
+ ; CHECK-NONPIC-LABEL: f7:
; CHECK-NONPIC: external_le(tpoff)
- ; CHECK-PIC: f7:
+ ; CHECK-PIC-LABEL: f7:
; CHECK-PIC: external_le(tpoff)
}
@@ -110,8 +110,8 @@ entry:
ret i32* @internal_le
; Non-PIC and PIC code will use local exec as specified.
- ; CHECK-NONPIC: f8:
+ ; CHECK-NONPIC-LABEL: f8:
; CHECK-NONPIC: internal_le(tpoff)
- ; CHECK-PIC: f8:
+ ; CHECK-PIC-LABEL: f8:
; CHECK-PIC: internal_le(tpoff)
}
diff --git a/test/CodeGen/ARM/tls2.ll b/test/CodeGen/ARM/tls2.ll
index 57370c4de1c2..f04812583114 100644
--- a/test/CodeGen/ARM/tls2.ll
+++ b/test/CodeGen/ARM/tls2.ll
@@ -6,10 +6,10 @@
@i = external thread_local global i32 ; <i32*> [#uses=2]
define i32 @f() {
-; CHECK-NONPIC: f:
+; CHECK-NONPIC-LABEL: f:
; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
; CHECK-NONPIC: i(gottpoff)
-; CHECK-PIC: f:
+; CHECK-PIC-LABEL: f:
; CHECK-PIC: __tls_get_addr
entry:
%tmp1 = load i32* @i ; <i32> [#uses=1]
@@ -17,10 +17,10 @@ entry:
}
define i32* @g() {
-; CHECK-NONPIC: g:
+; CHECK-NONPIC-LABEL: g:
; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
; CHECK-NONPIC: i(gottpoff)
-; CHECK-PIC: g:
+; CHECK-PIC-LABEL: g:
; CHECK-PIC: __tls_get_addr
entry:
ret i32* @i
diff --git a/test/CodeGen/ARM/trap.ll b/test/CodeGen/ARM/trap.ll
index a4e3c3c0efa9..6cb26e331ba0 100644
--- a/test/CodeGen/ARM/trap.ll
+++ b/test/CodeGen/ARM/trap.ll
@@ -9,13 +9,13 @@
; RUN: llc -mtriple=armv7 -mattr=+nacl-trap -filetype=obj %s -o - \
; RUN: | llvm-objdump -disassemble -triple armv7 -mattr=+nacl-trap - \
; RUN: | FileCheck %s -check-prefix=ENCODING-NACL
-; RUN: llc -fast-isel -mtriple=armv7-unknown-nacl -filetype=obj %s -o - \
+; RUN: llc -verify-machineinstrs -fast-isel -mtriple=armv7-unknown-nacl -filetype=obj %s -o - \
; RUN: | llvm-objdump -disassemble -triple armv7-unknown-nacl - \
; RUN: | FileCheck %s -check-prefix=ENCODING-NACL
; RUN: llc -mtriple=armv7 -filetype=obj %s -o - \
; RUN: | llvm-objdump -disassemble -triple armv7 - \
; RUN: | FileCheck %s -check-prefix=ENCODING-ALL
-; RUN: llc -fast-isel -mtriple=armv7 -filetype=obj %s -o - \
+; RUN: llc -verify-machineinstrs -fast-isel -mtriple=armv7 -filetype=obj %s -o - \
; RUN: | llvm-objdump -disassemble -triple armv7 - \
; RUN: | FileCheck %s -check-prefix=ENCODING-ALL
; rdar://7961298
@@ -23,10 +23,10 @@
define void @t() nounwind {
entry:
-; INSTR: t:
+; INSTR-LABEL: t:
; INSTR: trap
-; FUNC: t:
+; FUNC-LABEL: t:
; FUNC: bl __trap
; ENCODING-NACL: f0 de fe e7
@@ -39,10 +39,10 @@ entry:
define void @t2() nounwind {
entry:
-; INSTR: t2:
+; INSTR-LABEL: t2:
; INSTR: trap
-; FUNC: t2:
+; FUNC-LABEL: t2:
; FUNC: bl __trap
; ENCODING-NACL: f0 de fe e7
diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll
index fc2aa1e568e2..2172f6b9a6cd 100644
--- a/test/CodeGen/ARM/twoaddrinstr.ll
+++ b/test/CodeGen/ARM/twoaddrinstr.ll
@@ -3,7 +3,7 @@
define void @PR13378() nounwind {
; This was originally a crasher trying to schedule the instructions.
-; CHECK: PR13378:
+; CHECK-LABEL: PR13378:
; CHECK: vld1.32
; CHECK-NEXT: vst1.32
; CHECK-NEXT: vst1.32
diff --git a/test/CodeGen/ARM/umulo-32.ll b/test/CodeGen/ARM/umulo-32.ll
index fa5c0168fefe..19875ce94071 100644
--- a/test/CodeGen/ARM/umulo-32.ll
+++ b/test/CodeGen/ARM/umulo-32.ll
@@ -2,8 +2,8 @@
%umul.ty = type { i32, i1 }
-define i32 @func(i32 %a) nounwind {
-; CHECK: func
+define i32 @test1(i32 %a) nounwind {
+; CHECK: test1:
; CHECK: muldi3
%tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37)
%tmp1 = extractvalue %umul.ty %tmp0, 0
@@ -13,8 +13,8 @@ define i32 @func(i32 %a) nounwind {
declare %umul.ty @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
-define i32 @f(i32 %argc, i8** %argv) ssp {
-; CHECK: func
+define i32 @test2(i32 %argc, i8** %argv) ssp {
+; CHECK: test2:
; CHECK: str r0
; CHECK: movs r2
; CHECK: mov r1
diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll
index 3064202eb3fe..e7ff63f8dbb0 100644
--- a/test/CodeGen/ARM/unaligned_load_store.ll
+++ b/test/CodeGen/ARM/unaligned_load_store.ll
@@ -7,7 +7,7 @@
define void @t(i8* nocapture %a, i8* nocapture %b) nounwind {
entry:
-; EXPANDED: t:
+; EXPANDED-LABEL: t:
; EXPANDED: ldrb [[R2:r[0-9]+]]
; EXPANDED: ldrb [[R3:r[0-9]+]]
; EXPANDED: ldrb [[R12:r[0-9]+]]
@@ -17,7 +17,7 @@ entry:
; EXPANDED: strb [[R3]]
; EXPANDED: strb [[R2]]
-; UNALIGNED: t:
+; UNALIGNED-LABEL: t:
; UNALIGNED: ldr r1
; UNALIGNED: str r1
@@ -30,13 +30,13 @@ entry:
define void @hword(double* %a, double* %b) nounwind {
entry:
-; EXPANDED: hword:
+; EXPANDED-LABEL: hword:
; EXPANDED-NOT: vld1
; EXPANDED: ldrh
; EXPANDED-NOT: str1
; EXPANDED: strh
-; UNALIGNED: hword:
+; UNALIGNED-LABEL: hword:
; UNALIGNED: vld1.16
; UNALIGNED: vst1.16
%tmp = load double* %a, align 2
@@ -46,13 +46,13 @@ entry:
define void @byte(double* %a, double* %b) nounwind {
entry:
-; EXPANDED: byte:
+; EXPANDED-LABEL: byte:
; EXPANDED-NOT: vld1
; EXPANDED: ldrb
; EXPANDED-NOT: str1
; EXPANDED: strb
-; UNALIGNED: byte:
+; UNALIGNED-LABEL: byte:
; UNALIGNED: vld1.8
; UNALIGNED: vst1.8
%tmp = load double* %a, align 1
@@ -62,11 +62,11 @@ entry:
define void @byte_word_ops(i32* %a, i32* %b) nounwind {
entry:
-; EXPANDED: byte_word_ops:
+; EXPANDED-LABEL: byte_word_ops:
; EXPANDED: ldrb
; EXPANDED: strb
-; UNALIGNED: byte_word_ops:
+; UNALIGNED-LABEL: byte_word_ops:
; UNALIGNED-NOT: ldrb
; UNALIGNED: ldr
; UNALIGNED-NOT: strb
diff --git a/test/CodeGen/ARM/unaligned_load_store_vector.ll b/test/CodeGen/ARM/unaligned_load_store_vector.ll
index 25ae6517937b..968a2c7ad0bb 100644
--- a/test/CodeGen/ARM/unaligned_load_store_vector.ll
+++ b/test/CodeGen/ARM/unaligned_load_store_vector.ll
@@ -4,7 +4,7 @@
;SIZE = 64
;TYPE = <8 x i8>
define void @v64_v8i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v8i8_1:
+;CHECK-LABEL: v64_v8i8_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -22,7 +22,7 @@ entry:
;SIZE = 64
;TYPE = <4 x i16>
define void @v64_v4i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v4i16_1:
+;CHECK-LABEL: v64_v4i16_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -40,7 +40,7 @@ entry:
;SIZE = 64
;TYPE = <2 x i32>
define void @v64_v2i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2i32_1:
+;CHECK-LABEL: v64_v2i32_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -58,7 +58,7 @@ entry:
;SIZE = 64
;TYPE = <2 x float>
define void @v64_v2f32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2f32_1:
+;CHECK-LABEL: v64_v2f32_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -76,7 +76,7 @@ entry:
;SIZE = 128
;TYPE = <16 x i8>
define void @v128_v16i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v16i8_1:
+;CHECK-LABEL: v128_v16i8_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -94,7 +94,7 @@ entry:
;SIZE = 128
;TYPE = <8 x i16>
define void @v128_v8i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v8i16_1:
+;CHECK-LABEL: v128_v8i16_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -112,7 +112,7 @@ entry:
;SIZE = 128
;TYPE = <4 x i32>
define void @v128_v4i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4i32_1:
+;CHECK-LABEL: v128_v4i32_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -130,7 +130,7 @@ entry:
;SIZE = 128
;TYPE = <2 x i64>
define void @v128_v2i64_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v2i64_1:
+;CHECK-LABEL: v128_v2i64_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -148,7 +148,7 @@ entry:
;SIZE = 128
;TYPE = <4 x float>
define void @v128_v4f32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4f32_1:
+;CHECK-LABEL: v128_v4f32_1:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -166,7 +166,7 @@ entry:
;SIZE = 64
;TYPE = <8 x i8>
define void @v64_v8i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v8i8_2:
+;CHECK-LABEL: v64_v8i8_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -184,7 +184,7 @@ entry:
;SIZE = 64
;TYPE = <4 x i16>
define void @v64_v4i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v4i16_2:
+;CHECK-LABEL: v64_v4i16_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -202,7 +202,7 @@ entry:
;SIZE = 64
;TYPE = <2 x i32>
define void @v64_v2i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2i32_2:
+;CHECK-LABEL: v64_v2i32_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -220,7 +220,7 @@ entry:
;SIZE = 64
;TYPE = <2 x float>
define void @v64_v2f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2f32_2:
+;CHECK-LABEL: v64_v2f32_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -238,7 +238,7 @@ entry:
;SIZE = 128
;TYPE = <16 x i8>
define void @v128_v16i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v16i8_2:
+;CHECK-LABEL: v128_v16i8_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -256,7 +256,7 @@ entry:
;SIZE = 128
;TYPE = <8 x i16>
define void @v128_v8i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v8i16_2:
+;CHECK-LABEL: v128_v8i16_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -274,7 +274,7 @@ entry:
;SIZE = 128
;TYPE = <4 x i32>
define void @v128_v4i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4i32_2:
+;CHECK-LABEL: v128_v4i32_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -292,7 +292,7 @@ entry:
;SIZE = 128
;TYPE = <2 x i64>
define void @v128_v2i64_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v2i64_2:
+;CHECK-LABEL: v128_v2i64_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -310,7 +310,7 @@ entry:
;SIZE = 128
;TYPE = <4 x float>
define void @v128_v4f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4f32_2:
+;CHECK-LABEL: v128_v4f32_2:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -328,7 +328,7 @@ entry:
;SIZE = 64
;TYPE = <8 x i8>
define void @v64_v8i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v8i8_4:
+;CHECK-LABEL: v64_v8i8_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -346,7 +346,7 @@ entry:
;SIZE = 64
;TYPE = <4 x i16>
define void @v64_v4i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v4i16_4:
+;CHECK-LABEL: v64_v4i16_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -364,7 +364,7 @@ entry:
;SIZE = 64
;TYPE = <2 x i32>
define void @v64_v2i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2i32_4:
+;CHECK-LABEL: v64_v2i32_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -382,7 +382,7 @@ entry:
;SIZE = 64
;TYPE = <2 x float>
define void @v64_v2f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v64_v2f32_4:
+;CHECK-LABEL: v64_v2f32_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -400,7 +400,7 @@ entry:
;SIZE = 128
;TYPE = <16 x i8>
define void @v128_v16i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v16i8_4:
+;CHECK-LABEL: v128_v16i8_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -418,7 +418,7 @@ entry:
;SIZE = 128
;TYPE = <8 x i16>
define void @v128_v8i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v8i16_4:
+;CHECK-LABEL: v128_v8i16_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -436,7 +436,7 @@ entry:
;SIZE = 128
;TYPE = <4 x i32>
define void @v128_v4i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4i32_4:
+;CHECK-LABEL: v128_v4i32_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -454,7 +454,7 @@ entry:
;SIZE = 128
;TYPE = <2 x i64>
define void @v128_v2i64_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v2i64_4:
+;CHECK-LABEL: v128_v2i64_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
@@ -472,7 +472,7 @@ entry:
;SIZE = 128
;TYPE = <4 x float>
define void @v128_v4f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
-;CHECK: v128_v4f32_4:
+;CHECK-LABEL: v128_v4f32_4:
entry:
%po = getelementptr i8* %out, i32 0
%pi = getelementptr i8* %in, i32 0
diff --git a/test/CodeGen/ARM/undef-sext.ll b/test/CodeGen/ARM/undef-sext.ll
index 2c28da3b6461..c6d76d0017df 100644
--- a/test/CodeGen/ARM/undef-sext.ll
+++ b/test/CodeGen/ARM/undef-sext.ll
@@ -4,7 +4,7 @@
define i32 @t(i32* %a) nounwind {
entry:
-; CHECK: t:
+; CHECK-LABEL: t:
; CHECK: ldr r0, [r0]
; CHECK: bx lr
%0 = sext i16 undef to i32
diff --git a/test/CodeGen/ARM/unwind-init.ll b/test/CodeGen/ARM/unwind-init.ll
new file mode 100644
index 000000000000..1e12f5510823
--- /dev/null
+++ b/test/CodeGen/ARM/unwind-init.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=armv7-unknown-linux-gnueabi < %s | FileCheck %s
+; Check that all callee-saved registers are saved and restored in functions
+; that call __builtin_unwind_init(). This is its undocumented behavior in gcc,
+; and it is used in compiling libgcc_eh.
+; See also PR8541
+
+declare void @llvm.eh.unwind.init()
+
+define void @calls_unwind_init() {
+ call void @llvm.eh.unwind.init()
+ ret void
+}
+
+; CHECK-LABEL: calls_unwind_init:
+; CHECK: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; CHECK: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
diff --git a/test/CodeGen/ARM/v1-constant-fold.ll b/test/CodeGen/ARM/v1-constant-fold.ll
index b86d5db29c4b..eb49a81ab763 100644
--- a/test/CodeGen/ARM/v1-constant-fold.ll
+++ b/test/CodeGen/ARM/v1-constant-fold.ll
@@ -2,7 +2,7 @@
; PR15611. Check that we don't crash when constant folding v1i32 types.
-; CHECK: foo:
+; CHECK-LABEL: foo:
define void @foo(i32 %arg) {
bb:
%tmp = insertelement <4 x i32> undef, i32 %arg, i32 0
diff --git a/test/CodeGen/ARM/va_arg.ll b/test/CodeGen/ARM/va_arg.ll
index af477b40a781..f18b49822847 100644
--- a/test/CodeGen/ARM/va_arg.ll
+++ b/test/CodeGen/ARM/va_arg.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -pre-RA-sched=source | FileCheck %s
; Test that we correctly align elements when using va_arg
-; CHECK: test1:
+; CHECK-LABEL: test1:
; CHECK-NOT: bfc
; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
; CHECK: bfc [[REG]], #0, #3
@@ -17,7 +17,7 @@ entry:
ret i64 %0
}
-; CHECK: test2:
+; CHECK-LABEL: test2:
; CHECK-NOT: bfc
; CHECK: add [[REG:(r[0-9]+)|(lr)]], {{(r[0-9]+)|(lr)}}, #7
; CHECK: bfc [[REG]], #0, #3
diff --git a/test/CodeGen/ARM/vaba.ll b/test/CodeGen/ARM/vaba.ll
index 4fe1c434799d..97139e9b6ccc 100644
--- a/test/CodeGen/ARM/vaba.ll
+++ b/test/CodeGen/ARM/vaba.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabas8:
+;CHECK-LABEL: vabas8:
;CHECK: vaba.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -12,7 +12,7 @@ define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabas16:
+;CHECK-LABEL: vabas16:
;CHECK: vaba.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -23,7 +23,7 @@ define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabas32:
+;CHECK-LABEL: vabas32:
;CHECK: vaba.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -34,7 +34,7 @@ define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabau8:
+;CHECK-LABEL: vabau8:
;CHECK: vaba.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -45,7 +45,7 @@ define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabau16:
+;CHECK-LABEL: vabau16:
;CHECK: vaba.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -56,7 +56,7 @@ define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabau32:
+;CHECK-LABEL: vabau32:
;CHECK: vaba.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -67,7 +67,7 @@ define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: vabaQs8:
+;CHECK-LABEL: vabaQs8:
;CHECK: vaba.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -78,7 +78,7 @@ define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
}
define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vabaQs16:
+;CHECK-LABEL: vabaQs16:
;CHECK: vaba.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -89,7 +89,7 @@ define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
}
define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vabaQs32:
+;CHECK-LABEL: vabaQs32:
;CHECK: vaba.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -100,7 +100,7 @@ define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind
}
define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: vabaQu8:
+;CHECK-LABEL: vabaQu8:
;CHECK: vaba.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -111,7 +111,7 @@ define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
}
define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vabaQu16:
+;CHECK-LABEL: vabaQu16:
;CHECK: vaba.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -122,7 +122,7 @@ define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
}
define <4 x i32> @vabaQu32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vabaQu32:
+;CHECK-LABEL: vabaQu32:
;CHECK: vaba.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -149,7 +149,7 @@ declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>) nounwind read
declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i16> @vabals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabals8:
+;CHECK-LABEL: vabals8:
;CHECK: vabal.s8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -161,7 +161,7 @@ define <8 x i16> @vabals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vabals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabals16:
+;CHECK-LABEL: vabals16:
;CHECK: vabal.s16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -173,7 +173,7 @@ define <4 x i32> @vabals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vabals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabals32:
+;CHECK-LABEL: vabals32:
;CHECK: vabal.s32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -185,7 +185,7 @@ define <2 x i64> @vabals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <8 x i16> @vabalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabalu8:
+;CHECK-LABEL: vabalu8:
;CHECK: vabal.u8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -197,7 +197,7 @@ define <8 x i16> @vabalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vabalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabalu16:
+;CHECK-LABEL: vabalu16:
;CHECK: vabal.u16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -209,7 +209,7 @@ define <4 x i32> @vabalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vabalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabalu32:
+;CHECK-LABEL: vabalu32:
;CHECK: vabal.u32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
diff --git a/test/CodeGen/ARM/vabd.ll b/test/CodeGen/ARM/vabd.ll
index 9ec734fa7641..2eb6d935de83 100644
--- a/test/CodeGen/ARM/vabd.ll
+++ b/test/CodeGen/ARM/vabd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabds8:
+;CHECK-LABEL: vabds8:
;CHECK: vabd.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vabds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabds16:
+;CHECK-LABEL: vabds16:
;CHECK: vabd.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vabds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vabds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabds32:
+;CHECK-LABEL: vabds32:
;CHECK: vabd.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vabds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vabdu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdu8:
+;CHECK-LABEL: vabdu8:
;CHECK: vabd.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <8 x i8> @vabdu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vabdu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdu16:
+;CHECK-LABEL: vabdu16:
;CHECK: vabd.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @vabdu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vabdu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdu32:
+;CHECK-LABEL: vabdu32:
;CHECK: vabd.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <2 x i32> @vabdu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vabdf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vabdf32:
+;CHECK-LABEL: vabdf32:
;CHECK: vabd.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -64,7 +64,7 @@ define <2 x float> @vabdf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vabdQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vabdQs8:
+;CHECK-LABEL: vabdQs8:
;CHECK: vabd.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -73,7 +73,7 @@ define <16 x i8> @vabdQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vabdQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vabdQs16:
+;CHECK-LABEL: vabdQs16:
;CHECK: vabd.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -82,7 +82,7 @@ define <8 x i16> @vabdQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vabdQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vabdQs32:
+;CHECK-LABEL: vabdQs32:
;CHECK: vabd.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -91,7 +91,7 @@ define <4 x i32> @vabdQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vabdQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vabdQu8:
+;CHECK-LABEL: vabdQu8:
;CHECK: vabd.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -100,7 +100,7 @@ define <16 x i8> @vabdQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vabdQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vabdQu16:
+;CHECK-LABEL: vabdQu16:
;CHECK: vabd.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -109,7 +109,7 @@ define <8 x i16> @vabdQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vabdQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vabdQu32:
+;CHECK-LABEL: vabdQu32:
;CHECK: vabd.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -118,7 +118,7 @@ define <4 x i32> @vabdQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vabdQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vabdQf32:
+;CHECK-LABEL: vabdQf32:
;CHECK: vabd.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -147,7 +147,7 @@ declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>) nounwind read
declare <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <8 x i16> @vabdls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdls8:
+;CHECK-LABEL: vabdls8:
;CHECK: vabdl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -157,7 +157,7 @@ define <8 x i16> @vabdls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdls16:
+;CHECK-LABEL: vabdls16:
;CHECK: vabdl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -167,7 +167,7 @@ define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdls32:
+;CHECK-LABEL: vabdls32:
;CHECK: vabdl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -177,7 +177,7 @@ define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdlu8:
+;CHECK-LABEL: vabdlu8:
;CHECK: vabdl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -187,7 +187,7 @@ define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdlu16:
+;CHECK-LABEL: vabdlu16:
;CHECK: vabdl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -197,7 +197,7 @@ define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vabdlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdlu32:
+;CHECK-LABEL: vabdlu32:
;CHECK: vabdl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
diff --git a/test/CodeGen/ARM/vabs.ll b/test/CodeGen/ARM/vabs.ll
index 18ba61f81e65..96dd38ec2e68 100644
--- a/test/CodeGen/ARM/vabs.ll
+++ b/test/CodeGen/ARM/vabs.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
-;CHECK: vabss8:
+;CHECK-LABEL: vabss8:
;CHECK: vabs.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
@@ -9,7 +9,7 @@ define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
-;CHECK: vabss16:
+;CHECK-LABEL: vabss16:
;CHECK: vabs.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
@@ -17,7 +17,7 @@ define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
-;CHECK: vabss32:
+;CHECK-LABEL: vabss32:
;CHECK: vabs.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
@@ -25,7 +25,7 @@ define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
}
define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
-;CHECK: vabsf32:
+;CHECK-LABEL: vabsf32:
;CHECK: vabs.f32
%tmp1 = load <2 x float>* %A
%tmp2 = call <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float> %tmp1)
@@ -33,7 +33,7 @@ define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
}
define <16 x i8> @vabsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vabsQs8:
+;CHECK-LABEL: vabsQs8:
;CHECK: vabs.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
@@ -41,7 +41,7 @@ define <16 x i8> @vabsQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vabsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vabsQs16:
+;CHECK-LABEL: vabsQs16:
;CHECK: vabs.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
@@ -49,7 +49,7 @@ define <8 x i16> @vabsQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vabsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vabsQs32:
+;CHECK-LABEL: vabsQs32:
;CHECK: vabs.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %tmp1)
@@ -57,7 +57,7 @@ define <4 x i32> @vabsQs32(<4 x i32>* %A) nounwind {
}
define <4 x float> @vabsQf32(<4 x float>* %A) nounwind {
-;CHECK: vabsQf32:
+;CHECK-LABEL: vabsQf32:
;CHECK: vabs.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float> %tmp1)
@@ -75,7 +75,7 @@ declare <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float>) nounwind readnone
define <8 x i8> @vqabss8(<8 x i8>* %A) nounwind {
-;CHECK: vqabss8:
+;CHECK-LABEL: vqabss8:
;CHECK: vqabs.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8> %tmp1)
@@ -83,7 +83,7 @@ define <8 x i8> @vqabss8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vqabss16(<4 x i16>* %A) nounwind {
-;CHECK: vqabss16:
+;CHECK-LABEL: vqabss16:
;CHECK: vqabs.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16> %tmp1)
@@ -91,7 +91,7 @@ define <4 x i16> @vqabss16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vqabss32(<2 x i32>* %A) nounwind {
-;CHECK: vqabss32:
+;CHECK-LABEL: vqabss32:
;CHECK: vqabs.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32> %tmp1)
@@ -99,7 +99,7 @@ define <2 x i32> @vqabss32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vqabsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vqabsQs8:
+;CHECK-LABEL: vqabsQs8:
;CHECK: vqabs.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %tmp1)
@@ -107,7 +107,7 @@ define <16 x i8> @vqabsQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vqabsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vqabsQs16:
+;CHECK-LABEL: vqabsQs16:
;CHECK: vqabs.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %tmp1)
@@ -115,7 +115,7 @@ define <8 x i16> @vqabsQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vqabsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vqabsQs32:
+;CHECK-LABEL: vqabsQs32:
;CHECK: vqabs.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32> %tmp1)
diff --git a/test/CodeGen/ARM/vadd.ll b/test/CodeGen/ARM/vadd.ll
index a830e968ff78..fcb5408272f4 100644
--- a/test/CodeGen/ARM/vadd.ll
+++ b/test/CodeGen/ARM/vadd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddi8:
+;CHECK-LABEL: vaddi8:
;CHECK: vadd.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddi16:
+;CHECK-LABEL: vaddi16:
;CHECK: vadd.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddi32:
+;CHECK-LABEL: vaddi32:
;CHECK: vadd.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vaddi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vaddi64:
+;CHECK-LABEL: vaddi64:
;CHECK: vadd.i64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @vaddi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vaddf32:
+;CHECK-LABEL: vaddf32:
;CHECK: vadd.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -46,7 +46,7 @@ define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vaddQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vaddQi8:
+;CHECK-LABEL: vaddQi8:
;CHECK: vadd.i8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -55,7 +55,7 @@ define <16 x i8> @vaddQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vaddQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vaddQi16:
+;CHECK-LABEL: vaddQi16:
;CHECK: vadd.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -64,7 +64,7 @@ define <8 x i16> @vaddQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vaddQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vaddQi32:
+;CHECK-LABEL: vaddQi32:
;CHECK: vadd.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -73,7 +73,7 @@ define <4 x i32> @vaddQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vaddQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vaddQi64:
+;CHECK-LABEL: vaddQi64:
;CHECK: vadd.i64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -82,7 +82,7 @@ define <2 x i64> @vaddQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vaddQf32:
+;CHECK-LABEL: vaddQf32:
;CHECK: vadd.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -90,39 +90,8 @@ define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
ret <4 x float> %tmp3
}
-define <8 x i8> @vaddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vaddhni16:
-;CHECK: vaddhn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vaddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vaddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vaddhni32:
-;CHECK: vaddhn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vaddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vaddhni64:
-;CHECK: vaddhn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vaddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vraddhni16:
+;CHECK-LABEL: vraddhni16:
;CHECK: vraddhn.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -131,7 +100,7 @@ define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vraddhni32:
+;CHECK-LABEL: vraddhni32:
;CHECK: vraddhn.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -140,7 +109,7 @@ define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i32> @vraddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vraddhni64:
+;CHECK-LABEL: vraddhni64:
;CHECK: vraddhn.i64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -152,8 +121,35 @@ declare <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16>, <8 x i16>) nounwind rea
declare <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
+define <8 x i8> @vaddhni16_natural(<8 x i16> %A, <8 x i16> %B) nounwind {
+; CHECK-LABEL: vaddhni16_natural:
+; CHECK: vaddhn.i16
+ %sum = add <8 x i16> %A, %B
+ %shift = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %trunc = trunc <8 x i16> %shift to <8 x i8>
+ ret <8 x i8> %trunc
+}
+
+define <4 x i16> @vaddhni32_natural(<4 x i32> %A, <4 x i32> %B) nounwind {
+; CHECK-LABEL: vaddhni32_natural:
+; CHECK: vaddhn.i32
+ %sum = add <4 x i32> %A, %B
+ %shift = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+ %trunc = trunc <4 x i32> %shift to <4 x i16>
+ ret <4 x i16> %trunc
+}
+
+define <2 x i32> @vaddhni64_natural(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK-LABEL: vaddhni64_natural:
+; CHECK: vaddhn.i64
+ %sum = add <2 x i64> %A, %B
+ %shift = lshr <2 x i64> %sum, <i64 32, i64 32>
+ %trunc = trunc <2 x i64> %shift to <2 x i32>
+ ret <2 x i32> %trunc
+}
+
define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddls8:
+;CHECK-LABEL: vaddls8:
;CHECK: vaddl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -164,7 +160,7 @@ define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddls16:
+;CHECK-LABEL: vaddls16:
;CHECK: vaddl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -175,7 +171,7 @@ define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddls32:
+;CHECK-LABEL: vaddls32:
;CHECK: vaddl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -186,7 +182,7 @@ define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddlu8:
+;CHECK-LABEL: vaddlu8:
;CHECK: vaddl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -197,7 +193,7 @@ define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddlu16:
+;CHECK-LABEL: vaddlu16:
;CHECK: vaddl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -208,7 +204,7 @@ define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddlu32:
+;CHECK-LABEL: vaddlu32:
;CHECK: vaddl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -219,7 +215,7 @@ define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddws8:
+;CHECK-LABEL: vaddws8:
;CHECK: vaddw.s8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -229,7 +225,7 @@ define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddws16:
+;CHECK-LABEL: vaddws16:
;CHECK: vaddw.s16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -239,7 +235,7 @@ define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddws32:
+;CHECK-LABEL: vaddws32:
;CHECK: vaddw.s32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -249,7 +245,7 @@ define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddwu8:
+;CHECK-LABEL: vaddwu8:
;CHECK: vaddw.u8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -259,7 +255,7 @@ define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddwu16:
+;CHECK-LABEL: vaddwu16:
;CHECK: vaddw.u16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -269,7 +265,7 @@ define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddwu32:
+;CHECK-LABEL: vaddwu32:
;CHECK: vaddw.u32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
diff --git a/test/CodeGen/ARM/vbits.ll b/test/CodeGen/ARM/vbits.ll
index 51f9bdf9718b..7b48441958f6 100644
--- a/test/CodeGen/ARM/vbits.ll
+++ b/test/CodeGen/ARM/vbits.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon -mcpu=cortex-a8 | FileCheck %s
define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_andi8:
+;CHECK-LABEL: v_andi8:
;CHECK: vand
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_andi16:
+;CHECK-LABEL: v_andi16:
;CHECK: vand
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_andi32:
+;CHECK-LABEL: v_andi32:
;CHECK: vand
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_andi64:
+;CHECK-LABEL: v_andi64:
;CHECK: vand
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_andQi8:
+;CHECK-LABEL: v_andQi8:
;CHECK: vand
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -46,7 +46,7 @@ define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_andQi16:
+;CHECK-LABEL: v_andQi16:
;CHECK: vand
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -55,7 +55,7 @@ define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_andQi32:
+;CHECK-LABEL: v_andQi32:
;CHECK: vand
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -64,7 +64,7 @@ define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_andQi64:
+;CHECK-LABEL: v_andQi64:
;CHECK: vand
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -73,7 +73,7 @@ define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_bici8:
+;CHECK-LABEL: v_bici8:
;CHECK: vbic
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -83,7 +83,7 @@ define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_bici16:
+;CHECK-LABEL: v_bici16:
;CHECK: vbic
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -93,7 +93,7 @@ define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_bici32:
+;CHECK-LABEL: v_bici32:
;CHECK: vbic
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -103,7 +103,7 @@ define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_bici64:
+;CHECK-LABEL: v_bici64:
;CHECK: vbic
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -113,7 +113,7 @@ define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_bicQi8:
+;CHECK-LABEL: v_bicQi8:
;CHECK: vbic
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -123,7 +123,7 @@ define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_bicQi16:
+;CHECK-LABEL: v_bicQi16:
;CHECK: vbic
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -133,7 +133,7 @@ define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_bicQi32:
+;CHECK-LABEL: v_bicQi32:
;CHECK: vbic
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -143,7 +143,7 @@ define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_bicQi64:
+;CHECK-LABEL: v_bicQi64:
;CHECK: vbic
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -153,7 +153,7 @@ define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_eori8:
+;CHECK-LABEL: v_eori8:
;CHECK: veor
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -162,7 +162,7 @@ define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_eori16:
+;CHECK-LABEL: v_eori16:
;CHECK: veor
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -171,7 +171,7 @@ define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_eori32:
+;CHECK-LABEL: v_eori32:
;CHECK: veor
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -180,7 +180,7 @@ define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_eori64:
+;CHECK-LABEL: v_eori64:
;CHECK: veor
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -189,7 +189,7 @@ define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_eorQi8:
+;CHECK-LABEL: v_eorQi8:
;CHECK: veor
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -198,7 +198,7 @@ define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_eorQi16:
+;CHECK-LABEL: v_eorQi16:
;CHECK: veor
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -207,7 +207,7 @@ define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_eorQi32:
+;CHECK-LABEL: v_eorQi32:
;CHECK: veor
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -216,7 +216,7 @@ define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_eorQi64:
+;CHECK-LABEL: v_eorQi64:
;CHECK: veor
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -225,7 +225,7 @@ define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
-;CHECK: v_mvni8:
+;CHECK-LABEL: v_mvni8:
;CHECK: vmvn
%tmp1 = load <8 x i8>* %A
%tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
@@ -233,7 +233,7 @@ define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
-;CHECK: v_mvni16:
+;CHECK-LABEL: v_mvni16:
;CHECK: vmvn
%tmp1 = load <4 x i16>* %A
%tmp2 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -241,7 +241,7 @@ define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
-;CHECK: v_mvni32:
+;CHECK-LABEL: v_mvni32:
;CHECK: vmvn
%tmp1 = load <2 x i32>* %A
%tmp2 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
@@ -249,7 +249,7 @@ define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
}
define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
-;CHECK: v_mvni64:
+;CHECK-LABEL: v_mvni64:
;CHECK: vmvn
%tmp1 = load <1 x i64>* %A
%tmp2 = xor <1 x i64> %tmp1, < i64 -1 >
@@ -257,7 +257,7 @@ define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
}
define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
-;CHECK: v_mvnQi8:
+;CHECK-LABEL: v_mvnQi8:
;CHECK: vmvn
%tmp1 = load <16 x i8>* %A
%tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
@@ -265,7 +265,7 @@ define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
-;CHECK: v_mvnQi16:
+;CHECK-LABEL: v_mvnQi16:
;CHECK: vmvn
%tmp1 = load <8 x i16>* %A
%tmp2 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -273,7 +273,7 @@ define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
-;CHECK: v_mvnQi32:
+;CHECK-LABEL: v_mvnQi32:
;CHECK: vmvn
%tmp1 = load <4 x i32>* %A
%tmp2 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
@@ -281,7 +281,7 @@ define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
}
define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
-;CHECK: v_mvnQi64:
+;CHECK-LABEL: v_mvnQi64:
;CHECK: vmvn
%tmp1 = load <2 x i64>* %A
%tmp2 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
@@ -289,7 +289,7 @@ define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
}
define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_orri8:
+;CHECK-LABEL: v_orri8:
;CHECK: vorr
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -298,7 +298,7 @@ define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_orri16:
+;CHECK-LABEL: v_orri16:
;CHECK: vorr
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -307,7 +307,7 @@ define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_orri32:
+;CHECK-LABEL: v_orri32:
;CHECK: vorr
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -316,7 +316,7 @@ define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_orri64:
+;CHECK-LABEL: v_orri64:
;CHECK: vorr
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -325,7 +325,7 @@ define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_orrQi8:
+;CHECK-LABEL: v_orrQi8:
;CHECK: vorr
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -334,7 +334,7 @@ define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_orrQi16:
+;CHECK-LABEL: v_orrQi16:
;CHECK: vorr
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -343,7 +343,7 @@ define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_orrQi32:
+;CHECK-LABEL: v_orrQi32:
;CHECK: vorr
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -352,7 +352,7 @@ define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_orrQi64:
+;CHECK-LABEL: v_orrQi64:
;CHECK: vorr
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -361,7 +361,7 @@ define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_orni8:
+;CHECK-LABEL: v_orni8:
;CHECK: vorn
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -371,7 +371,7 @@ define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_orni16:
+;CHECK-LABEL: v_orni16:
;CHECK: vorn
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -381,7 +381,7 @@ define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_orni32:
+;CHECK-LABEL: v_orni32:
;CHECK: vorn
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -391,7 +391,7 @@ define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_orni64:
+;CHECK-LABEL: v_orni64:
;CHECK: vorn
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -401,7 +401,7 @@ define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_ornQi8:
+;CHECK-LABEL: v_ornQi8:
;CHECK: vorn
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -411,7 +411,7 @@ define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_ornQi16:
+;CHECK-LABEL: v_ornQi16:
;CHECK: vorn
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -421,7 +421,7 @@ define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_ornQi32:
+;CHECK-LABEL: v_ornQi32:
;CHECK: vorn
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -431,7 +431,7 @@ define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_ornQi64:
+;CHECK-LABEL: v_ornQi64:
;CHECK: vorn
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -441,7 +441,7 @@ define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vtsti8:
+;CHECK-LABEL: vtsti8:
;CHECK: vtst.8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -452,7 +452,7 @@ define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vtsti16:
+;CHECK-LABEL: vtsti16:
;CHECK: vtst.16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -463,7 +463,7 @@ define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vtsti32:
+;CHECK-LABEL: vtsti32:
;CHECK: vtst.32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -474,7 +474,7 @@ define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vtstQi8:
+;CHECK-LABEL: vtstQi8:
;CHECK: vtst.8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -485,7 +485,7 @@ define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vtstQi16:
+;CHECK-LABEL: vtstQi16:
;CHECK: vtst.16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -496,7 +496,7 @@ define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vtstQi32:
+;CHECK-LABEL: vtstQi32:
;CHECK: vtst.32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -507,7 +507,7 @@ define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <8 x i8> @v_orrimm(<8 x i8>* %A) nounwind {
-; CHECK: v_orrimm:
+; CHECK-LABEL: v_orrimm:
; CHECK-NOT: vmov
; CHECK-NOT: vmvn
; CHECK: vorr
@@ -527,7 +527,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %A) nounwind {
}
define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
-; CHECK: v_bicimm:
+; CHECK-LABEL: v_bicimm:
; CHECK-NOT: vmov
; CHECK-NOT: vmvn
; CHECK: vbic
@@ -537,7 +537,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A) nounwind {
}
define <16 x i8> @v_bicimmQ(<16 x i8>* %A) nounwind {
-; CHECK: v_bicimmQ:
+; CHECK-LABEL: v_bicimmQ:
; CHECK-NOT: vmov
; CHECK-NOT: vmvn
; CHECK: vbic
diff --git a/test/CodeGen/ARM/vbsl-constant.ll b/test/CodeGen/ARM/vbsl-constant.ll
index ffda0a51bdd0..5e033fe2a647 100644
--- a/test/CodeGen/ARM/vbsl-constant.ll
+++ b/test/CodeGen/ARM/vbsl-constant.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-ios -mattr=+neon | FileCheck %s
define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: v_bsli8:
+;CHECK-LABEL: v_bsli8:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
@@ -15,7 +15,7 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: v_bsli16:
+;CHECK-LABEL: v_bsli16:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
@@ -29,7 +29,7 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: v_bsli32:
+;CHECK-LABEL: v_bsli32:
;CHECK: vldr
;CHECK: vldr
;CHECK: vbsl
@@ -43,7 +43,7 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
-;CHECK: v_bsli64:
+;CHECK-LABEL: v_bsli64:
;CHECK: vldr
;CHECK: vldr
;CHECK: vldr
@@ -58,7 +58,7 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind
}
define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: v_bslQi8:
+;CHECK-LABEL: v_bslQi8:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
@@ -72,7 +72,7 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
}
define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: v_bslQi16:
+;CHECK-LABEL: v_bslQi16:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
@@ -86,7 +86,7 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin
}
define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: v_bslQi32:
+;CHECK-LABEL: v_bslQi32:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vbsl
@@ -100,7 +100,7 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin
}
define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
-;CHECK: v_bslQi64:
+;CHECK-LABEL: v_bslQi64:
;CHECK: vld1.32
;CHECK: vld1.32
;CHECK: vld1.64
diff --git a/test/CodeGen/ARM/vbsl.ll b/test/CodeGen/ARM/vbsl.ll
index 750fb0de5383..1e53e51f8bb0 100644
--- a/test/CodeGen/ARM/vbsl.ll
+++ b/test/CodeGen/ARM/vbsl.ll
@@ -3,7 +3,7 @@
; rdar://12471808
define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: v_bsli8:
+;CHECK-LABEL: v_bsli8:
;CHECK: vbsl
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -16,7 +16,7 @@ define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: v_bsli16:
+;CHECK-LABEL: v_bsli16:
;CHECK: vbsl
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -29,7 +29,7 @@ define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: v_bsli32:
+;CHECK-LABEL: v_bsli32:
;CHECK: vbsl
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -42,7 +42,7 @@ define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
-;CHECK: v_bsli64:
+;CHECK-LABEL: v_bsli64:
;CHECK: vbsl
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -55,7 +55,7 @@ define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind
}
define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: v_bslQi8:
+;CHECK-LABEL: v_bslQi8:
;CHECK: vbsl
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -68,7 +68,7 @@ define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind
}
define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: v_bslQi16:
+;CHECK-LABEL: v_bslQi16:
;CHECK: vbsl
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -81,7 +81,7 @@ define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwin
}
define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: v_bslQi32:
+;CHECK-LABEL: v_bslQi32:
;CHECK: vbsl
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -94,7 +94,7 @@ define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwin
}
define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
-;CHECK: v_bslQi64:
+;CHECK-LABEL: v_bslQi64:
;CHECK: vbsl
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -107,84 +107,84 @@ define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwin
}
define <8 x i8> @f1(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind readnone optsize ssp {
-; CHECK: f1:
+; CHECK-LABEL: f1:
; CHECK: vbsl
%vbsl.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) nounwind
ret <8 x i8> %vbsl.i
}
define <4 x i16> @f2(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind readnone optsize ssp {
-; CHECK: f2:
+; CHECK-LABEL: f2:
; CHECK: vbsl
%vbsl3.i = tail call <4 x i16> @llvm.arm.neon.vbsl.v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c) nounwind
ret <4 x i16> %vbsl3.i
}
define <2 x i32> @f3(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind readnone optsize ssp {
-; CHECK: f3:
+; CHECK-LABEL: f3:
; CHECK: vbsl
%vbsl3.i = tail call <2 x i32> @llvm.arm.neon.vbsl.v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) nounwind
ret <2 x i32> %vbsl3.i
}
define <2 x float> @f4(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind readnone optsize ssp {
-; CHECK: f4:
+; CHECK-LABEL: f4:
; CHECK: vbsl
%vbsl4.i = tail call <2 x float> @llvm.arm.neon.vbsl.v2f32(<2 x float> %a, <2 x float> %b, <2 x float> %c) nounwind
ret <2 x float> %vbsl4.i
}
define <16 x i8> @g1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind readnone optsize ssp {
-; CHECK: g1:
+; CHECK-LABEL: g1:
; CHECK: vbsl
%vbsl.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) nounwind
ret <16 x i8> %vbsl.i
}
define <8 x i16> @g2(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone optsize ssp {
-; CHECK: g2:
+; CHECK-LABEL: g2:
; CHECK: vbsl
%vbsl3.i = tail call <8 x i16> @llvm.arm.neon.vbsl.v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind
ret <8 x i16> %vbsl3.i
}
define <4 x i32> @g3(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone optsize ssp {
-; CHECK: g3:
+; CHECK-LABEL: g3:
; CHECK: vbsl
%vbsl3.i = tail call <4 x i32> @llvm.arm.neon.vbsl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind
ret <4 x i32> %vbsl3.i
}
define <4 x float> @g4(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind readnone optsize ssp {
-; CHECK: g4:
+; CHECK-LABEL: g4:
; CHECK: vbsl
%vbsl4.i = tail call <4 x float> @llvm.arm.neon.vbsl.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) nounwind
ret <4 x float> %vbsl4.i
}
define <1 x i64> @test_vbsl_s64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
-; CHECK: test_vbsl_s64:
+; CHECK-LABEL: test_vbsl_s64:
; CHECK: vbsl d
%vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
ret <1 x i64> %vbsl3.i
}
define <1 x i64> @test_vbsl_u64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind readnone optsize ssp {
-; CHECK: test_vbsl_u64:
+; CHECK-LABEL: test_vbsl_u64:
; CHECK: vbsl d
%vbsl3.i = tail call <1 x i64> @llvm.arm.neon.vbsl.v1i64(<1 x i64> %a, <1 x i64> %b, <1 x i64> %c) nounwind
ret <1 x i64> %vbsl3.i
}
define <2 x i64> @test_vbslq_s64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
-; CHECK: test_vbslq_s64:
+; CHECK-LABEL: test_vbslq_s64:
; CHECK: vbsl q
%vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
ret <2 x i64> %vbsl3.i
}
define <2 x i64> @test_vbslq_u64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind readnone optsize ssp {
-; CHECK: test_vbslq_u64:
+; CHECK-LABEL: test_vbslq_u64:
; CHECK: vbsl q
%vbsl3.i = tail call <2 x i64> @llvm.arm.neon.vbsl.v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) nounwind
ret <2 x i64> %vbsl3.i
diff --git a/test/CodeGen/ARM/vceq.ll b/test/CodeGen/ARM/vceq.ll
index 051c349a06a4..0a1f2ebe4f83 100644
--- a/test/CodeGen/ARM/vceq.ll
+++ b/test/CodeGen/ARM/vceq.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vceqi8:
+;CHECK-LABEL: vceqi8:
;CHECK: vceq.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -11,7 +11,7 @@ define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vceqi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vceqi16:
+;CHECK-LABEL: vceqi16:
;CHECK: vceq.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -21,7 +21,7 @@ define <4 x i16> @vceqi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vceqi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vceqi32:
+;CHECK-LABEL: vceqi32:
;CHECK: vceq.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -31,7 +31,7 @@ define <2 x i32> @vceqi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x i32> @vceqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vceqf32:
+;CHECK-LABEL: vceqf32:
;CHECK: vceq.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -41,7 +41,7 @@ define <2 x i32> @vceqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vceqQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vceqQi8:
+;CHECK-LABEL: vceqQi8:
;CHECK: vceq.i8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -51,7 +51,7 @@ define <16 x i8> @vceqQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vceqQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vceqQi16:
+;CHECK-LABEL: vceqQi16:
;CHECK: vceq.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -61,7 +61,7 @@ define <8 x i16> @vceqQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vceqQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vceqQi32:
+;CHECK-LABEL: vceqQi32:
;CHECK: vceq.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -71,7 +71,7 @@ define <4 x i32> @vceqQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vceqQf32:
+;CHECK-LABEL: vceqQf32:
;CHECK: vceq.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -81,7 +81,7 @@ define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
}
define <8 x i8> @vceqi8Z(<8 x i8>* %A) nounwind {
-;CHECK: vceqi8Z:
+;CHECK-LABEL: vceqi8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vceq.i8
diff --git a/test/CodeGen/ARM/vcge.ll b/test/CodeGen/ARM/vcge.ll
index bf5f0b9efb2f..81a59dbdfe90 100644
--- a/test/CodeGen/ARM/vcge.ll
+++ b/test/CodeGen/ARM/vcge.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcges8:
+;CHECK-LABEL: vcges8:
;CHECK: vcge.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -11,7 +11,7 @@ define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcges16:
+;CHECK-LABEL: vcges16:
;CHECK: vcge.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -21,7 +21,7 @@ define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vcges32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcges32:
+;CHECK-LABEL: vcges32:
;CHECK: vcge.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -31,7 +31,7 @@ define <2 x i32> @vcges32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vcgeu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgeu8:
+;CHECK-LABEL: vcgeu8:
;CHECK: vcge.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -41,7 +41,7 @@ define <8 x i8> @vcgeu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgeu16:
+;CHECK-LABEL: vcgeu16:
;CHECK: vcge.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -51,7 +51,7 @@ define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vcgeu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgeu32:
+;CHECK-LABEL: vcgeu32:
;CHECK: vcge.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -61,7 +61,7 @@ define <2 x i32> @vcgeu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x i32> @vcgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcgef32:
+;CHECK-LABEL: vcgef32:
;CHECK: vcge.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -71,7 +71,7 @@ define <2 x i32> @vcgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vcgeQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgeQs8:
+;CHECK-LABEL: vcgeQs8:
;CHECK: vcge.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -81,7 +81,7 @@ define <16 x i8> @vcgeQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vcgeQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgeQs16:
+;CHECK-LABEL: vcgeQs16:
;CHECK: vcge.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -91,7 +91,7 @@ define <8 x i16> @vcgeQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vcgeQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgeQs32:
+;CHECK-LABEL: vcgeQs32:
;CHECK: vcge.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -101,7 +101,7 @@ define <4 x i32> @vcgeQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vcgeQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgeQu8:
+;CHECK-LABEL: vcgeQu8:
;CHECK: vcge.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -111,7 +111,7 @@ define <16 x i8> @vcgeQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vcgeQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgeQu16:
+;CHECK-LABEL: vcgeQu16:
;CHECK: vcge.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -121,7 +121,7 @@ define <8 x i16> @vcgeQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vcgeQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgeQu32:
+;CHECK-LABEL: vcgeQu32:
;CHECK: vcge.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -131,7 +131,7 @@ define <4 x i32> @vcgeQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x i32> @vcgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vcgeQf32:
+;CHECK-LABEL: vcgeQf32:
;CHECK: vcge.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -141,7 +141,7 @@ define <4 x i32> @vcgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
}
define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vacgef32:
+;CHECK-LABEL: vacgef32:
;CHECK: vacge.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -150,7 +150,7 @@ define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vacgeQf32:
+;CHECK-LABEL: vacgeQf32:
;CHECK: vacge.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -162,7 +162,7 @@ declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>) nounwind readn
declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vcgei8Z(<8 x i8>* %A) nounwind {
-;CHECK: vcgei8Z:
+;CHECK-LABEL: vcgei8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vcge.s8
@@ -173,7 +173,7 @@ define <8 x i8> @vcgei8Z(<8 x i8>* %A) nounwind {
}
define <8 x i8> @vclei8Z(<8 x i8>* %A) nounwind {
-;CHECK: vclei8Z:
+;CHECK-LABEL: vclei8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vcle.s8
@@ -187,7 +187,7 @@ define <8 x i8> @vclei8Z(<8 x i8>* %A) nounwind {
; Floating-point comparisons against zero produce results with integer
; elements, not floating-point elements.
define void @test_vclez_fp() nounwind optsize {
-;CHECK: test_vclez_fp
+;CHECK-LABEL: test_vclez_fp:
;CHECK: vcle.f32
entry:
%0 = fcmp ole <4 x float> undef, zeroinitializer
diff --git a/test/CodeGen/ARM/vcgt.ll b/test/CodeGen/ARM/vcgt.ll
index 2243bac91fb1..056866fe994b 100644
--- a/test/CodeGen/ARM/vcgt.ll
+++ b/test/CodeGen/ARM/vcgt.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
define <8 x i8> @vcgts8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgts8:
+;CHECK-LABEL: vcgts8:
;CHECK: vcgt.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -12,7 +12,7 @@ define <8 x i8> @vcgts8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vcgts16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgts16:
+;CHECK-LABEL: vcgts16:
;CHECK: vcgt.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -22,7 +22,7 @@ define <4 x i16> @vcgts16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vcgts32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgts32:
+;CHECK-LABEL: vcgts32:
;CHECK: vcgt.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -32,7 +32,7 @@ define <2 x i32> @vcgts32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vcgtu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgtu8:
+;CHECK-LABEL: vcgtu8:
;CHECK: vcgt.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -42,7 +42,7 @@ define <8 x i8> @vcgtu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vcgtu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgtu16:
+;CHECK-LABEL: vcgtu16:
;CHECK: vcgt.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -52,7 +52,7 @@ define <4 x i16> @vcgtu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vcgtu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgtu32:
+;CHECK-LABEL: vcgtu32:
;CHECK: vcgt.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -62,7 +62,7 @@ define <2 x i32> @vcgtu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x i32> @vcgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcgtf32:
+;CHECK-LABEL: vcgtf32:
;CHECK: vcgt.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -72,7 +72,7 @@ define <2 x i32> @vcgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vcgtQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgtQs8:
+;CHECK-LABEL: vcgtQs8:
;CHECK: vcgt.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -82,7 +82,7 @@ define <16 x i8> @vcgtQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vcgtQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgtQs16:
+;CHECK-LABEL: vcgtQs16:
;CHECK: vcgt.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -92,7 +92,7 @@ define <8 x i16> @vcgtQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vcgtQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgtQs32:
+;CHECK-LABEL: vcgtQs32:
;CHECK: vcgt.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -102,7 +102,7 @@ define <4 x i32> @vcgtQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vcgtQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgtQu8:
+;CHECK-LABEL: vcgtQu8:
;CHECK: vcgt.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -112,7 +112,7 @@ define <16 x i8> @vcgtQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vcgtQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgtQu16:
+;CHECK-LABEL: vcgtQu16:
;CHECK: vcgt.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -122,7 +122,7 @@ define <8 x i16> @vcgtQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vcgtQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgtQu32:
+;CHECK-LABEL: vcgtQu32:
;CHECK: vcgt.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -132,7 +132,7 @@ define <4 x i32> @vcgtQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x i32> @vcgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vcgtQf32:
+;CHECK-LABEL: vcgtQf32:
;CHECK: vcgt.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -142,7 +142,7 @@ define <4 x i32> @vcgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
}
define <2 x i32> @vacgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vacgtf32:
+;CHECK-LABEL: vacgtf32:
;CHECK: vacgt.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -151,7 +151,7 @@ define <2 x i32> @vacgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vacgtQf32:
+;CHECK-LABEL: vacgtQf32:
;CHECK: vacgt.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -161,7 +161,7 @@ define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
; rdar://7923010
define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vcgt_zext:
+;CHECK-LABEL: vcgt_zext:
;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1
;CHECK: vcgt.f32 [[Q1:q[0-9]+]]
;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
@@ -176,7 +176,7 @@ declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>) nounwind readn
declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vcgti8Z(<8 x i8>* %A) nounwind {
-;CHECK: vcgti8Z:
+;CHECK-LABEL: vcgti8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vcgt.s8
@@ -187,7 +187,7 @@ define <8 x i8> @vcgti8Z(<8 x i8>* %A) nounwind {
}
define <8 x i8> @vclti8Z(<8 x i8>* %A) nounwind {
-;CHECK: vclti8Z:
+;CHECK-LABEL: vclti8Z:
;CHECK-NOT: vmov
;CHECK-NOT: vmvn
;CHECK: vclt.s8
diff --git a/test/CodeGen/ARM/vcnt.ll b/test/CodeGen/ARM/vcnt.ll
index 9f55c24b4029..0b539799833d 100644
--- a/test/CodeGen/ARM/vcnt.ll
+++ b/test/CodeGen/ARM/vcnt.ll
@@ -2,7 +2,7 @@
; NB: this tests vcnt, vclz, and vcls
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
-;CHECK: vcnt8:
+;CHECK-LABEL: vcnt8:
;CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
@@ -10,7 +10,7 @@ define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
}
define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
-;CHECK: vcntQ8:
+;CHECK-LABEL: vcntQ8:
;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
@@ -21,7 +21,7 @@ declare <8 x i8> @llvm.ctpop.v8i8(<8 x i8>) nounwind readnone
declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>) nounwind readnone
define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
-;CHECK: vclz8:
+;CHECK-LABEL: vclz8:
;CHECK: vclz.i8 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
@@ -29,7 +29,7 @@ define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
-;CHECK: vclz16:
+;CHECK-LABEL: vclz16:
;CHECK: vclz.i16 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
@@ -37,7 +37,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
-;CHECK: vclz32:
+;CHECK-LABEL: vclz32:
;CHECK: vclz.i32 {{d[0-9]+}}, {{d[0-9]+}}
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
@@ -45,7 +45,7 @@ define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
-;CHECK: vclzQ8:
+;CHECK-LABEL: vclzQ8:
;CHECK: vclz.i8 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
@@ -53,7 +53,7 @@ define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
-;CHECK: vclzQ16:
+;CHECK-LABEL: vclzQ16:
;CHECK: vclz.i16 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
@@ -61,7 +61,7 @@ define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
-;CHECK: vclzQ32:
+;CHECK-LABEL: vclzQ32:
;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
@@ -77,7 +77,7 @@ declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
-;CHECK: vclss8:
+;CHECK-LABEL: vclss8:
;CHECK: vcls.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
@@ -85,7 +85,7 @@ define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
-;CHECK: vclss16:
+;CHECK-LABEL: vclss16:
;CHECK: vcls.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
@@ -93,7 +93,7 @@ define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
-;CHECK: vclss32:
+;CHECK-LABEL: vclss32:
;CHECK: vcls.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
@@ -101,7 +101,7 @@ define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vclsQs8:
+;CHECK-LABEL: vclsQs8:
;CHECK: vcls.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
@@ -109,7 +109,7 @@ define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vclsQs16:
+;CHECK-LABEL: vclsQs16:
;CHECK: vcls.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
@@ -117,7 +117,7 @@ define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vclsQs32:
+;CHECK-LABEL: vclsQs32:
;CHECK: vcls.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
diff --git a/test/CodeGen/ARM/vcvt-cost.ll b/test/CodeGen/ARM/vcvt-cost.ll
index 0d45c40b8814..5e56a5b34cf0 100644
--- a/test/CodeGen/ARM/vcvt-cost.ll
+++ b/test/CodeGen/ARM/vcvt-cost.ll
@@ -4,7 +4,7 @@
; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
%T0_5 = type <8 x i8>
%T1_5 = type <8 x i32>
-; CHECK: func_cvt5:
+; CHECK-LABEL: func_cvt5:
define void @func_cvt5(%T0_5* %loadaddr, %T1_5* %storeaddr) {
; CHECK: vmovl.s8
; CHECK: vmovl.s16
@@ -20,7 +20,7 @@ define void @func_cvt5(%T0_5* %loadaddr, %T1_5* %storeaddr) {
;; is improved the cost needs to change.
%TA0_5 = type <8 x i8>
%TA1_5 = type <8 x i32>
-; CHECK: func_cvt1:
+; CHECK-LABEL: func_cvt1:
define void @func_cvt1(%TA0_5* %loadaddr, %TA1_5* %storeaddr) {
; CHECK: vmovl.u8
; CHECK: vmovl.u16
@@ -35,7 +35,7 @@ define void @func_cvt1(%TA0_5* %loadaddr, %TA1_5* %storeaddr) {
%T0_51 = type <8 x i32>
%T1_51 = type <8 x i8>
-; CHECK: func_cvt51:
+; CHECK-LABEL: func_cvt51:
define void @func_cvt51(%T0_51* %loadaddr, %T1_51* %storeaddr) {
; CHECK: vmovn.i32
; CHECK: vmovn.i32
@@ -50,7 +50,7 @@ define void @func_cvt51(%T0_51* %loadaddr, %T1_51* %storeaddr) {
%TT0_5 = type <16 x i8>
%TT1_5 = type <16 x i32>
-; CHECK: func_cvt52:
+; CHECK-LABEL: func_cvt52:
define void @func_cvt52(%TT0_5* %loadaddr, %TT1_5* %storeaddr) {
; CHECK: vmovl.s16
; CHECK: vmovl.s16
@@ -67,7 +67,7 @@ define void @func_cvt52(%TT0_5* %loadaddr, %TT1_5* %storeaddr) {
;; is improved the cost needs to change.
%TTA0_5 = type <16 x i8>
%TTA1_5 = type <16 x i32>
-; CHECK: func_cvt12:
+; CHECK-LABEL: func_cvt12:
define void @func_cvt12(%TTA0_5* %loadaddr, %TTA1_5* %storeaddr) {
; CHECK: vmovl.u16
; CHECK: vmovl.u16
@@ -83,7 +83,7 @@ define void @func_cvt12(%TTA0_5* %loadaddr, %TTA1_5* %storeaddr) {
%TT0_51 = type <16 x i32>
%TT1_51 = type <16 x i8>
-; CHECK: func_cvt512:
+; CHECK-LABEL: func_cvt512:
define void @func_cvt512(%TT0_51* %loadaddr, %TT1_51* %storeaddr) {
; CHECK: vmovn.i32
; CHECK: vmovn.i32
@@ -99,7 +99,7 @@ define void @func_cvt512(%TT0_51* %loadaddr, %TT1_51* %storeaddr) {
ret void
}
-; CHECK: sext_v4i16_v4i64:
+; CHECK-LABEL: sext_v4i16_v4i64:
define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
; CHECK: vmovl.s32
; CHECK: vmovl.s32
@@ -111,7 +111,7 @@ define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
ret void
}
-; CHECK: zext_v4i16_v4i64:
+; CHECK-LABEL: zext_v4i16_v4i64:
define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
; CHECK: vmovl.u32
; CHECK: vmovl.u32
@@ -123,7 +123,7 @@ define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
ret void
}
-; CHECK: sext_v8i16_v8i64:
+; CHECK-LABEL: sext_v8i16_v8i64:
define void @sext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
; CHECK: vmovl.s32
; CHECK: vmovl.s32
@@ -137,7 +137,7 @@ define void @sext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
ret void
}
-; CHECK: zext_v8i16_v8i64:
+; CHECK-LABEL: zext_v8i16_v8i64:
define void @zext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
; CHECK: vmovl.u32
; CHECK: vmovl.u32
diff --git a/test/CodeGen/ARM/vcvt-v8.ll b/test/CodeGen/ARM/vcvt-v8.ll
new file mode 100644
index 000000000000..c449009e1e1f
--- /dev/null
+++ b/test/CodeGen/ARM/vcvt-v8.ll
@@ -0,0 +1,145 @@
+; RUN: llc < %s -mtriple=armv8 -mattr=+neon | FileCheck %s
+define <4 x i32> @vcvtasq(<4 x float>* %A) {
+; CHECK: vcvtasq
+; CHECK: vcvta.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtasd(<2 x float>* %A) {
+; CHECK: vcvtasd
+; CHECK: vcvta.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtnsq(<4 x float>* %A) {
+; CHECK: vcvtnsq
+; CHECK: vcvtn.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtnsd(<2 x float>* %A) {
+; CHECK: vcvtnsd
+; CHECK: vcvtn.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtpsq(<4 x float>* %A) {
+; CHECK: vcvtpsq
+; CHECK: vcvtp.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtpsd(<2 x float>* %A) {
+; CHECK: vcvtpsd
+; CHECK: vcvtp.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtmsq(<4 x float>* %A) {
+; CHECK: vcvtmsq
+; CHECK: vcvtm.s32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtmsd(<2 x float>* %A) {
+; CHECK: vcvtmsd
+; CHECK: vcvtm.s32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtauq(<4 x float>* %A) {
+; CHECK: vcvtauq
+; CHECK: vcvta.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtaud(<2 x float>* %A) {
+; CHECK: vcvtaud
+; CHECK: vcvta.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtnuq(<4 x float>* %A) {
+; CHECK: vcvtnuq
+; CHECK: vcvtn.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtnud(<2 x float>* %A) {
+; CHECK: vcvtnud
+; CHECK: vcvtn.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtpuq(<4 x float>* %A) {
+; CHECK: vcvtpuq
+; CHECK: vcvtp.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtpud(<2 x float>* %A) {
+; CHECK: vcvtpud
+; CHECK: vcvtp.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+define <4 x i32> @vcvtmuq(<4 x float>* %A) {
+; CHECK: vcvtmuq
+; CHECK: vcvtm.u32.f32 q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float> %tmp1)
+ ret <4 x i32> %tmp2
+}
+
+define <2 x i32> @vcvtmud(<2 x float>* %A) {
+; CHECK: vcvtmud
+; CHECK: vcvtm.u32.f32 d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float> %tmp1)
+ ret <2 x i32> %tmp2
+}
+
+declare <4 x i32> @llvm.arm.neon.vcvtas.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtas.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtns.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtns.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtps.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtps.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtms.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtms.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtau.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtau.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtnu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtnu.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtpu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtpu.v2i32.v2f32(<2 x float>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vcvtmu.v4i32.v4f32(<4 x float>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vcvtmu.v2i32.v2f32(<2 x float>) nounwind readnone
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index c078f493094b..4f17dc559480 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon,+fp16 | FileCheck %s
define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_f32tos32:
+;CHECK-LABEL: vcvt_f32tos32:
;CHECK: vcvt.s32.f32
%tmp1 = load <2 x float>* %A
%tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
@@ -9,7 +9,7 @@ define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
}
define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_f32tou32:
+;CHECK-LABEL: vcvt_f32tou32:
;CHECK: vcvt.u32.f32
%tmp1 = load <2 x float>* %A
%tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
@@ -17,7 +17,7 @@ define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
}
define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_s32tof32:
+;CHECK-LABEL: vcvt_s32tof32:
;CHECK: vcvt.f32.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
@@ -25,7 +25,7 @@ define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
}
define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_u32tof32:
+;CHECK-LABEL: vcvt_u32tof32:
;CHECK: vcvt.f32.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
@@ -33,7 +33,7 @@ define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
}
define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_f32tos32:
+;CHECK-LABEL: vcvtQ_f32tos32:
;CHECK: vcvt.s32.f32
%tmp1 = load <4 x float>* %A
%tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
@@ -41,7 +41,7 @@ define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
}
define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_f32tou32:
+;CHECK-LABEL: vcvtQ_f32tou32:
;CHECK: vcvt.u32.f32
%tmp1 = load <4 x float>* %A
%tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
@@ -49,7 +49,7 @@ define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
}
define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_s32tof32:
+;CHECK-LABEL: vcvtQ_s32tof32:
;CHECK: vcvt.f32.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = sitofp <4 x i32> %tmp1 to <4 x float>
@@ -57,7 +57,7 @@ define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
}
define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_u32tof32:
+;CHECK-LABEL: vcvtQ_u32tof32:
;CHECK: vcvt.f32.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = uitofp <4 x i32> %tmp1 to <4 x float>
@@ -65,7 +65,7 @@ define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_n_f32tos32:
+;CHECK-LABEL: vcvt_n_f32tos32:
;CHECK: vcvt.s32.f32
%tmp1 = load <2 x float>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %tmp1, i32 1)
@@ -73,7 +73,7 @@ define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
}
define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_n_f32tou32:
+;CHECK-LABEL: vcvt_n_f32tou32:
;CHECK: vcvt.u32.f32
%tmp1 = load <2 x float>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %tmp1, i32 1)
@@ -81,7 +81,7 @@ define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
}
define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_n_s32tof32:
+;CHECK-LABEL: vcvt_n_s32tof32:
;CHECK: vcvt.f32.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
@@ -89,7 +89,7 @@ define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
}
define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_n_u32tof32:
+;CHECK-LABEL: vcvt_n_u32tof32:
;CHECK: vcvt.f32.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
@@ -102,7 +102,7 @@ declare <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwi
declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_n_f32tos32:
+;CHECK-LABEL: vcvtQ_n_f32tos32:
;CHECK: vcvt.s32.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %tmp1, i32 1)
@@ -110,7 +110,7 @@ define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
}
define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_n_f32tou32:
+;CHECK-LABEL: vcvtQ_n_f32tou32:
;CHECK: vcvt.u32.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %tmp1, i32 1)
@@ -118,7 +118,7 @@ define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
}
define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_n_s32tof32:
+;CHECK-LABEL: vcvtQ_n_s32tof32:
;CHECK: vcvt.f32.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
@@ -126,7 +126,7 @@ define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
}
define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_n_u32tof32:
+;CHECK-LABEL: vcvtQ_n_u32tof32:
;CHECK: vcvt.f32.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
@@ -139,7 +139,7 @@ declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwi
declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
-;CHECK: vcvt_f16tof32:
+;CHECK-LABEL: vcvt_f16tof32:
;CHECK: vcvt.f32.f16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16> %tmp1)
@@ -147,7 +147,7 @@ define <4 x float> @vcvt_f16tof32(<4 x i16>* %A) nounwind {
}
define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
-;CHECK: vcvt_f32tof16:
+;CHECK-LABEL: vcvt_f32tof16:
;CHECK: vcvt.f16.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float> %tmp1)
@@ -156,3 +156,44 @@ define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
+
+
+define <4 x i16> @fix_float_to_i16(<4 x float> %in) {
+; CHECK-LABEL: fix_float_to_i16:
+; CHECK: vcvt.u32.f32 [[TMP:q[0-9]+]], {{q[0-9]+}}, #1
+; CHECK: vmovn.i32 {{d[0-9]+}}, [[TMP]]
+
+ %scale = fmul <4 x float> %in, <float 2.0, float 2.0, float 2.0, float 2.0>
+ %conv = fptoui <4 x float> %scale to <4 x i16>
+ ret <4 x i16> %conv
+}
+
+define <2 x i64> @fix_float_to_i64(<2 x float> %in) {
+; CHECK-LABEL: fix_float_to_i64:
+; CHECK: bl
+; CHECK: bl
+
+ %scale = fmul <2 x float> %in, <float 2.0, float 2.0>
+ %conv = fptoui <2 x float> %scale to <2 x i64>
+ ret <2 x i64> %conv
+}
+
+define <4 x i16> @fix_double_to_i16(<4 x double> %in) {
+; CHECK-LABEL: fix_double_to_i16:
+; CHECK: vcvt.s32.f64
+; CHECK: vcvt.s32.f64
+
+ %scale = fmul <4 x double> %in, <double 2.0, double 2.0, double 2.0, double 2.0>
+ %conv = fptoui <4 x double> %scale to <4 x i16>
+ ret <4 x i16> %conv
+}
+
+define <2 x i64> @fix_double_to_i64(<2 x double> %in) {
+; CHECK-LABEL: fix_double_to_i64:
+; CHECK: bl
+; CHECK: bl
+ %scale = fmul <2 x double> %in, <double 2.0, double 2.0>
+ %conv = fptoui <2 x double> %scale to <2 x i64>
+ ret <2 x i64> %conv
+}
+
diff --git a/test/CodeGen/ARM/vdiv_combine.ll b/test/CodeGen/ARM/vdiv_combine.ll
index e6f1338b8539..96807f7280f8 100644
--- a/test/CodeGen/ARM/vdiv_combine.ll
+++ b/test/CodeGen/ARM/vdiv_combine.ll
@@ -95,3 +95,44 @@ entry:
}
declare void @foo_float32x4_t(<4 x float>)
+
+define <4 x float> @fix_unsigned_i16_to_float(<4 x i16> %in) {
+; CHECK-LABEL: fix_unsigned_i16_to_float:
+; CHECK: vmovl.u16 [[TMP:q[0-9]+]], {{d[0-9]+}}
+; CHECK: vcvt.f32.u32 {{q[0-9]+}}, [[TMP]], #1
+
+ %conv = uitofp <4 x i16> %in to <4 x float>
+ %shift = fdiv <4 x float> %conv, <float 2.0, float 2.0, float 2.0, float 2.0>
+ ret <4 x float> %shift
+}
+
+define <4 x float> @fix_signed_i16_to_float(<4 x i16> %in) {
+; CHECK-LABEL: fix_signed_i16_to_float:
+; CHECK: vmovl.s16 [[TMP:q[0-9]+]], {{d[0-9]+}}
+; CHECK: vcvt.f32.s32 {{q[0-9]+}}, [[TMP]], #1
+
+ %conv = sitofp <4 x i16> %in to <4 x float>
+ %shift = fdiv <4 x float> %conv, <float 2.0, float 2.0, float 2.0, float 2.0>
+ ret <4 x float> %shift
+}
+
+define <2 x float> @fix_i64_to_float(<2 x i64> %in) {
+; CHECK-LABEL: fix_i64_to_float:
+; CHECK: bl
+; CHECK: bl
+
+ %conv = uitofp <2 x i64> %in to <2 x float>
+ %shift = fdiv <2 x float> %conv, <float 2.0, float 2.0>
+ ret <2 x float> %shift
+}
+
+define <2 x double> @fix_i64_to_double(<2 x i64> %in) {
+; CHECK-LABEL: fix_i64_to_double:
+; CHECK: bl
+; CHECK: bl
+
+ %conv = uitofp <2 x i64> %in to <2 x double>
+ %shift = fdiv <2 x double> %conv, <double 2.0, double 2.0>
+ ret <2 x double> %shift
+}
+
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index 2cf94d63ca14..b24be2654dfc 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @v_dup8(i8 %A) nounwind {
-;CHECK: v_dup8:
+;CHECK-LABEL: v_dup8:
;CHECK: vdup.8
%tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
%tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
@@ -15,7 +15,7 @@ define <8 x i8> @v_dup8(i8 %A) nounwind {
}
define <4 x i16> @v_dup16(i16 %A) nounwind {
-;CHECK: v_dup16:
+;CHECK-LABEL: v_dup16:
;CHECK: vdup.16
%tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
%tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
@@ -25,7 +25,7 @@ define <4 x i16> @v_dup16(i16 %A) nounwind {
}
define <2 x i32> @v_dup32(i32 %A) nounwind {
-;CHECK: v_dup32:
+;CHECK-LABEL: v_dup32:
;CHECK: vdup.32
%tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
%tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
@@ -33,7 +33,7 @@ define <2 x i32> @v_dup32(i32 %A) nounwind {
}
define <2 x float> @v_dupfloat(float %A) nounwind {
-;CHECK: v_dupfloat:
+;CHECK-LABEL: v_dupfloat:
;CHECK: vdup.32
%tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
%tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
@@ -41,7 +41,7 @@ define <2 x float> @v_dupfloat(float %A) nounwind {
}
define <16 x i8> @v_dupQ8(i8 %A) nounwind {
-;CHECK: v_dupQ8:
+;CHECK-LABEL: v_dupQ8:
;CHECK: vdup.8
%tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
%tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32 1
@@ -63,7 +63,7 @@ define <16 x i8> @v_dupQ8(i8 %A) nounwind {
}
define <8 x i16> @v_dupQ16(i16 %A) nounwind {
-;CHECK: v_dupQ16:
+;CHECK-LABEL: v_dupQ16:
;CHECK: vdup.16
%tmp1 = insertelement <8 x i16> zeroinitializer, i16 %A, i32 0
%tmp2 = insertelement <8 x i16> %tmp1, i16 %A, i32 1
@@ -77,7 +77,7 @@ define <8 x i16> @v_dupQ16(i16 %A) nounwind {
}
define <4 x i32> @v_dupQ32(i32 %A) nounwind {
-;CHECK: v_dupQ32:
+;CHECK-LABEL: v_dupQ32:
;CHECK: vdup.32
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %A, i32 0
%tmp2 = insertelement <4 x i32> %tmp1, i32 %A, i32 1
@@ -87,7 +87,7 @@ define <4 x i32> @v_dupQ32(i32 %A) nounwind {
}
define <4 x float> @v_dupQfloat(float %A) nounwind {
-;CHECK: v_dupQfloat:
+;CHECK-LABEL: v_dupQfloat:
;CHECK: vdup.32
%tmp1 = insertelement <4 x float> zeroinitializer, float %A, i32 0
%tmp2 = insertelement <4 x float> %tmp1, float %A, i32 1
@@ -99,7 +99,7 @@ define <4 x float> @v_dupQfloat(float %A) nounwind {
; Check to make sure it works with shuffles, too.
define <8 x i8> @v_shuffledup8(i8 %A) nounwind {
-;CHECK: v_shuffledup8:
+;CHECK-LABEL: v_shuffledup8:
;CHECK: vdup.8
%tmp1 = insertelement <8 x i8> undef, i8 %A, i32 0
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -107,7 +107,7 @@ define <8 x i8> @v_shuffledup8(i8 %A) nounwind {
}
define <4 x i16> @v_shuffledup16(i16 %A) nounwind {
-;CHECK: v_shuffledup16:
+;CHECK-LABEL: v_shuffledup16:
;CHECK: vdup.16
%tmp1 = insertelement <4 x i16> undef, i16 %A, i32 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -115,7 +115,7 @@ define <4 x i16> @v_shuffledup16(i16 %A) nounwind {
}
define <2 x i32> @v_shuffledup32(i32 %A) nounwind {
-;CHECK: v_shuffledup32:
+;CHECK-LABEL: v_shuffledup32:
;CHECK: vdup.32
%tmp1 = insertelement <2 x i32> undef, i32 %A, i32 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -123,7 +123,7 @@ define <2 x i32> @v_shuffledup32(i32 %A) nounwind {
}
define <2 x float> @v_shuffledupfloat(float %A) nounwind {
-;CHECK: v_shuffledupfloat:
+;CHECK-LABEL: v_shuffledupfloat:
;CHECK: vdup.32
%tmp1 = insertelement <2 x float> undef, float %A, i32 0
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
@@ -131,7 +131,7 @@ define <2 x float> @v_shuffledupfloat(float %A) nounwind {
}
define <16 x i8> @v_shuffledupQ8(i8 %A) nounwind {
-;CHECK: v_shuffledupQ8:
+;CHECK-LABEL: v_shuffledupQ8:
;CHECK: vdup.8
%tmp1 = insertelement <16 x i8> undef, i8 %A, i32 0
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -139,7 +139,7 @@ define <16 x i8> @v_shuffledupQ8(i8 %A) nounwind {
}
define <8 x i16> @v_shuffledupQ16(i16 %A) nounwind {
-;CHECK: v_shuffledupQ16:
+;CHECK-LABEL: v_shuffledupQ16:
;CHECK: vdup.16
%tmp1 = insertelement <8 x i16> undef, i16 %A, i32 0
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -147,7 +147,7 @@ define <8 x i16> @v_shuffledupQ16(i16 %A) nounwind {
}
define <4 x i32> @v_shuffledupQ32(i32 %A) nounwind {
-;CHECK: v_shuffledupQ32:
+;CHECK-LABEL: v_shuffledupQ32:
;CHECK: vdup.32
%tmp1 = insertelement <4 x i32> undef, i32 %A, i32 0
%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -155,7 +155,7 @@ define <4 x i32> @v_shuffledupQ32(i32 %A) nounwind {
}
define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
-;CHECK: v_shuffledupQfloat:
+;CHECK-LABEL: v_shuffledupQfloat:
;CHECK: vdup.32
%tmp1 = insertelement <4 x float> undef, float %A, i32 0
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
@@ -163,7 +163,7 @@ define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
}
define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
-;CHECK: vduplane8:
+;CHECK-LABEL: vduplane8:
;CHECK: vdup.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
@@ -171,7 +171,7 @@ define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
-;CHECK: vduplane16:
+;CHECK-LABEL: vduplane16:
;CHECK: vdup.16
%tmp1 = load <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
@@ -179,7 +179,7 @@ define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
-;CHECK: vduplane32:
+;CHECK-LABEL: vduplane32:
;CHECK: vdup.32
%tmp1 = load <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
@@ -187,7 +187,7 @@ define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
}
define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
-;CHECK: vduplanefloat:
+;CHECK-LABEL: vduplanefloat:
;CHECK: vdup.32
%tmp1 = load <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
@@ -195,7 +195,7 @@ define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
}
define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
-;CHECK: vduplaneQ8:
+;CHECK-LABEL: vduplaneQ8:
;CHECK: vdup.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
@@ -203,7 +203,7 @@ define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
}
define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
-;CHECK: vduplaneQ16:
+;CHECK-LABEL: vduplaneQ16:
;CHECK: vdup.16
%tmp1 = load <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
@@ -211,7 +211,7 @@ define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
}
define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
-;CHECK: vduplaneQ32:
+;CHECK-LABEL: vduplaneQ32:
;CHECK: vdup.32
%tmp1 = load <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
@@ -219,7 +219,7 @@ define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
}
define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
-;CHECK: vduplaneQfloat:
+;CHECK-LABEL: vduplaneQfloat:
;CHECK: vdup.32
%tmp1 = load <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
@@ -251,7 +251,7 @@ entry:
}
; Radar 7373643
-;CHECK: redundantVdup:
+;CHECK-LABEL: redundantVdup:
;CHECK: vmov.i8
;CHECK-NOT: vdup.8
;CHECK: vstr
@@ -263,7 +263,7 @@ define void @redundantVdup(<8 x i8>* %ptr) nounwind {
}
define <4 x i32> @tdupi(i32 %x, i32 %y) {
-;CHECK: tdupi
+;CHECK-LABEL: tdupi:
;CHECK: vdup.32
%1 = insertelement <4 x i32> undef, i32 %x, i32 0
%2 = insertelement <4 x i32> %1, i32 %x, i32 1
@@ -273,7 +273,7 @@ define <4 x i32> @tdupi(i32 %x, i32 %y) {
}
define <4 x float> @tdupf(float %x, float %y) {
-;CHECK: tdupf
+;CHECK-LABEL: tdupf:
;CHECK: vdup.32
%1 = insertelement <4 x float> undef, float %x, i32 0
%2 = insertelement <4 x float> %1, float %x, i32 1
@@ -285,7 +285,7 @@ define <4 x float> @tdupf(float %x, float %y) {
; This test checks that when splatting an element from a vector into another,
; the value isn't moved out to GPRs first.
define <4 x i32> @tduplane(<4 x i32> %invec) {
-;CHECK: tduplane
+;CHECK-LABEL: tduplane:
;CHECK-NOT: vmov {{.*}}, d16[1]
;CHECK: vdup.32 {{.*}}, d16[1]
%in = extractelement <4 x i32> %invec, i32 1
@@ -297,7 +297,7 @@ define <4 x i32> @tduplane(<4 x i32> %invec) {
}
define <2 x float> @check_f32(<4 x float> %v) nounwind {
-;CHECK: check_f32:
+;CHECK-LABEL: check_f32:
;CHECK: vdup.32 {{.*}}, d{{..}}[1]
%x = extractelement <4 x float> %v, i32 3
%1 = insertelement <2 x float> undef, float %x, i32 0
@@ -306,7 +306,7 @@ define <2 x float> @check_f32(<4 x float> %v) nounwind {
}
define <2 x i32> @check_i32(<4 x i32> %v) nounwind {
-;CHECK: check_i32:
+;CHECK-LABEL: check_i32:
;CHECK: vdup.32 {{.*}}, d{{..}}[1]
%x = extractelement <4 x i32> %v, i32 3
%1 = insertelement <2 x i32> undef, i32 %x, i32 0
@@ -315,7 +315,7 @@ define <2 x i32> @check_i32(<4 x i32> %v) nounwind {
}
define <4 x i16> @check_i16(<8 x i16> %v) nounwind {
-;CHECK: check_i16:
+;CHECK-LABEL: check_i16:
;CHECK: vdup.16 {{.*}}, d{{..}}[3]
%x = extractelement <8 x i16> %v, i32 3
%1 = insertelement <4 x i16> undef, i16 %x, i32 0
@@ -324,7 +324,7 @@ define <4 x i16> @check_i16(<8 x i16> %v) nounwind {
}
define <8 x i8> @check_i8(<16 x i8> %v) nounwind {
-;CHECK: check_i8:
+;CHECK-LABEL: check_i8:
;CHECK: vdup.8 {{.*}}, d{{..}}[3]
%x = extractelement <16 x i8> %v, i32 3
%1 = insertelement <8 x i8> undef, i8 %x, i32 0
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 42964deb0b5e..759da2235e41 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -29,7 +29,7 @@ entry:
; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
; converted back to be used as a vector type.
-; CHECK: test_vmovrrd_combine
+; CHECK-LABEL: test_vmovrrd_combine:
define <4 x i32> @test_vmovrrd_combine() nounwind {
entry:
br i1 undef, label %bb1, label %bb2
@@ -136,7 +136,7 @@ define i16 @foldBuildVectors() {
; Test that we are generating vrev and vext for reverse shuffles of v8i16
; shuffles.
-; CHECK: reverse_v8i16
+; CHECK-LABEL: reverse_v8i16:
define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
%v0 = load <8 x i16>* %loadaddr
; CHECK: vrev64.16
@@ -149,7 +149,7 @@ define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
; Test that we are generating vrev and vext for reverse shuffles of v16i8
; shuffles.
-; CHECK: reverse_v16i8
+; CHECK-LABEL: reverse_v16i8:
define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
%v0 = load <16 x i8>* %loadaddr
; CHECK: vrev64.8
@@ -160,3 +160,87 @@ define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
store <16 x i8> %v1, <16 x i8>* %storeaddr
ret void
}
+
+; <rdar://problem/14170854>.
+; vldr cannot handle unaligned loads.
+; Fall back to vld1.32, which can, instead of using the general purpose loads
+; followed by a costly sequence of instructions to build the vector register.
+; CHECK-LABEL: t3:
+; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}
+; CHECK: vld1.32 {[[REG]][1]}
+; CHECK: vmull.u8 q{{[0-9]+}}, [[REG]], [[REG]]
+define <8 x i16> @t3(i8 zeroext %xf, i8* nocapture %sp0, i8* nocapture %sp1, i32* nocapture %outp) {
+entry:
+ %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
+ %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+ %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
+ %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
+ %vecinit = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %vecinit1 = insertelement <2 x i32> %vecinit, i32 %pix_sp1.0.copyload, i32 1
+ %0 = bitcast <2 x i32> %vecinit1 to <8 x i8>
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %0)
+ ret <8 x i16> %vmull.i
+}
+
+; Function Attrs: nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
+
+; Check that (insert_vector_elt (load)) => (vector_load).
+; Thus, check that scalar_to_vector does not interfere with that.
+define <8 x i16> @t4(i8* nocapture %sp0) {
+; CHECK-LABEL: t4:
+; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
+entry:
+ %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
+ %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+ %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
+ %0 = bitcast <2 x i32> %vec to <8 x i8>
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %0)
+ ret <8 x i16> %vmull.i
+}
+
+; Make sure vector load is used for all three loads.
+; Lowering to build vector was breaking the single use property of the load of
+; %pix_sp0.0.copyload.
+; CHECK-LABEL: t5:
+; CHECK: vld1.32 {[[REG1:d[0-9]+]][1]}, [r0]
+; CHECK: vorr [[REG2:d[0-9]+]], [[REG1]], [[REG1]]
+; CHECK: vld1.32 {[[REG1]][0]}, [r1]
+; CHECK: vld1.32 {[[REG2]][0]}, [r2]
+; CHECK: vmull.u8 q{{[0-9]+}}, [[REG1]], [[REG2]]
+define <8 x i16> @t5(i8* nocapture %sp0, i8* nocapture %sp1, i8* nocapture %sp2) {
+entry:
+ %pix_sp0.0.cast = bitcast i8* %sp0 to i32*
+ %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
+ %pix_sp1.0.cast = bitcast i8* %sp1 to i32*
+ %pix_sp1.0.copyload = load i32* %pix_sp1.0.cast, align 1
+ %pix_sp2.0.cast = bitcast i8* %sp2 to i32*
+ %pix_sp2.0.copyload = load i32* %pix_sp2.0.cast, align 1
+ %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 1
+ %vecinit1 = insertelement <2 x i32> %vec, i32 %pix_sp1.0.copyload, i32 0
+ %vecinit2 = insertelement <2 x i32> %vec, i32 %pix_sp2.0.copyload, i32 0
+ %0 = bitcast <2 x i32> %vecinit1 to <8 x i8>
+ %1 = bitcast <2 x i32> %vecinit2 to <8 x i8>
+ %vmull.i = tail call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %0, <8 x i8> %1)
+ ret <8 x i16> %vmull.i
+}
+
+; <rdar://problem/14989896> Make sure we manage to truncate a vector from an
+; illegal type to a legal type.
+define <2 x i8> @test_truncate(<2 x i128> %in) {
+; CHECK-LABEL: test_truncate:
+; CHECK: mov [[BASE:r[0-9]+]], sp
+; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
+; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
+; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
+; REG2 should map onto the same Q register as REG1, i.e., REG2 = REG1 - 1, but we
+; cannot express that.
+; CHECK-NEXT: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK-NEXT: vmov.32 [[REG2]][1], r1
+; The Q register used here should match floor(REG1/2), but we cannot express that.
+; CHECK-NEXT: vmovn.i64 [[RES:d[0-9]+]], q{{[0-9]+}}
+; CHECK-NEXT: vmov r0, r1, [[RES]]
+entry:
+ %res = trunc <2 x i128> %in to <2 x i8>
+ ret <2 x i8> %res
+}
diff --git a/test/CodeGen/ARM/vector-extend-narrow.ll b/test/CodeGen/ARM/vector-extend-narrow.ll
index 22af79762128..f3218969c78e 100644
--- a/test/CodeGen/ARM/vector-extend-narrow.ll
+++ b/test/CodeGen/ARM/vector-extend-narrow.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
-; CHECK: f:
+; CHECK-LABEL: f:
define float @f(<4 x i16>* nocapture %in) {
; CHECK: vldr
; CHECK: vmovl.u16
@@ -18,7 +18,7 @@ define float @f(<4 x i16>* nocapture %in) {
ret float %7
}
-; CHECK: g:
+; CHECK-LABEL: g:
define float @g(<4 x i8>* nocapture %in) {
; Note: vld1 here is reasonably important. Mixing VFP and NEON
; instructions is bad on some cores
@@ -39,7 +39,7 @@ define float @g(<4 x i8>* nocapture %in) {
ret float %7
}
-; CHECK: h:
+; CHECK-LABEL: h:
define <4 x i8> @h(<4 x float> %v) {
; CHECK: vcvt.{{[us]}}32.f32
; CHECK: vmovn.i32
@@ -47,7 +47,7 @@ define <4 x i8> @h(<4 x float> %v) {
ret <4 x i8> %1
}
-; CHECK: i:
+; CHECK-LABEL: i:
define <4 x i8> @i(<4 x i8>* %x) {
; Note: vld1 here is reasonably important. Mixing VFP and NEON
; instructions is bad on some cores
@@ -62,7 +62,7 @@ define <4 x i8> @i(<4 x i8>* %x) {
%2 = sdiv <4 x i8> zeroinitializer, %1
ret <4 x i8> %2
}
-; CHECK: j:
+; CHECK-LABEL: j:
define <4 x i32> @j(<4 x i8>* %in) nounwind {
; CHECK: vld1
; CHECK: vmovl.u8
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index f404eb8be5b7..5555a4759b00 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: test_vextd:
+;CHECK-LABEL: test_vextd:
;CHECK: vext
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: test_vextRd:
+;CHECK-LABEL: test_vextRd:
;CHECK: vext
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -19,7 +19,7 @@ define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: test_vextq:
+;CHECK-LABEL: test_vextq:
;CHECK: vext
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -28,7 +28,7 @@ define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: test_vextRq:
+;CHECK-LABEL: test_vextRq:
;CHECK: vext
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -37,7 +37,7 @@ define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: test_vextd16:
+;CHECK-LABEL: test_vextd16:
;CHECK: vext
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: test_vextq32:
+;CHECK-LABEL: test_vextq32:
;CHECK: vext
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -57,7 +57,7 @@ define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; Undef shuffle indices should not prevent matching to VEXT:
define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: test_vextd_undef:
+;CHECK-LABEL: test_vextd_undef:
;CHECK: vext
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -66,7 +66,7 @@ define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: test_vextRq_undef:
+;CHECK-LABEL: test_vextRq_undef:
;CHECK: vext
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -75,7 +75,7 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind {
-;CHECK: test_vextq_undef_op2:
+;CHECK-LABEL: test_vextq_undef_op2:
;CHECK: vext
entry:
%tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
@@ -83,7 +83,7 @@ entry:
}
define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind {
-;CHECK: test_vextd_undef_op2:
+;CHECK-LABEL: test_vextd_undef_op2:
;CHECK: vext
entry:
%tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
@@ -92,7 +92,7 @@ entry:
define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind {
-;CHECK: test_vextq_undef_op2_undef:
+;CHECK-LABEL: test_vextq_undef_op2_undef:
;CHECK: vext
entry:
%tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
@@ -100,7 +100,7 @@ entry:
}
define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind {
-;CHECK: test_vextd_undef_op2_undef:
+;CHECK-LABEL: test_vextd_undef_op2_undef:
;CHECK: vext
entry:
%tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 1>
@@ -114,7 +114,7 @@ entry:
; Also checks interleaving of sources is handled correctly.
; Essence: a vext is used on %A and something saner than stack load/store for final result.
define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: test_interleaved:
+;CHECK-LABEL: test_interleaved:
;CHECK: vext.16
;CHECK-NOT: vext.16
;CHECK: vzip.16
@@ -126,7 +126,7 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; An undef in the shuffle list should still be optimizable
define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: test_undef:
+;CHECK-LABEL: test_undef:
;CHECK: vzip.16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -136,20 +136,26 @@ define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; We should ignore a build_vector with more than two sources.
; Use illegal <32 x i16> type to produce such a shuffle after legalizing types.
-; Try to look for fallback to stack expansion.
+; Try to look for fallback to by-element inserts.
define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
-;CHECK: test_multisource:
-;CHECK: vst1.16
+;CHECK-LABEL: test_multisource:
+;CHECK: vmov.16 [[REG:d[0-9]+]][0]
+;CHECK: vmov.16 [[REG]][1]
+;CHECK: vmov.16 [[REG]][2]
+;CHECK: vmov.16 [[REG]][3]
%tmp1 = load <32 x i16>* %B
%tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
ret <4 x i16> %tmp2
}
; We don't handle shuffles using more than half of a 128-bit vector.
-; Again, test for fallback to stack expansion
+; Again, test for fallback to by-element inserts.
define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
-;CHECK: test_largespan:
-;CHECK: vst1.16
+;CHECK-LABEL: test_largespan:
+;CHECK: vmov.16 [[REG:d[0-9]+]][0]
+;CHECK: vmov.16 [[REG]][1]
+;CHECK: vmov.16 [[REG]][2]
+;CHECK: vmov.16 [[REG]][3]
%tmp1 = load <8 x i16>* %B
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
ret <4 x i16> %tmp2
@@ -159,8 +165,15 @@ define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
; this rather than blindly emitting a VECTOR_SHUFFLE (infinite
; lowering loop can result otherwise).
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: test_illegal:
-;CHECK: vst1.16
+;CHECK-LABEL: test_illegal:
+;CHECK: vmov.16 [[REG:d[0-9]+]][0]
+;CHECK: vmov.16 [[REG]][1]
+;CHECK: vmov.16 [[REG]][2]
+;CHECK: vmov.16 [[REG]][3]
+;CHECK: vmov.16 [[REG2:d[0-9]+]][0]
+;CHECK: vmov.16 [[REG2]][1]
+;CHECK: vmov.16 [[REG2]][2]
+;CHECK: vmov.16 [[REG2]][3]
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
@@ -170,7 +183,7 @@ define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; PR11129
; Make sure this doesn't crash
define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
-; CHECK: test_elem_mismatch:
+; CHECK-LABEL: test_elem_mismatch:
; CHECK: vstr
%tmp0 = load <2 x i64>* %src, align 16
%tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
diff --git a/test/CodeGen/ARM/vfcmp.ll b/test/CodeGen/ARM/vfcmp.ll
index 6946d02637ea..a23db7be7615 100644
--- a/test/CodeGen/ARM/vfcmp.ll
+++ b/test/CodeGen/ARM/vfcmp.ll
@@ -4,7 +4,7 @@
; une is implemented with VCEQ/VMVN
define <2 x i32> @vcunef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcunef32:
+;CHECK-LABEL: vcunef32:
;CHECK: vceq.f32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x float>* %A
@@ -16,7 +16,7 @@ define <2 x i32> @vcunef32(<2 x float>* %A, <2 x float>* %B) nounwind {
; olt is implemented with VCGT
define <2 x i32> @vcoltf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcoltf32:
+;CHECK-LABEL: vcoltf32:
;CHECK: vcgt.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -27,7 +27,7 @@ define <2 x i32> @vcoltf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ole is implemented with VCGE
define <2 x i32> @vcolef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcolef32:
+;CHECK-LABEL: vcolef32:
;CHECK: vcge.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -38,7 +38,7 @@ define <2 x i32> @vcolef32(<2 x float>* %A, <2 x float>* %B) nounwind {
; uge is implemented with VCGT/VMVN
define <2 x i32> @vcugef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcugef32:
+;CHECK-LABEL: vcugef32:
;CHECK: vcgt.f32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x float>* %A
@@ -50,7 +50,7 @@ define <2 x i32> @vcugef32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ule is implemented with VCGT/VMVN
define <2 x i32> @vculef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vculef32:
+;CHECK-LABEL: vculef32:
;CHECK: vcgt.f32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x float>* %A
@@ -62,7 +62,7 @@ define <2 x i32> @vculef32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ugt is implemented with VCGE/VMVN
define <2 x i32> @vcugtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcugtf32:
+;CHECK-LABEL: vcugtf32:
;CHECK: vcge.f32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x float>* %A
@@ -74,7 +74,7 @@ define <2 x i32> @vcugtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ult is implemented with VCGE/VMVN
define <2 x i32> @vcultf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcultf32:
+;CHECK-LABEL: vcultf32:
;CHECK: vcge.f32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x float>* %A
@@ -86,7 +86,7 @@ define <2 x i32> @vcultf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ueq is implemented with VCGT/VCGT/VORR/VMVN
define <2 x i32> @vcueqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcueqf32:
+;CHECK-LABEL: vcueqf32:
;CHECK: vcgt.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
@@ -100,7 +100,7 @@ define <2 x i32> @vcueqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
; one is implemented with VCGT/VCGT/VORR
define <2 x i32> @vconef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vconef32:
+;CHECK-LABEL: vconef32:
;CHECK: vcgt.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
@@ -113,7 +113,7 @@ define <2 x i32> @vconef32(<2 x float>* %A, <2 x float>* %B) nounwind {
; uno is implemented with VCGT/VCGE/VORR/VMVN
define <2 x i32> @vcunof32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcunof32:
+;CHECK-LABEL: vcunof32:
;CHECK: vcge.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
@@ -127,7 +127,7 @@ define <2 x i32> @vcunof32(<2 x float>* %A, <2 x float>* %B) nounwind {
; ord is implemented with VCGT/VCGE/VORR
define <2 x i32> @vcordf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcordf32:
+;CHECK-LABEL: vcordf32:
;CHECK: vcge.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 7a4b34f4a3f0..5d2943cbfd2f 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -14,7 +14,7 @@ declare float @fabsf(float)
declare double @fabs(double)
define void @test_abs(float* %P, double* %D) {
-;CHECK: test_abs:
+;CHECK-LABEL: test_abs:
%a = load float* %P ; <float> [#uses=1]
;CHECK: vabs.f32
%b = call float @fabsf( float %a ) readnone ; <float> [#uses=1]
@@ -27,7 +27,7 @@ define void @test_abs(float* %P, double* %D) {
}
define void @test_add(float* %P, double* %D) {
-;CHECK: test_add:
+;CHECK-LABEL: test_add:
%a = load float* %P ; <float> [#uses=2]
%b = fadd float %a, %a ; <float> [#uses=1]
store float %b, float* %P
@@ -38,7 +38,7 @@ define void @test_add(float* %P, double* %D) {
}
define void @test_ext_round(float* %P, double* %D) {
-;CHECK: test_ext_round:
+;CHECK-LABEL: test_ext_round:
%a = load float* %P ; <float> [#uses=1]
;CHECK: vcvt.f64.f32
;CHECK: vcvt.f32.f64
@@ -51,7 +51,7 @@ define void @test_ext_round(float* %P, double* %D) {
}
define void @test_fma(float* %P1, float* %P2, float* %P3) {
-;CHECK: test_fma:
+;CHECK-LABEL: test_fma:
%a1 = load float* %P1 ; <float> [#uses=1]
%a2 = load float* %P2 ; <float> [#uses=1]
%a3 = load float* %P3 ; <float> [#uses=1]
@@ -63,7 +63,7 @@ define void @test_fma(float* %P1, float* %P2, float* %P3) {
}
define i32 @test_ftoi(float* %P1) {
-;CHECK: test_ftoi:
+;CHECK-LABEL: test_ftoi:
%a1 = load float* %P1 ; <float> [#uses=1]
;CHECK: vcvt.s32.f32
%b1 = fptosi float %a1 to i32 ; <i32> [#uses=1]
@@ -71,7 +71,7 @@ define i32 @test_ftoi(float* %P1) {
}
define i32 @test_ftou(float* %P1) {
-;CHECK: test_ftou:
+;CHECK-LABEL: test_ftou:
%a1 = load float* %P1 ; <float> [#uses=1]
;CHECK: vcvt.u32.f32
%b1 = fptoui float %a1 to i32 ; <i32> [#uses=1]
@@ -79,7 +79,7 @@ define i32 @test_ftou(float* %P1) {
}
define i32 @test_dtoi(double* %P1) {
-;CHECK: test_dtoi:
+;CHECK-LABEL: test_dtoi:
%a1 = load double* %P1 ; <double> [#uses=1]
;CHECK: vcvt.s32.f64
%b1 = fptosi double %a1 to i32 ; <i32> [#uses=1]
@@ -87,7 +87,7 @@ define i32 @test_dtoi(double* %P1) {
}
define i32 @test_dtou(double* %P1) {
-;CHECK: test_dtou:
+;CHECK-LABEL: test_dtou:
%a1 = load double* %P1 ; <double> [#uses=1]
;CHECK: vcvt.u32.f64
%b1 = fptoui double %a1 to i32 ; <i32> [#uses=1]
@@ -95,7 +95,7 @@ define i32 @test_dtou(double* %P1) {
}
define void @test_utod(double* %P1, i32 %X) {
-;CHECK: test_utod:
+;CHECK-LABEL: test_utod:
;CHECK: vcvt.f64.u32
%b1 = uitofp i32 %X to double ; <double> [#uses=1]
store double %b1, double* %P1
@@ -103,7 +103,7 @@ define void @test_utod(double* %P1, i32 %X) {
}
define void @test_utod2(double* %P1, i8 %X) {
-;CHECK: test_utod2:
+;CHECK-LABEL: test_utod2:
;CHECK: vcvt.f64.u32
%b1 = uitofp i8 %X to double ; <double> [#uses=1]
store double %b1, double* %P1
@@ -111,7 +111,7 @@ define void @test_utod2(double* %P1, i8 %X) {
}
define void @test_cmp(float* %glob, i32 %X) {
-;CHECK: test_cmp:
+;CHECK-LABEL: test_cmp:
entry:
%tmp = load float* %glob ; <float> [#uses=2]
%tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
@@ -139,7 +139,7 @@ declare i32 @bar(...)
declare i32 @baz(...)
define void @test_cmpfp0(float* %glob, i32 %X) {
-;CHECK: test_cmpfp0:
+;CHECK-LABEL: test_cmpfp0:
entry:
%tmp = load float* %glob ; <float> [#uses=1]
;CHECK: vcmpe.f32
diff --git a/test/CodeGen/ARM/vget_lane.ll b/test/CodeGen/ARM/vget_lane.ll
index c9ce3b7450b6..2518ee2278cc 100644
--- a/test/CodeGen/ARM/vget_lane.ll
+++ b/test/CodeGen/ARM/vget_lane.ll
@@ -3,7 +3,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
target triple = "thumbv7-elf"
define i32 @vget_lanes8(<8 x i8>* %A) nounwind {
-;CHECK: vget_lanes8:
+;CHECK-LABEL: vget_lanes8:
;CHECK: vmov.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = extractelement <8 x i8> %tmp1, i32 1
@@ -12,7 +12,7 @@ define i32 @vget_lanes8(<8 x i8>* %A) nounwind {
}
define i32 @vget_lanes16(<4 x i16>* %A) nounwind {
-;CHECK: vget_lanes16:
+;CHECK-LABEL: vget_lanes16:
;CHECK: vmov.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = extractelement <4 x i16> %tmp1, i32 1
@@ -21,7 +21,7 @@ define i32 @vget_lanes16(<4 x i16>* %A) nounwind {
}
define i32 @vget_laneu8(<8 x i8>* %A) nounwind {
-;CHECK: vget_laneu8:
+;CHECK-LABEL: vget_laneu8:
;CHECK: vmov.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = extractelement <8 x i8> %tmp1, i32 1
@@ -30,7 +30,7 @@ define i32 @vget_laneu8(<8 x i8>* %A) nounwind {
}
define i32 @vget_laneu16(<4 x i16>* %A) nounwind {
-;CHECK: vget_laneu16:
+;CHECK-LABEL: vget_laneu16:
;CHECK: vmov.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = extractelement <4 x i16> %tmp1, i32 1
@@ -40,7 +40,7 @@ define i32 @vget_laneu16(<4 x i16>* %A) nounwind {
; Do a vector add to keep the extraction from being done directly from memory.
define i32 @vget_lanei32(<2 x i32>* %A) nounwind {
-;CHECK: vget_lanei32:
+;CHECK-LABEL: vget_lanei32:
;CHECK: vmov.32
%tmp1 = load <2 x i32>* %A
%tmp2 = add <2 x i32> %tmp1, %tmp1
@@ -49,7 +49,7 @@ define i32 @vget_lanei32(<2 x i32>* %A) nounwind {
}
define i32 @vgetQ_lanes8(<16 x i8>* %A) nounwind {
-;CHECK: vgetQ_lanes8:
+;CHECK-LABEL: vgetQ_lanes8:
;CHECK: vmov.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = extractelement <16 x i8> %tmp1, i32 1
@@ -58,7 +58,7 @@ define i32 @vgetQ_lanes8(<16 x i8>* %A) nounwind {
}
define i32 @vgetQ_lanes16(<8 x i16>* %A) nounwind {
-;CHECK: vgetQ_lanes16:
+;CHECK-LABEL: vgetQ_lanes16:
;CHECK: vmov.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = extractelement <8 x i16> %tmp1, i32 1
@@ -67,7 +67,7 @@ define i32 @vgetQ_lanes16(<8 x i16>* %A) nounwind {
}
define i32 @vgetQ_laneu8(<16 x i8>* %A) nounwind {
-;CHECK: vgetQ_laneu8:
+;CHECK-LABEL: vgetQ_laneu8:
;CHECK: vmov.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = extractelement <16 x i8> %tmp1, i32 1
@@ -76,7 +76,7 @@ define i32 @vgetQ_laneu8(<16 x i8>* %A) nounwind {
}
define i32 @vgetQ_laneu16(<8 x i16>* %A) nounwind {
-;CHECK: vgetQ_laneu16:
+;CHECK-LABEL: vgetQ_laneu16:
;CHECK: vmov.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = extractelement <8 x i16> %tmp1, i32 1
@@ -86,7 +86,7 @@ define i32 @vgetQ_laneu16(<8 x i16>* %A) nounwind {
; Do a vector add to keep the extraction from being done directly from memory.
define i32 @vgetQ_lanei32(<4 x i32>* %A) nounwind {
-;CHECK: vgetQ_lanei32:
+;CHECK-LABEL: vgetQ_lanei32:
;CHECK: vmov.32
%tmp1 = load <4 x i32>* %A
%tmp2 = add <4 x i32> %tmp1, %tmp1
@@ -159,7 +159,7 @@ return: ; preds = %entry
}
define <8 x i8> @vset_lane8(<8 x i8>* %A, i8 %B) nounwind {
-;CHECK: vset_lane8:
+;CHECK-LABEL: vset_lane8:
;CHECK: vmov.8
%tmp1 = load <8 x i8>* %A
%tmp2 = insertelement <8 x i8> %tmp1, i8 %B, i32 1
@@ -167,7 +167,7 @@ define <8 x i8> @vset_lane8(<8 x i8>* %A, i8 %B) nounwind {
}
define <4 x i16> @vset_lane16(<4 x i16>* %A, i16 %B) nounwind {
-;CHECK: vset_lane16:
+;CHECK-LABEL: vset_lane16:
;CHECK: vmov.16
%tmp1 = load <4 x i16>* %A
%tmp2 = insertelement <4 x i16> %tmp1, i16 %B, i32 1
@@ -175,7 +175,7 @@ define <4 x i16> @vset_lane16(<4 x i16>* %A, i16 %B) nounwind {
}
define <2 x i32> @vset_lane32(<2 x i32>* %A, i32 %B) nounwind {
-;CHECK: vset_lane32:
+;CHECK-LABEL: vset_lane32:
;CHECK: vmov.32
%tmp1 = load <2 x i32>* %A
%tmp2 = insertelement <2 x i32> %tmp1, i32 %B, i32 1
@@ -183,7 +183,7 @@ define <2 x i32> @vset_lane32(<2 x i32>* %A, i32 %B) nounwind {
}
define <16 x i8> @vsetQ_lane8(<16 x i8>* %A, i8 %B) nounwind {
-;CHECK: vsetQ_lane8:
+;CHECK-LABEL: vsetQ_lane8:
;CHECK: vmov.8
%tmp1 = load <16 x i8>* %A
%tmp2 = insertelement <16 x i8> %tmp1, i8 %B, i32 1
@@ -191,7 +191,7 @@ define <16 x i8> @vsetQ_lane8(<16 x i8>* %A, i8 %B) nounwind {
}
define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
-;CHECK: vsetQ_lane16:
+;CHECK-LABEL: vsetQ_lane16:
;CHECK: vmov.16
%tmp1 = load <8 x i16>* %A
%tmp2 = insertelement <8 x i16> %tmp1, i16 %B, i32 1
@@ -199,7 +199,7 @@ define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
}
define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
-;CHECK: vsetQ_lane32:
+;CHECK-LABEL: vsetQ_lane32:
;CHECK: vmov.32 d{{.*}}[1], r1
%tmp1 = load <4 x i32>* %A
%tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1
@@ -207,7 +207,7 @@ define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
}
define arm_aapcs_vfpcc <2 x float> @test_vset_lanef32(float %arg0_float32_t, <2 x float> %arg1_float32x2_t) nounwind {
-;CHECK: test_vset_lanef32:
+;CHECK-LABEL: test_vset_lanef32:
;CHECK: vmov.f32 s3, s0
;CHECK: vmov.f64 d0, d1
entry:
diff --git a/test/CodeGen/ARM/vhadd.ll b/test/CodeGen/ARM/vhadd.ll
index 379e062838f6..9c2ed579c98e 100644
--- a/test/CodeGen/ARM/vhadd.ll
+++ b/test/CodeGen/ARM/vhadd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhadds8:
+;CHECK-LABEL: vhadds8:
;CHECK: vhadd.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhadds16:
+;CHECK-LABEL: vhadds16:
;CHECK: vhadd.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhadds32:
+;CHECK-LABEL: vhadds32:
;CHECK: vhadd.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhaddu8:
+;CHECK-LABEL: vhaddu8:
;CHECK: vhadd.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <8 x i8> @vhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhaddu16:
+;CHECK-LABEL: vhaddu16:
;CHECK: vhadd.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @vhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhaddu32:
+;CHECK-LABEL: vhaddu32:
;CHECK: vhadd.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <2 x i32> @vhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <16 x i8> @vhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhaddQs8:
+;CHECK-LABEL: vhaddQs8:
;CHECK: vhadd.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -64,7 +64,7 @@ define <16 x i8> @vhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhaddQs16:
+;CHECK-LABEL: vhaddQs16:
;CHECK: vhadd.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -73,7 +73,7 @@ define <8 x i16> @vhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhaddQs32:
+;CHECK-LABEL: vhaddQs32:
;CHECK: vhadd.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -82,7 +82,7 @@ define <4 x i32> @vhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhaddQu8:
+;CHECK-LABEL: vhaddQu8:
;CHECK: vhadd.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -91,7 +91,7 @@ define <16 x i8> @vhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhaddQu16:
+;CHECK-LABEL: vhaddQu16:
;CHECK: vhadd.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -100,7 +100,7 @@ define <8 x i16> @vhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhaddQu32:
+;CHECK-LABEL: vhaddQu32:
;CHECK: vhadd.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -125,7 +125,7 @@ declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind rea
declare <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <8 x i8> @vrhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrhadds8:
+;CHECK-LABEL: vrhadds8:
;CHECK: vrhadd.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -134,7 +134,7 @@ define <8 x i8> @vrhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vrhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrhadds16:
+;CHECK-LABEL: vrhadds16:
;CHECK: vrhadd.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -143,7 +143,7 @@ define <4 x i16> @vrhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vrhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrhadds32:
+;CHECK-LABEL: vrhadds32:
;CHECK: vrhadd.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -152,7 +152,7 @@ define <2 x i32> @vrhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vrhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrhaddu8:
+;CHECK-LABEL: vrhaddu8:
;CHECK: vrhadd.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -161,7 +161,7 @@ define <8 x i8> @vrhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vrhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrhaddu16:
+;CHECK-LABEL: vrhaddu16:
;CHECK: vrhadd.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -170,7 +170,7 @@ define <4 x i16> @vrhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vrhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrhaddu32:
+;CHECK-LABEL: vrhaddu32:
;CHECK: vrhadd.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -179,7 +179,7 @@ define <2 x i32> @vrhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <16 x i8> @vrhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrhaddQs8:
+;CHECK-LABEL: vrhaddQs8:
;CHECK: vrhadd.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -188,7 +188,7 @@ define <16 x i8> @vrhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vrhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrhaddQs16:
+;CHECK-LABEL: vrhaddQs16:
;CHECK: vrhadd.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -197,7 +197,7 @@ define <8 x i16> @vrhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vrhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrhaddQs32:
+;CHECK-LABEL: vrhaddQs32:
;CHECK: vrhadd.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -206,7 +206,7 @@ define <4 x i32> @vrhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vrhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrhaddQu8:
+;CHECK-LABEL: vrhaddQu8:
;CHECK: vrhadd.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -215,7 +215,7 @@ define <16 x i8> @vrhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vrhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrhaddQu16:
+;CHECK-LABEL: vrhaddQu16:
;CHECK: vrhadd.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -224,7 +224,7 @@ define <8 x i16> @vrhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vrhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrhaddQu32:
+;CHECK-LABEL: vrhaddQu32:
;CHECK: vrhadd.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
diff --git a/test/CodeGen/ARM/vhsub.ll b/test/CodeGen/ARM/vhsub.ll
index 0f0d0279a521..4bc2e87ab577 100644
--- a/test/CodeGen/ARM/vhsub.ll
+++ b/test/CodeGen/ARM/vhsub.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vhsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhsubs8:
+;CHECK-LABEL: vhsubs8:
;CHECK: vhsub.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vhsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vhsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhsubs16:
+;CHECK-LABEL: vhsubs16:
;CHECK: vhsub.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vhsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vhsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhsubs32:
+;CHECK-LABEL: vhsubs32:
;CHECK: vhsub.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vhsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vhsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhsubu8:
+;CHECK-LABEL: vhsubu8:
;CHECK: vhsub.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <8 x i8> @vhsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vhsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhsubu16:
+;CHECK-LABEL: vhsubu16:
;CHECK: vhsub.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @vhsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vhsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhsubu32:
+;CHECK-LABEL: vhsubu32:
;CHECK: vhsub.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <2 x i32> @vhsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <16 x i8> @vhsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhsubQs8:
+;CHECK-LABEL: vhsubQs8:
;CHECK: vhsub.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -64,7 +64,7 @@ define <16 x i8> @vhsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vhsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhsubQs16:
+;CHECK-LABEL: vhsubQs16:
;CHECK: vhsub.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -73,7 +73,7 @@ define <8 x i16> @vhsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vhsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhsubQs32:
+;CHECK-LABEL: vhsubQs32:
;CHECK: vhsub.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -82,7 +82,7 @@ define <4 x i32> @vhsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vhsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhsubQu8:
+;CHECK-LABEL: vhsubQu8:
;CHECK: vhsub.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -91,7 +91,7 @@ define <16 x i8> @vhsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vhsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhsubQu16:
+;CHECK-LABEL: vhsubQu16:
;CHECK: vhsub.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -100,7 +100,7 @@ define <8 x i16> @vhsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vhsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhsubQu32:
+;CHECK-LABEL: vhsubQu32:
;CHECK: vhsub.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
diff --git a/test/CodeGen/ARM/vicmp.ll b/test/CodeGen/ARM/vicmp.ll
index 2d8cb893bd86..0a8f103102b1 100644
--- a/test/CodeGen/ARM/vicmp.ll
+++ b/test/CodeGen/ARM/vicmp.ll
@@ -7,7 +7,7 @@
; the other operations.
define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcnei8:
+;CHECK-LABEL: vcnei8:
;CHECK: vceq.i8
;CHECK-NEXT: vmvn
%tmp1 = load <8 x i8>* %A
@@ -18,7 +18,7 @@ define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcnei16:
+;CHECK-LABEL: vcnei16:
;CHECK: vceq.i16
;CHECK-NEXT: vmvn
%tmp1 = load <4 x i16>* %A
@@ -29,7 +29,7 @@ define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcnei32:
+;CHECK-LABEL: vcnei32:
;CHECK: vceq.i32
;CHECK-NEXT: vmvn
%tmp1 = load <2 x i32>* %A
@@ -40,7 +40,7 @@ define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcneQi8:
+;CHECK-LABEL: vcneQi8:
;CHECK: vceq.i8
;CHECK-NEXT: vmvn
%tmp1 = load <16 x i8>* %A
@@ -51,7 +51,7 @@ define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcneQi16:
+;CHECK-LABEL: vcneQi16:
;CHECK: vceq.i16
;CHECK-NEXT: vmvn
%tmp1 = load <8 x i16>* %A
@@ -62,7 +62,7 @@ define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcneQi32:
+;CHECK-LABEL: vcneQi32:
;CHECK: vceq.i32
;CHECK-NEXT: vmvn
%tmp1 = load <4 x i32>* %A
@@ -73,7 +73,7 @@ define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcltQs8:
+;CHECK-LABEL: vcltQs8:
;CHECK: vcgt.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -83,7 +83,7 @@ define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcles16:
+;CHECK-LABEL: vcles16:
;CHECK: vcge.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -93,7 +93,7 @@ define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcltu16:
+;CHECK-LABEL: vcltu16:
;CHECK: vcgt.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -103,7 +103,7 @@ define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcleQu32:
+;CHECK-LABEL: vcleQu32:
;CHECK: vcge.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index 994f05dacb84..444d0d5b5edc 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
define <8 x i8> @vld1i8(i8* %A) nounwind {
-;CHECK: vld1i8:
+;CHECK-LABEL: vld1i8:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld1.8 {d16}, [r0:64]
%tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16)
@@ -10,7 +10,7 @@ define <8 x i8> @vld1i8(i8* %A) nounwind {
}
define <4 x i16> @vld1i16(i16* %A) nounwind {
-;CHECK: vld1i16:
+;CHECK-LABEL: vld1i16:
;CHECK: vld1.16
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
@@ -19,7 +19,7 @@ define <4 x i16> @vld1i16(i16* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i16> @vld1i16_update(i16** %ptr) nounwind {
-;CHECK: vld1i16_update:
+;CHECK-LABEL: vld1i16_update:
;CHECK: vld1.16 {d16}, [{{r[0-9]+}}]!
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
@@ -30,7 +30,7 @@ define <4 x i16> @vld1i16_update(i16** %ptr) nounwind {
}
define <2 x i32> @vld1i32(i32* %A) nounwind {
-;CHECK: vld1i32:
+;CHECK-LABEL: vld1i32:
;CHECK: vld1.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
@@ -39,7 +39,7 @@ define <2 x i32> @vld1i32(i32* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind {
-;CHECK: vld1i32_update:
+;CHECK-LABEL: vld1i32_update:
;CHECK: vld1.32 {d16}, [{{r[0-9]+}}], {{r[0-9]+}}
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
@@ -50,7 +50,7 @@ define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind {
}
define <2 x float> @vld1f(float* %A) nounwind {
-;CHECK: vld1f:
+;CHECK-LABEL: vld1f:
;CHECK: vld1.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %tmp0, i32 1)
@@ -58,7 +58,7 @@ define <2 x float> @vld1f(float* %A) nounwind {
}
define <1 x i64> @vld1i64(i64* %A) nounwind {
-;CHECK: vld1i64:
+;CHECK-LABEL: vld1i64:
;CHECK: vld1.64
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0, i32 1)
@@ -66,7 +66,7 @@ define <1 x i64> @vld1i64(i64* %A) nounwind {
}
define <16 x i8> @vld1Qi8(i8* %A) nounwind {
-;CHECK: vld1Qi8:
+;CHECK-LABEL: vld1Qi8:
;Check the alignment value. Max for this instruction is 128 bits:
;CHECK: vld1.8 {d16, d17}, [r0:64]
%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
@@ -75,7 +75,7 @@ define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;Check for a post-increment updating load.
define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
-;CHECK: vld1Qi8_update:
+;CHECK-LABEL: vld1Qi8_update:
;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+}}:64]!
%A = load i8** %ptr
%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
@@ -85,7 +85,7 @@ define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
}
define <8 x i16> @vld1Qi16(i16* %A) nounwind {
-;CHECK: vld1Qi16:
+;CHECK-LABEL: vld1Qi16:
;Check the alignment value. Max for this instruction is 128 bits:
;CHECK: vld1.16 {d16, d17}, [r0:128]
%tmp0 = bitcast i16* %A to i8*
@@ -94,7 +94,7 @@ define <8 x i16> @vld1Qi16(i16* %A) nounwind {
}
define <4 x i32> @vld1Qi32(i32* %A) nounwind {
-;CHECK: vld1Qi32:
+;CHECK-LABEL: vld1Qi32:
;CHECK: vld1.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %tmp0, i32 1)
@@ -102,7 +102,7 @@ define <4 x i32> @vld1Qi32(i32* %A) nounwind {
}
define <4 x float> @vld1Qf(float* %A) nounwind {
-;CHECK: vld1Qf:
+;CHECK-LABEL: vld1Qf:
;CHECK: vld1.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %tmp0, i32 1)
@@ -110,7 +110,7 @@ define <4 x float> @vld1Qf(float* %A) nounwind {
}
define <2 x i64> @vld1Qi64(i64* %A) nounwind {
-;CHECK: vld1Qi64:
+;CHECK-LABEL: vld1Qi64:
;CHECK: vld1.64
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %tmp0, i32 1)
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index caa016e929d8..fddafeab91cc 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -12,7 +12,7 @@
%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
define <8 x i8> @vld2i8(i8* %A) nounwind {
-;CHECK: vld2i8:
+;CHECK-LABEL: vld2i8:
;Check the alignment value. Max for this instruction is 128 bits:
;CHECK: vld2.8 {d16, d17}, [r0:64]
%tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A, i32 8)
@@ -23,7 +23,7 @@ define <8 x i8> @vld2i8(i8* %A) nounwind {
}
define <4 x i16> @vld2i16(i16* %A) nounwind {
-;CHECK: vld2i16:
+;CHECK-LABEL: vld2i16:
;Check the alignment value. Max for this instruction is 128 bits:
;CHECK: vld2.16 {d16, d17}, [r0:128]
%tmp0 = bitcast i16* %A to i8*
@@ -35,7 +35,7 @@ define <4 x i16> @vld2i16(i16* %A) nounwind {
}
define <2 x i32> @vld2i32(i32* %A) nounwind {
-;CHECK: vld2i32:
+;CHECK-LABEL: vld2i32:
;CHECK: vld2.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8* %tmp0, i32 1)
@@ -46,7 +46,7 @@ define <2 x i32> @vld2i32(i32* %A) nounwind {
}
define <2 x float> @vld2f(float* %A) nounwind {
-;CHECK: vld2f:
+;CHECK-LABEL: vld2f:
;CHECK: vld2.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 1)
@@ -58,7 +58,7 @@ define <2 x float> @vld2f(float* %A) nounwind {
;Check for a post-increment updating load.
define <2 x float> @vld2f_update(float** %ptr) nounwind {
-;CHECK: vld2f_update:
+;CHECK-LABEL: vld2f_update:
;CHECK: vld2.32 {d16, d17}, [r1]!
%A = load float** %ptr
%tmp0 = bitcast float* %A to i8*
@@ -72,7 +72,7 @@ define <2 x float> @vld2f_update(float** %ptr) nounwind {
}
define <1 x i64> @vld2i64(i64* %A) nounwind {
-;CHECK: vld2i64:
+;CHECK-LABEL: vld2i64:
;Check the alignment value. Max for this instruction is 128 bits:
;CHECK: vld1.64 {d16, d17}, [r0:128]
%tmp0 = bitcast i64* %A to i8*
@@ -84,7 +84,7 @@ define <1 x i64> @vld2i64(i64* %A) nounwind {
}
define <16 x i8> @vld2Qi8(i8* %A) nounwind {
-;CHECK: vld2Qi8:
+;CHECK-LABEL: vld2Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld2.8 {d16, d17, d18, d19}, [r0:64]
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 8)
@@ -96,7 +96,7 @@ define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
-;CHECK: vld2Qi8_update:
+;CHECK-LABEL: vld2Qi8_update:
;CHECK: vld2.8 {d16, d17, d18, d19}, [r2:128], r1
%A = load i8** %ptr
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 16)
@@ -109,7 +109,7 @@ define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
}
define <8 x i16> @vld2Qi16(i16* %A) nounwind {
-;CHECK: vld2Qi16:
+;CHECK-LABEL: vld2Qi16:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld2.16 {d16, d17, d18, d19}, [r0:128]
%tmp0 = bitcast i16* %A to i8*
@@ -121,7 +121,7 @@ define <8 x i16> @vld2Qi16(i16* %A) nounwind {
}
define <4 x i32> @vld2Qi32(i32* %A) nounwind {
-;CHECK: vld2Qi32:
+;CHECK-LABEL: vld2Qi32:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld2.32 {d16, d17, d18, d19}, [r0:256]
%tmp0 = bitcast i32* %A to i8*
@@ -133,7 +133,7 @@ define <4 x i32> @vld2Qi32(i32* %A) nounwind {
}
define <4 x float> @vld2Qf(float* %A) nounwind {
-;CHECK: vld2Qf:
+;CHECK-LABEL: vld2Qf:
;CHECK: vld2.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8* %tmp0, i32 1)
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index ad63e1f716b2..400541fb90a2 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -13,7 +13,7 @@
%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
define <8 x i8> @vld3i8(i8* %A) nounwind {
-;CHECK: vld3i8:
+;CHECK-LABEL: vld3i8:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld3.8 {d16, d17, d18}, [r0:64]
%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 32)
@@ -24,7 +24,7 @@ define <8 x i8> @vld3i8(i8* %A) nounwind {
}
define <4 x i16> @vld3i16(i16* %A) nounwind {
-;CHECK: vld3i16:
+;CHECK-LABEL: vld3i16:
;CHECK: vld3.16
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 1)
@@ -36,7 +36,7 @@ define <4 x i16> @vld3i16(i16* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
-;CHECK: vld3i16_update:
+;CHECK-LABEL: vld3i16_update:
;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+}}], {{r[0-9]+}}
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
@@ -50,7 +50,7 @@ define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
}
define <2 x i32> @vld3i32(i32* %A) nounwind {
-;CHECK: vld3i32:
+;CHECK-LABEL: vld3i32:
;CHECK: vld3.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8* %tmp0, i32 1)
@@ -61,7 +61,7 @@ define <2 x i32> @vld3i32(i32* %A) nounwind {
}
define <2 x float> @vld3f(float* %A) nounwind {
-;CHECK: vld3f:
+;CHECK-LABEL: vld3f:
;CHECK: vld3.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8* %tmp0, i32 1)
@@ -72,7 +72,7 @@ define <2 x float> @vld3f(float* %A) nounwind {
}
define <1 x i64> @vld3i64(i64* %A) nounwind {
-;CHECK: vld3i64:
+;CHECK-LABEL: vld3i64:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld1.64 {d16, d17, d18}, [r0:64]
%tmp0 = bitcast i64* %A to i8*
@@ -84,7 +84,7 @@ define <1 x i64> @vld3i64(i64* %A) nounwind {
}
define <16 x i8> @vld3Qi8(i8* %A) nounwind {
-;CHECK: vld3Qi8:
+;CHECK-LABEL: vld3Qi8:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld3.8 {d16, d18, d20}, [r0:64]!
;CHECK: vld3.8 {d17, d19, d21}, [r0:64]
@@ -96,7 +96,7 @@ define <16 x i8> @vld3Qi8(i8* %A) nounwind {
}
define <8 x i16> @vld3Qi16(i16* %A) nounwind {
-;CHECK: vld3Qi16:
+;CHECK-LABEL: vld3Qi16:
;CHECK: vld3.16
;CHECK: vld3.16
%tmp0 = bitcast i16* %A to i8*
@@ -108,7 +108,7 @@ define <8 x i16> @vld3Qi16(i16* %A) nounwind {
}
define <4 x i32> @vld3Qi32(i32* %A) nounwind {
-;CHECK: vld3Qi32:
+;CHECK-LABEL: vld3Qi32:
;CHECK: vld3.32
;CHECK: vld3.32
%tmp0 = bitcast i32* %A to i8*
@@ -121,7 +121,7 @@ define <4 x i32> @vld3Qi32(i32* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i32> @vld3Qi32_update(i32** %ptr) nounwind {
-;CHECK: vld3Qi32_update:
+;CHECK-LABEL: vld3Qi32_update:
;CHECK: vld3.32 {d16, d18, d20}, [r[[R:[0-9]+]]]!
;CHECK: vld3.32 {d17, d19, d21}, [r[[R]]]!
%A = load i32** %ptr
@@ -136,7 +136,7 @@ define <4 x i32> @vld3Qi32_update(i32** %ptr) nounwind {
}
define <4 x float> @vld3Qf(float* %A) nounwind {
-;CHECK: vld3Qf:
+;CHECK-LABEL: vld3Qf:
;CHECK: vld3.32
;CHECK: vld3.32
%tmp0 = bitcast float* %A to i8*
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index 9ee5fe46eea2..f7376b503a30 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -12,7 +12,7 @@
%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
define <8 x i8> @vld4i8(i8* %A) nounwind {
-;CHECK: vld4i8:
+;CHECK-LABEL: vld4i8:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld4.8 {d16, d17, d18, d19}, [r0:64]
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 8)
@@ -24,7 +24,7 @@ define <8 x i8> @vld4i8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
-;CHECK: vld4i8_update:
+;CHECK-LABEL: vld4i8_update:
;CHECK: vld4.8 {d16, d17, d18, d19}, [r2:128], r1
%A = load i8** %ptr
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 16)
@@ -37,7 +37,7 @@ define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
}
define <4 x i16> @vld4i16(i16* %A) nounwind {
-;CHECK: vld4i16:
+;CHECK-LABEL: vld4i16:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld4.16 {d16, d17, d18, d19}, [r0:128]
%tmp0 = bitcast i16* %A to i8*
@@ -49,7 +49,7 @@ define <4 x i16> @vld4i16(i16* %A) nounwind {
}
define <2 x i32> @vld4i32(i32* %A) nounwind {
-;CHECK: vld4i32:
+;CHECK-LABEL: vld4i32:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld4.32 {d16, d17, d18, d19}, [r0:256]
%tmp0 = bitcast i32* %A to i8*
@@ -61,7 +61,7 @@ define <2 x i32> @vld4i32(i32* %A) nounwind {
}
define <2 x float> @vld4f(float* %A) nounwind {
-;CHECK: vld4f:
+;CHECK-LABEL: vld4f:
;CHECK: vld4.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8* %tmp0, i32 1)
@@ -72,7 +72,7 @@ define <2 x float> @vld4f(float* %A) nounwind {
}
define <1 x i64> @vld4i64(i64* %A) nounwind {
-;CHECK: vld4i64:
+;CHECK-LABEL: vld4i64:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld1.64 {d16, d17, d18, d19}, [r0:256]
%tmp0 = bitcast i64* %A to i8*
@@ -84,7 +84,7 @@ define <1 x i64> @vld4i64(i64* %A) nounwind {
}
define <16 x i8> @vld4Qi8(i8* %A) nounwind {
-;CHECK: vld4Qi8:
+;CHECK-LABEL: vld4Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
;CHECK: vld4.8 {d16, d18, d20, d22}, [r0:256]!
;CHECK: vld4.8 {d17, d19, d21, d23}, [r0:256]
@@ -96,7 +96,7 @@ define <16 x i8> @vld4Qi8(i8* %A) nounwind {
}
define <8 x i16> @vld4Qi16(i16* %A) nounwind {
-;CHECK: vld4Qi16:
+;CHECK-LABEL: vld4Qi16:
;Check for no alignment specifier.
;CHECK: vld4.16 {d16, d18, d20, d22}, [r0]!
;CHECK: vld4.16 {d17, d19, d21, d23}, [r0]
@@ -110,7 +110,7 @@ define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;Check for a post-increment updating load.
define <8 x i16> @vld4Qi16_update(i16** %ptr) nounwind {
-;CHECK: vld4Qi16_update:
+;CHECK-LABEL: vld4Qi16_update:
;CHECK: vld4.16 {d16, d18, d20, d22}, [r1:64]!
;CHECK: vld4.16 {d17, d19, d21, d23}, [r1:64]!
%A = load i16** %ptr
@@ -125,7 +125,7 @@ define <8 x i16> @vld4Qi16_update(i16** %ptr) nounwind {
}
define <4 x i32> @vld4Qi32(i32* %A) nounwind {
-;CHECK: vld4Qi32:
+;CHECK-LABEL: vld4Qi32:
;CHECK: vld4.32
;CHECK: vld4.32
%tmp0 = bitcast i32* %A to i8*
@@ -137,7 +137,7 @@ define <4 x i32> @vld4Qi32(i32* %A) nounwind {
}
define <4 x float> @vld4Qf(float* %A) nounwind {
-;CHECK: vld4Qf:
+;CHECK-LABEL: vld4Qf:
;CHECK: vld4.32
;CHECK: vld4.32
%tmp0 = bitcast float* %A to i8*
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
index 7c7319c090ba..5509f3e0a0da 100644
--- a/test/CodeGen/ARM/vlddup.ll
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vld1dupi8(i8* %A) nounwind {
-;CHECK: vld1dupi8:
+;CHECK-LABEL: vld1dupi8:
;Check the (default) alignment value.
;CHECK: vld1.8 {d16[]}, [r0]
%tmp1 = load i8* %A, align 8
@@ -11,7 +11,7 @@ define <8 x i8> @vld1dupi8(i8* %A) nounwind {
}
define <4 x i16> @vld1dupi16(i16* %A) nounwind {
-;CHECK: vld1dupi16:
+;CHECK-LABEL: vld1dupi16:
;Check the alignment value. Max for this instruction is 16 bits:
;CHECK: vld1.16 {d16[]}, [r0:16]
%tmp1 = load i16* %A, align 8
@@ -21,7 +21,7 @@ define <4 x i16> @vld1dupi16(i16* %A) nounwind {
}
define <2 x i32> @vld1dupi32(i32* %A) nounwind {
-;CHECK: vld1dupi32:
+;CHECK-LABEL: vld1dupi32:
;Check the alignment value. Max for this instruction is 32 bits:
;CHECK: vld1.32 {d16[]}, [r0:32]
%tmp1 = load i32* %A, align 8
@@ -31,7 +31,7 @@ define <2 x i32> @vld1dupi32(i32* %A) nounwind {
}
define <2 x float> @vld1dupf(float* %A) nounwind {
-;CHECK: vld1dupf:
+;CHECK-LABEL: vld1dupf:
;CHECK: vld1.32 {d16[]}, [r0:32]
%tmp0 = load float* %A
%tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
@@ -40,7 +40,7 @@ define <2 x float> @vld1dupf(float* %A) nounwind {
}
define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
-;CHECK: vld1dupQi8:
+;CHECK-LABEL: vld1dupQi8:
;Check the (default) alignment value.
;CHECK: vld1.8 {d16[], d17[]}, [r0]
%tmp1 = load i8* %A, align 8
@@ -50,7 +50,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
}
define <4 x float> @vld1dupQf(float* %A) nounwind {
-;CHECK: vld1dupQf:
+;CHECK-LABEL: vld1dupQf:
;CHECK: vld1.32 {d16[], d17[]}, [r0:32]
%tmp0 = load float* %A
%tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
@@ -63,7 +63,7 @@ define <4 x float> @vld1dupQf(float* %A) nounwind {
%struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> }
define <8 x i8> @vld2dupi8(i8* %A) nounwind {
-;CHECK: vld2dupi8:
+;CHECK-LABEL: vld2dupi8:
;Check the (default) alignment value.
;CHECK: vld2.8 {d16[], d17[]}, [r0]
%tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
@@ -76,7 +76,7 @@ define <8 x i8> @vld2dupi8(i8* %A) nounwind {
}
define <4 x i16> @vld2dupi16(i8* %A) nounwind {
-;CHECK: vld2dupi16:
+;CHECK-LABEL: vld2dupi16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
;CHECK: vld2.16 {d16[], d17[]}, [r0]
@@ -91,7 +91,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
-;CHECK: vld2dupi16_update:
+;CHECK-LABEL: vld2dupi16_update:
;CHECK: vld2.16 {d16[], d17[]}, [r1]!
%A = load i16** %ptr
%A2 = bitcast i16* %A to i8*
@@ -107,7 +107,7 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
}
define <2 x i32> @vld2dupi32(i8* %A) nounwind {
-;CHECK: vld2dupi32:
+;CHECK-LABEL: vld2dupi32:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld2.32 {d16[], d17[]}, [r0:64]
%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
@@ -128,7 +128,7 @@ declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>,
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
-;CHECK: vld3dupi8_update:
+;CHECK-LABEL: vld3dupi8_update:
;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
%A = load i8** %ptr
%tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
@@ -146,7 +146,7 @@ define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
}
define <4 x i16> @vld3dupi16(i8* %A) nounwind {
-;CHECK: vld3dupi16:
+;CHECK-LABEL: vld3dupi16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
@@ -169,7 +169,7 @@ declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>,
;Check for a post-increment updating load.
define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
-;CHECK: vld4dupi16_update:
+;CHECK-LABEL: vld4dupi16_update:
;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
%A = load i16** %ptr
%A2 = bitcast i16* %A to i8*
@@ -191,7 +191,7 @@ define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
}
define <2 x i32> @vld4dupi32(i8* %A) nounwind {
-;CHECK: vld4dupi32:
+;CHECK-LABEL: vld4dupi32:
;Check the alignment value. An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0:64]
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index f35fa92f5dc7..7a83a4c0cac6 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=arm -mattr=+neon -regalloc=basic | FileCheck %s
define <8 x i8> @vld1lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld1lanei8:
+;CHECK-LABEL: vld1lanei8:
;Check the (default) alignment value.
;CHECK: vld1.8 {d16[3]}, [r0]
%tmp1 = load <8 x i8>* %B
@@ -12,7 +12,7 @@ define <8 x i8> @vld1lanei8(i8* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vld1lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld1lanei16:
+;CHECK-LABEL: vld1lanei16:
;Check the alignment value. Max for this instruction is 16 bits:
;CHECK: vld1.16 {d16[2]}, [r0:16]
%tmp1 = load <4 x i16>* %B
@@ -22,7 +22,7 @@ define <4 x i16> @vld1lanei16(i16* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vld1lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld1lanei32:
+;CHECK-LABEL: vld1lanei32:
;Check the alignment value. Max for this instruction is 32 bits:
;CHECK: vld1.32 {d16[1]}, [r0:32]
%tmp1 = load <2 x i32>* %B
@@ -32,7 +32,7 @@ define <2 x i32> @vld1lanei32(i32* %A, <2 x i32>* %B) nounwind {
}
define <2 x i32> @vld1lanei32a32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld1lanei32a32:
+;CHECK-LABEL: vld1lanei32a32:
;Check the alignment value. Legal values are none or :32.
;CHECK: vld1.32 {d16[1]}, [r0:32]
%tmp1 = load <2 x i32>* %B
@@ -42,7 +42,7 @@ define <2 x i32> @vld1lanei32a32(i32* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vld1lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld1lanef:
+;CHECK-LABEL: vld1lanef:
;CHECK: vld1.32 {d16[1]}, [r0:32]
%tmp1 = load <2 x float>* %B
%tmp2 = load float* %A, align 4
@@ -51,7 +51,7 @@ define <2 x float> @vld1lanef(float* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vld1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
-;CHECK: vld1laneQi8:
+;CHECK-LABEL: vld1laneQi8:
;CHECK: vld1.8 {d17[1]}, [r0]
%tmp1 = load <16 x i8>* %B
%tmp2 = load i8* %A, align 8
@@ -60,7 +60,7 @@ define <16 x i8> @vld1laneQi8(i8* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vld1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld1laneQi16:
+;CHECK-LABEL: vld1laneQi16:
;CHECK: vld1.16 {d17[1]}, [r0:16]
%tmp1 = load <8 x i16>* %B
%tmp2 = load i16* %A, align 8
@@ -69,7 +69,7 @@ define <8 x i16> @vld1laneQi16(i16* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vld1laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld1laneQi32:
+;CHECK-LABEL: vld1laneQi32:
;CHECK: vld1.32 {d17[1]}, [r0:32]
%tmp1 = load <4 x i32>* %B
%tmp2 = load i32* %A, align 8
@@ -78,7 +78,7 @@ define <4 x i32> @vld1laneQi32(i32* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vld1laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld1laneQf:
+;CHECK-LABEL: vld1laneQf:
;CHECK: vld1.32 {d16[0]}, [r0:32]
%tmp1 = load <4 x float>* %B
%tmp2 = load float* %A
@@ -96,7 +96,7 @@ define <4 x float> @vld1laneQf(float* %A, <4 x float>* %B) nounwind {
%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld2lanei8:
+;CHECK-LABEL: vld2lanei8:
;Check the alignment value. Max for this instruction is 16 bits:
;CHECK: vld2.8 {d16[1], d17[1]}, [r0:16]
%tmp1 = load <8 x i8>* %B
@@ -108,7 +108,7 @@ define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld2lanei16:
+;CHECK-LABEL: vld2lanei16:
;Check the alignment value. Max for this instruction is 32 bits:
;CHECK: vld2.16 {d16[1], d17[1]}, [r0:32]
%tmp0 = bitcast i16* %A to i8*
@@ -121,7 +121,7 @@ define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld2lanei32:
+;CHECK-LABEL: vld2lanei32:
;CHECK: vld2.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
@@ -134,7 +134,7 @@ define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
;Check for a post-increment updating load.
define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind {
-;CHECK: vld2lanei32_update:
+;CHECK-LABEL: vld2lanei32_update:
;CHECK: vld2.32 {d16[1], d17[1]}, [{{r[0-9]+}}]!
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
@@ -149,7 +149,7 @@ define <2 x i32> @vld2lanei32_update(i32** %ptr, <2 x i32>* %B) nounwind {
}
define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld2lanef:
+;CHECK-LABEL: vld2lanef:
;CHECK: vld2.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>* %B
@@ -161,7 +161,7 @@ define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
}
define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld2laneQi16:
+;CHECK-LABEL: vld2laneQi16:
;Check the (default) alignment.
;CHECK: vld2.16 {d17[1], d19[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
@@ -174,7 +174,7 @@ define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld2laneQi32:
+;CHECK-LABEL: vld2laneQi32:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld2.32 {d17[0], d19[0]}, [{{r[0-9]+}}:64]
%tmp0 = bitcast i32* %A to i8*
@@ -187,7 +187,7 @@ define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vld2laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld2laneQf:
+;CHECK-LABEL: vld2laneQf:
;CHECK: vld2.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>* %B
@@ -217,7 +217,7 @@ declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x flo
%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld3lanei8:
+;CHECK-LABEL: vld3lanei8:
;CHECK: vld3.8
%tmp1 = load <8 x i8>* %B
%tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
@@ -230,7 +230,7 @@ define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld3lanei16:
+;CHECK-LABEL: vld3lanei16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
@@ -245,7 +245,7 @@ define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vld3lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld3lanei32:
+;CHECK-LABEL: vld3lanei32:
;CHECK: vld3.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
@@ -259,7 +259,7 @@ define <2 x i32> @vld3lanei32(i32* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld3lanef:
+;CHECK-LABEL: vld3lanef:
;CHECK: vld3.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>* %B
@@ -273,7 +273,7 @@ define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
}
define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld3laneQi16:
+;CHECK-LABEL: vld3laneQi16:
;Check the (default) alignment value. VLD3 does not support alignment.
;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}]
%tmp0 = bitcast i16* %A to i8*
@@ -289,7 +289,7 @@ define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
-;CHECK: vld3laneQi16_update:
+;CHECK-LABEL: vld3laneQi16_update:
;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}], {{r[0-9]+}}
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
@@ -306,7 +306,7 @@ define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounw
}
define <4 x i32> @vld3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld3laneQi32:
+;CHECK-LABEL: vld3laneQi32:
;CHECK: vld3.32
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
@@ -320,7 +320,7 @@ define <4 x i32> @vld3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vld3laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld3laneQf:
+;CHECK-LABEL: vld3laneQf:
;CHECK: vld3.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>* %B
@@ -352,7 +352,7 @@ declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x flo
%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld4lanei8:
+;CHECK-LABEL: vld4lanei8:
;Check the alignment value. Max for this instruction is 32 bits:
;CHECK: vld4.8 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}:32]
%tmp1 = load <8 x i8>* %B
@@ -369,7 +369,7 @@ define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating load.
define <8 x i8> @vld4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
-;CHECK: vld4lanei8_update:
+;CHECK-LABEL: vld4lanei8_update:
;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}:32]!
%A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
@@ -387,7 +387,7 @@ define <8 x i8> @vld4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld4lanei16:
+;CHECK-LABEL: vld4lanei16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
;CHECK: vld4.16 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}]
@@ -405,7 +405,7 @@ define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld4lanei32:
+;CHECK-LABEL: vld4lanei32:
;Check the alignment value. An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [{{r[0-9]+}}:64]
@@ -423,7 +423,7 @@ define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld4lanef:
+;CHECK-LABEL: vld4lanef:
;CHECK: vld4.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>* %B
@@ -439,7 +439,7 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
}
define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld4laneQi16:
+;CHECK-LABEL: vld4laneQi16:
;Check the alignment value. Max for this instruction is 64 bits:
;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [{{r[0-9]+}}:64]
%tmp0 = bitcast i16* %A to i8*
@@ -456,7 +456,7 @@ define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld4laneQi32:
+;CHECK-LABEL: vld4laneQi32:
;Check the (default) alignment.
;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [{{r[0-9]+}}]
%tmp0 = bitcast i32* %A to i8*
@@ -473,7 +473,7 @@ define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vld4laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld4laneQf:
+;CHECK-LABEL: vld4laneQf:
;CHECK: vld4.32
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>* %B
@@ -502,7 +502,7 @@ declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x flo
; we don't currently have a QQQQ_VFP2 super-regclass. (The "0" for the low
; part of %ins67 is supposed to be loaded by a VLDRS instruction in this test.)
define <8 x i16> @test_qqqq_regsequence_subreg([6 x i64] %b) nounwind {
-;CHECK: test_qqqq_regsequence_subreg
+;CHECK-LABEL: test_qqqq_regsequence_subreg:
;CHECK: vld3.16
%tmp63 = extractvalue [6 x i64] %b, 5
%tmp64 = zext i64 %tmp63 to i128
diff --git a/test/CodeGen/ARM/vldm-liveness.ll b/test/CodeGen/ARM/vldm-liveness.ll
new file mode 100644
index 000000000000..751f447077be
--- /dev/null
+++ b/test/CodeGen/ARM/vldm-liveness.ll
@@ -0,0 +1,40 @@
+; RUN: llc -mtriple thumbv7-apple-ios -verify-machineinstrs -o - %s | FileCheck %s
+
+; ARM load store optimizer was dealing with a sequence like:
+; s1 = VLDRS [r0, 1], Q0<imp-def>
+; s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
+; s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
+; s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
+;
+; It decided to combine the {s0, s1} loads into a single instruction in the
+; third position. However, this leaves the instruction defining s3 with a stray
+; imp-use of Q0, which is undefined.
+;
+; The verifier catches this, so this test just makes sure that appropriate
+; liveness flags are added.
+;
+; I believe the change will be tested as long as the vldmia is not the first of
+; the loads. Earlier optimisations may perturb the output over time, but
+; fiddling the indices should be sufficient to restore the test.
+
+define arm_aapcs_vfpcc <4 x float> @foo(float* %ptr) {
+; CHECK-LABEL: foo:
+; CHECK: vldr s3, [r0, #8]
+; CHECK: vldmia r0, {s0, s1}
+; CHECK: vldr s2, [r0, #16]
+ %off0 = getelementptr float* %ptr, i32 0
+ %val0 = load float* %off0
+ %off1 = getelementptr float* %ptr, i32 1
+ %val1 = load float* %off1
+ %off4 = getelementptr float* %ptr, i32 4
+ %val4 = load float* %off4
+ %off2 = getelementptr float* %ptr, i32 2
+ %val2 = load float* %off2
+
+ %vec1 = insertelement <4 x float> undef, float %val0, i32 0
+ %vec2 = insertelement <4 x float> %vec1, float %val1, i32 1
+ %vec3 = insertelement <4 x float> %vec2, float %val4, i32 2
+ %vec4 = insertelement <4 x float> %vec3, float %val2, i32 3
+
+ ret <4 x float> %vec4
+}
diff --git a/test/CodeGen/ARM/vldm-sched-a9.ll b/test/CodeGen/ARM/vldm-sched-a9.ll
new file mode 100644
index 000000000000..d0a9ac6d2b56
--- /dev/null
+++ b/test/CodeGen/ARM/vldm-sched-a9.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -march=arm -mtriple=armv7-linux-gnueabihf -float-abi=hard -mcpu=cortex-a9 -O3 | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32-S64"
+
+; This test will generate spills/fills using vldmia instructions that access 64 bytes of memory.
+; Check that we don't crash when we generate these instructions on Cortex-A9.
+
+; CHECK: test:
+; CHECK: vstmia
+; CHECK: vldmia
+define void @test(i64* %src) #0 {
+entry:
+ %arrayidx39 = getelementptr inbounds i64* %src, i32 13
+ %vecinit285 = shufflevector <16 x i64> undef, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
+ store <16 x i64> %vecinit285, <16 x i64>* undef, align 128
+ %0 = load i64* undef, align 8
+ %vecinit379 = insertelement <16 x i64> undef, i64 %0, i32 9
+ %1 = load i64* undef, align 8
+ %vecinit419 = insertelement <16 x i64> undef, i64 %1, i32 15
+ store <16 x i64> %vecinit419, <16 x i64>* undef, align 128
+ %vecinit579 = insertelement <16 x i64> undef, i64 0, i32 4
+ %vecinit582 = shufflevector <16 x i64> %vecinit579, <16 x i64> <i64 6, i64 7, i64 8, i64 9, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit584 = insertelement <16 x i64> %vecinit582, i64 undef, i32 9
+ %vecinit586 = insertelement <16 x i64> %vecinit584, i64 0, i32 10
+ %vecinit589 = shufflevector <16 x i64> %vecinit586, <16 x i64> <i64 12, i64 13, i64 14, i64 15, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 16, i32 17, i32 18, i32 19, i32 undef>
+ %2 = load i64* undef, align 8
+ %vecinit591 = insertelement <16 x i64> %vecinit589, i64 %2, i32 15
+ store <16 x i64> %vecinit591, <16 x i64>* undef, align 128
+ %vecinit694 = shufflevector <16 x i64> undef, <16 x i64> <i64 13, i64 14, i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+ store <16 x i64> %vecinit694, <16 x i64>* undef, align 128
+ %3 = load i64* undef, align 8
+ %vecinit1331 = insertelement <16 x i64> undef, i64 %3, i32 14
+ %4 = load i64* undef, align 8
+ %vecinit1468 = insertelement <16 x i64> undef, i64 %4, i32 11
+ %vecinit1471 = shufflevector <16 x i64> %vecinit1468, <16 x i64> <i64 13, i64 14, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 undef, i32 undef>
+ %vecinit1474 = shufflevector <16 x i64> %vecinit1471, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
+ store <16 x i64> %vecinit1474, <16 x i64>* undef, align 128
+ %vecinit1552 = shufflevector <16 x i64> undef, <16 x i64> <i64 10, i64 11, i64 12, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit1555 = shufflevector <16 x i64> %vecinit1552, <16 x i64> <i64 13, i64 14, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 undef, i32 undef>
+ %vecinit1558 = shufflevector <16 x i64> %vecinit1555, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
+ store <16 x i64> %vecinit1558, <16 x i64>* undef, align 128
+ %vecinit1591 = shufflevector <16 x i64> undef, <16 x i64> <i64 3, i64 4, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit1594 = shufflevector <16 x i64> %vecinit1591, <16 x i64> <i64 5, i64 6, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit1597 = shufflevector <16 x i64> %vecinit1594, <16 x i64> <i64 7, i64 8, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 16, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit1599 = insertelement <16 x i64> %vecinit1597, i64 undef, i32 8
+ %vecinit1601 = insertelement <16 x i64> %vecinit1599, i64 undef, i32 9
+ %vecinit1603 = insertelement <16 x i64> %vecinit1601, i64 undef, i32 10
+ %5 = load i64* undef, align 8
+ %vecinit1605 = insertelement <16 x i64> %vecinit1603, i64 %5, i32 11
+ %vecinit1608 = shufflevector <16 x i64> %vecinit1605, <16 x i64> <i64 13, i64 14, i64 15, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 undef>
+ %6 = load i64* undef, align 8
+ %vecinit1610 = insertelement <16 x i64> %vecinit1608, i64 %6, i32 15
+ store <16 x i64> %vecinit1610, <16 x i64>* undef, align 128
+ %vecinit2226 = shufflevector <16 x i64> undef, <16 x i64> <i64 6, i64 7, i64 8, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %7 = load i64* undef, align 8
+ %vecinit2228 = insertelement <16 x i64> %vecinit2226, i64 %7, i32 8
+ %vecinit2230 = insertelement <16 x i64> %vecinit2228, i64 undef, i32 9
+ %vecinit2233 = shufflevector <16 x i64> %vecinit2230, <16 x i64> <i64 11, i64 12, i64 13, i64 14, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef>
+ %vecinit2236 = shufflevector <16 x i64> %vecinit2233, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
+ store <16 x i64> %vecinit2236, <16 x i64>* undef, align 128
+ %vecinit2246 = shufflevector <16 x i64> undef, <16 x i64> <i64 4, i64 5, i64 6, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit2249 = shufflevector <16 x i64> %vecinit2246, <16 x i64> <i64 7, i64 8, i64 9, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 16, i32 17, i32 18, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit2252 = shufflevector <16 x i64> %vecinit2249, <16 x i64> <i64 10, i64 11, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 16, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %vecinit2255 = shufflevector <16 x i64> %vecinit2252, <16 x i64> <i64 12, i64 13, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 16, i32 17, i32 undef, i32 undef, i32 undef>
+ %8 = load i64* %arrayidx39, align 8
+ %vecinit2257 = insertelement <16 x i64> %vecinit2255, i64 %8, i32 13
+ %vecinit2260 = shufflevector <16 x i64> %vecinit2257, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
+ store <16 x i64> %vecinit2260, <16 x i64>* null, align 128
+ ret void
+}
+attributes #0 = { noredzone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/vminmax.ll b/test/CodeGen/ARM/vminmax.ll
index e3527c1a4d9b..81f45782a96f 100644
--- a/test/CodeGen/ARM/vminmax.ll
+++ b/test/CodeGen/ARM/vminmax.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmins8:
+;CHECK-LABEL: vmins8:
;CHECK: vmin.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmins16:
+;CHECK-LABEL: vmins16:
;CHECK: vmin.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmins32:
+;CHECK-LABEL: vmins32:
;CHECK: vmin.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vminu8:
+;CHECK-LABEL: vminu8:
;CHECK: vmin.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <8 x i8> @vminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vminu16:
+;CHECK-LABEL: vminu16:
;CHECK: vmin.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @vminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vminu32:
+;CHECK-LABEL: vminu32:
;CHECK: vmin.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <2 x i32> @vminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vminf32:
+;CHECK-LABEL: vminf32:
;CHECK: vmin.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -64,7 +64,7 @@ define <2 x float> @vminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vminQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vminQs8:
+;CHECK-LABEL: vminQs8:
;CHECK: vmin.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -73,7 +73,7 @@ define <16 x i8> @vminQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vminQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vminQs16:
+;CHECK-LABEL: vminQs16:
;CHECK: vmin.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -82,7 +82,7 @@ define <8 x i16> @vminQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vminQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vminQs32:
+;CHECK-LABEL: vminQs32:
;CHECK: vmin.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -91,7 +91,7 @@ define <4 x i32> @vminQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vminQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vminQu8:
+;CHECK-LABEL: vminQu8:
;CHECK: vmin.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -100,7 +100,7 @@ define <16 x i8> @vminQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vminQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vminQu16:
+;CHECK-LABEL: vminQu16:
;CHECK: vmin.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -109,7 +109,7 @@ define <8 x i16> @vminQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vminQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vminQu32:
+;CHECK-LABEL: vminQu32:
;CHECK: vmin.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -118,7 +118,7 @@ define <4 x i32> @vminQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vminQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vminQf32:
+;CHECK-LABEL: vminQf32:
;CHECK: vmin.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -147,7 +147,7 @@ declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind read
declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <8 x i8> @vmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmaxs8:
+;CHECK-LABEL: vmaxs8:
;CHECK: vmax.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -156,7 +156,7 @@ define <8 x i8> @vmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmaxs16:
+;CHECK-LABEL: vmaxs16:
;CHECK: vmax.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -165,7 +165,7 @@ define <4 x i16> @vmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmaxs32:
+;CHECK-LABEL: vmaxs32:
;CHECK: vmax.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -174,7 +174,7 @@ define <2 x i32> @vmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmaxu8:
+;CHECK-LABEL: vmaxu8:
;CHECK: vmax.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -183,7 +183,7 @@ define <8 x i8> @vmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmaxu16:
+;CHECK-LABEL: vmaxu16:
;CHECK: vmax.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -192,7 +192,7 @@ define <4 x i16> @vmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmaxu32:
+;CHECK-LABEL: vmaxu32:
;CHECK: vmax.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -201,7 +201,7 @@ define <2 x i32> @vmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vmaxf32:
+;CHECK-LABEL: vmaxf32:
;CHECK: vmax.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -210,7 +210,7 @@ define <2 x float> @vmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <16 x i8> @vmaxQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmaxQs8:
+;CHECK-LABEL: vmaxQs8:
;CHECK: vmax.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -219,7 +219,7 @@ define <16 x i8> @vmaxQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vmaxQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmaxQs16:
+;CHECK-LABEL: vmaxQs16:
;CHECK: vmax.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -228,7 +228,7 @@ define <8 x i16> @vmaxQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vmaxQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmaxQs32:
+;CHECK-LABEL: vmaxQs32:
;CHECK: vmax.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -237,7 +237,7 @@ define <4 x i32> @vmaxQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <16 x i8> @vmaxQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmaxQu8:
+;CHECK-LABEL: vmaxQu8:
;CHECK: vmax.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -246,7 +246,7 @@ define <16 x i8> @vmaxQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vmaxQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmaxQu16:
+;CHECK-LABEL: vmaxQu16:
;CHECK: vmax.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -255,7 +255,7 @@ define <8 x i16> @vmaxQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vmaxQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmaxQu32:
+;CHECK-LABEL: vmaxQu32:
;CHECK: vmax.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -264,7 +264,7 @@ define <4 x i32> @vmaxQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vmaxQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vmaxQf32:
+;CHECK-LABEL: vmaxQf32:
;CHECK: vmax.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
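Most hunks in this patch are the same mechanical change: a plain ;CHECK: <function>: line becomes a ;CHECK-LABEL: directive. A minimal sketch of why that matters (the function @sample below is hypothetical and not part of the patch): CHECK-LABEL makes FileCheck split the llc output into blocks at each label match, so the remaining CHECK lines for a function can only match inside that function's block, rather than accidentally matching a similar instruction emitted for a later function.

; Illustrative only, assuming the same llc/FileCheck setup as the tests in this directory.
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @sample(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: sample:
; CHECK: vmin.s8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}
declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone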
diff --git a/test/CodeGen/ARM/vminmaxnm.ll b/test/CodeGen/ARM/vminmaxnm.ll
new file mode 100644
index 000000000000..f6ce64c54a39
--- /dev/null
+++ b/test/CodeGen/ARM/vminmaxnm.ll
@@ -0,0 +1,88 @@
+; RUN: llc < %s -mtriple armv8 -mattr=+neon | FileCheck %s
+; RUN: llc < %s -mtriple armv8 -mattr=+neon,+fp-armv8 -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK-FAST
+
+define <4 x float> @vmaxnmq(<4 x float>* %A, <4 x float>* %B) nounwind {
+; CHECK: vmaxnmq
+; CHECK: vmaxnm.f32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.arm.neon.vmaxnm.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x float> @vmaxnmd(<2 x float>* %A, <2 x float>* %B) nounwind {
+; CHECK: vmaxnmd
+; CHECK: vmaxnm.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.arm.neon.vmaxnm.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define <4 x float> @vminnmq(<4 x float>* %A, <4 x float>* %B) nounwind {
+; CHECK: vminnmq
+; CHECK: vminnm.f32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
+ %tmp1 = load <4 x float>* %A
+ %tmp2 = load <4 x float>* %B
+ %tmp3 = call <4 x float> @llvm.arm.neon.vminnm.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
+ ret <4 x float> %tmp3
+}
+
+define <2 x float> @vminnmd(<2 x float>* %A, <2 x float>* %B) nounwind {
+; CHECK: vminnmd
+; CHECK: vminnm.f32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
+ %tmp1 = load <2 x float>* %A
+ %tmp2 = load <2 x float>* %B
+ %tmp3 = call <2 x float> @llvm.arm.neon.vminnm.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
+ ret <2 x float> %tmp3
+}
+
+define float @fp-armv8_vminnm_o(float %a, float %b) {
+; CHECK-FAST: fp-armv8_vminnm_o
+; CHECK-FAST-NOT: vcmp
+; CHECK-FAST: vminnm.f32
+; CHECK: fp-armv8_vminnm_o
+; CHECK-NOT: vminnm.f32
+ %cmp = fcmp olt float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+}
+
+define float @fp-armv8_vminnm_u(float %a, float %b) {
+; CHECK-FAST: fp-armv8_vminnm_u
+; CHECK-FAST-NOT: vcmp
+; CHECK-FAST: vminnm.f32
+; CHECK: fp-armv8_vminnm_u
+; CHECK-NOT: vminnm.f32
+ %cmp = fcmp ult float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+}
+
+define float @fp-armv8_vmaxnm_o(float %a, float %b) {
+; CHECK-FAST: fp-armv8_vmaxnm_o
+; CHECK-FAST-NOT: vcmp
+; CHECK-FAST: vmaxnm.f32
+; CHECK: fp-armv8_vmaxnm_o
+; CHECK-NOT: vmaxnm.f32
+ %cmp = fcmp ogt float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+}
+
+define float @fp-armv8_vmaxnm_u(float %a, float %b) {
+; CHECK-FAST: fp-armv8_vmaxnm_u
+; CHECK-FAST-NOT: vcmp
+; CHECK-FAST: vmaxnm.f32
+; CHECK: fp-armv8_vmaxnm_u
+; CHECK-NOT: vmaxnm.f32
+ %cmp = fcmp ugt float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+}
+
+
+declare <4 x float> @llvm.arm.neon.vminnm.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x float> @llvm.arm.neon.vminnm.v2f32(<2 x float>, <2 x float>) nounwind readnone
+declare <4 x float> @llvm.arm.neon.vmaxnm.v4f32(<4 x float>, <4 x float>) nounwind readnone
+declare <2 x float> @llvm.arm.neon.vmaxnm.v2f32(<2 x float>, <2 x float>) nounwind readnone
diff --git a/test/CodeGen/ARM/vmla.ll b/test/CodeGen/ARM/vmla.ll
index 9c6b210be797..caf655609c2b 100644
--- a/test/CodeGen/ARM/vmla.ll
+++ b/test/CodeGen/ARM/vmla.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
-;CHECK: vmlai8:
+;CHECK-LABEL: vmlai8:
;CHECK: vmla.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -12,7 +12,7 @@ define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
}
define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlai16:
+;CHECK-LABEL: vmlai16:
;CHECK: vmla.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -23,7 +23,7 @@ define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @vmlai32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlai32:
+;CHECK-LABEL: vmlai32:
;CHECK: vmla.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -34,7 +34,7 @@ define <2 x i32> @vmlai32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
-;CHECK: vmlaf32:
+;CHECK-LABEL: vmlaf32:
;CHECK: vmla.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -45,7 +45,7 @@ define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
}
define <16 x i8> @vmlaQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
-;CHECK: vmlaQi8:
+;CHECK-LABEL: vmlaQi8:
;CHECK: vmla.i8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -56,7 +56,7 @@ define <16 x i8> @vmlaQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind
}
define <8 x i16> @vmlaQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vmlaQi16:
+;CHECK-LABEL: vmlaQi16:
;CHECK: vmla.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -67,7 +67,7 @@ define <8 x i16> @vmlaQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
}
define <4 x i32> @vmlaQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vmlaQi32:
+;CHECK-LABEL: vmlaQi32:
;CHECK: vmla.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -78,7 +78,7 @@ define <4 x i32> @vmlaQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind
}
define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
-;CHECK: vmlaQf32:
+;CHECK-LABEL: vmlaQf32:
;CHECK: vmla.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -89,7 +89,7 @@ define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C)
}
define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlals8:
+;CHECK-LABEL: vmlals8:
;CHECK: vmlal.s8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -102,7 +102,7 @@ define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlals16:
+;CHECK-LABEL: vmlals16:
;CHECK: vmlal.s16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -115,7 +115,7 @@ define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlals32:
+;CHECK-LABEL: vmlals32:
;CHECK: vmlal.s32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -128,7 +128,7 @@ define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlalu8:
+;CHECK-LABEL: vmlalu8:
;CHECK: vmlal.u8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -141,7 +141,7 @@ define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlalu16:
+;CHECK-LABEL: vmlalu16:
;CHECK: vmlal.u16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -154,7 +154,7 @@ define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlalu32:
+;CHECK-LABEL: vmlalu32:
;CHECK: vmlal.u32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
diff --git a/test/CodeGen/ARM/vmls.ll b/test/CodeGen/ARM/vmls.ll
index 65e7fe41bb3a..61f3424909e3 100644
--- a/test/CodeGen/ARM/vmls.ll
+++ b/test/CodeGen/ARM/vmls.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
-;CHECK: vmlsi8:
+;CHECK-LABEL: vmlsi8:
;CHECK: vmls.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -12,7 +12,7 @@ define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
}
define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlsi16:
+;CHECK-LABEL: vmlsi16:
;CHECK: vmls.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -23,7 +23,7 @@ define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i32> @vmlsi32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlsi32:
+;CHECK-LABEL: vmlsi32:
;CHECK: vmls.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -34,7 +34,7 @@ define <2 x i32> @vmlsi32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
-;CHECK: vmlsf32:
+;CHECK-LABEL: vmlsf32:
;CHECK: vmls.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -45,7 +45,7 @@ define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
}
define <16 x i8> @vmlsQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
-;CHECK: vmlsQi8:
+;CHECK-LABEL: vmlsQi8:
;CHECK: vmls.i8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -56,7 +56,7 @@ define <16 x i8> @vmlsQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind
}
define <8 x i16> @vmlsQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vmlsQi16:
+;CHECK-LABEL: vmlsQi16:
;CHECK: vmls.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -67,7 +67,7 @@ define <8 x i16> @vmlsQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind
}
define <4 x i32> @vmlsQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vmlsQi32:
+;CHECK-LABEL: vmlsQi32:
;CHECK: vmls.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -78,7 +78,7 @@ define <4 x i32> @vmlsQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind
}
define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
-;CHECK: vmlsQf32:
+;CHECK-LABEL: vmlsQf32:
;CHECK: vmls.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -89,7 +89,7 @@ define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C)
}
define <8 x i16> @vmlsls8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlsls8:
+;CHECK-LABEL: vmlsls8:
;CHECK: vmlsl.s8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -102,7 +102,7 @@ define <8 x i16> @vmlsls8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlsls16:
+;CHECK-LABEL: vmlsls16:
;CHECK: vmlsl.s16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -115,7 +115,7 @@ define <4 x i32> @vmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlsls32:
+;CHECK-LABEL: vmlsls32:
;CHECK: vmlsl.s32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -128,7 +128,7 @@ define <2 x i64> @vmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind
}
define <8 x i16> @vmlslu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlslu8:
+;CHECK-LABEL: vmlslu8:
;CHECK: vmlsl.u8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -141,7 +141,7 @@ define <8 x i16> @vmlslu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
}
define <4 x i32> @vmlslu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlslu16:
+;CHECK-LABEL: vmlslu16:
;CHECK: vmlsl.u16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -154,7 +154,7 @@ define <4 x i32> @vmlslu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind
}
define <2 x i64> @vmlslu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlslu32:
+;CHECK-LABEL: vmlslu32:
;CHECK: vmlsl.u32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i32>* %B
diff --git a/test/CodeGen/ARM/vmov.ll b/test/CodeGen/ARM/vmov.ll
index 0c2387960b4e..8b63138bda81 100644
--- a/test/CodeGen/ARM/vmov.ll
+++ b/test/CodeGen/ARM/vmov.ll
@@ -1,169 +1,169 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @v_movi8() nounwind {
-;CHECK: v_movi8:
+;CHECK-LABEL: v_movi8:
;CHECK: vmov.i8 d{{.*}}, #0x8
ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <4 x i16> @v_movi16a() nounwind {
-;CHECK: v_movi16a:
+;CHECK-LABEL: v_movi16a:
;CHECK: vmov.i16 d{{.*}}, #0x10
ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
}
define <4 x i16> @v_movi16b() nounwind {
-;CHECK: v_movi16b:
+;CHECK-LABEL: v_movi16b:
;CHECK: vmov.i16 d{{.*}}, #0x1000
ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096 >
}
define <4 x i16> @v_mvni16a() nounwind {
-;CHECK: v_mvni16a:
+;CHECK-LABEL: v_mvni16a:
;CHECK: vmvn.i16 d{{.*}}, #0x10
ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
}
define <4 x i16> @v_mvni16b() nounwind {
-;CHECK: v_mvni16b:
+;CHECK-LABEL: v_mvni16b:
;CHECK: vmvn.i16 d{{.*}}, #0x1000
ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
}
define <2 x i32> @v_movi32a() nounwind {
-;CHECK: v_movi32a:
+;CHECK-LABEL: v_movi32a:
;CHECK: vmov.i32 d{{.*}}, #0x20
ret <2 x i32> < i32 32, i32 32 >
}
define <2 x i32> @v_movi32b() nounwind {
-;CHECK: v_movi32b:
+;CHECK-LABEL: v_movi32b:
;CHECK: vmov.i32 d{{.*}}, #0x2000
ret <2 x i32> < i32 8192, i32 8192 >
}
define <2 x i32> @v_movi32c() nounwind {
-;CHECK: v_movi32c:
+;CHECK-LABEL: v_movi32c:
;CHECK: vmov.i32 d{{.*}}, #0x200000
ret <2 x i32> < i32 2097152, i32 2097152 >
}
define <2 x i32> @v_movi32d() nounwind {
-;CHECK: v_movi32d:
+;CHECK-LABEL: v_movi32d:
;CHECK: vmov.i32 d{{.*}}, #0x20000000
ret <2 x i32> < i32 536870912, i32 536870912 >
}
define <2 x i32> @v_movi32e() nounwind {
-;CHECK: v_movi32e:
+;CHECK-LABEL: v_movi32e:
;CHECK: vmov.i32 d{{.*}}, #0x20ff
ret <2 x i32> < i32 8447, i32 8447 >
}
define <2 x i32> @v_movi32f() nounwind {
-;CHECK: v_movi32f:
+;CHECK-LABEL: v_movi32f:
;CHECK: vmov.i32 d{{.*}}, #0x20ffff
ret <2 x i32> < i32 2162687, i32 2162687 >
}
define <2 x i32> @v_mvni32a() nounwind {
-;CHECK: v_mvni32a:
+;CHECK-LABEL: v_mvni32a:
;CHECK: vmvn.i32 d{{.*}}, #0x20
ret <2 x i32> < i32 4294967263, i32 4294967263 >
}
define <2 x i32> @v_mvni32b() nounwind {
-;CHECK: v_mvni32b:
+;CHECK-LABEL: v_mvni32b:
;CHECK: vmvn.i32 d{{.*}}, #0x2000
ret <2 x i32> < i32 4294959103, i32 4294959103 >
}
define <2 x i32> @v_mvni32c() nounwind {
-;CHECK: v_mvni32c:
+;CHECK-LABEL: v_mvni32c:
;CHECK: vmvn.i32 d{{.*}}, #0x200000
ret <2 x i32> < i32 4292870143, i32 4292870143 >
}
define <2 x i32> @v_mvni32d() nounwind {
-;CHECK: v_mvni32d:
+;CHECK-LABEL: v_mvni32d:
;CHECK: vmvn.i32 d{{.*}}, #0x20000000
ret <2 x i32> < i32 3758096383, i32 3758096383 >
}
define <2 x i32> @v_mvni32e() nounwind {
-;CHECK: v_mvni32e:
+;CHECK-LABEL: v_mvni32e:
;CHECK: vmvn.i32 d{{.*}}, #0x20ff
ret <2 x i32> < i32 4294958848, i32 4294958848 >
}
define <2 x i32> @v_mvni32f() nounwind {
-;CHECK: v_mvni32f:
+;CHECK-LABEL: v_mvni32f:
;CHECK: vmvn.i32 d{{.*}}, #0x20ffff
ret <2 x i32> < i32 4292804608, i32 4292804608 >
}
define <1 x i64> @v_movi64() nounwind {
-;CHECK: v_movi64:
+;CHECK-LABEL: v_movi64:
;CHECK: vmov.i64 d{{.*}}, #0xff0000ff0000ffff
ret <1 x i64> < i64 18374687574888349695 >
}
define <16 x i8> @v_movQi8() nounwind {
-;CHECK: v_movQi8:
+;CHECK-LABEL: v_movQi8:
;CHECK: vmov.i8 q{{.*}}, #0x8
ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
}
define <8 x i16> @v_movQi16a() nounwind {
-;CHECK: v_movQi16a:
+;CHECK-LABEL: v_movQi16a:
;CHECK: vmov.i16 q{{.*}}, #0x10
ret <8 x i16> < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
}
define <8 x i16> @v_movQi16b() nounwind {
-;CHECK: v_movQi16b:
+;CHECK-LABEL: v_movQi16b:
;CHECK: vmov.i16 q{{.*}}, #0x1000
ret <8 x i16> < i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096 >
}
define <4 x i32> @v_movQi32a() nounwind {
-;CHECK: v_movQi32a:
+;CHECK-LABEL: v_movQi32a:
;CHECK: vmov.i32 q{{.*}}, #0x20
ret <4 x i32> < i32 32, i32 32, i32 32, i32 32 >
}
define <4 x i32> @v_movQi32b() nounwind {
-;CHECK: v_movQi32b:
+;CHECK-LABEL: v_movQi32b:
;CHECK: vmov.i32 q{{.*}}, #0x2000
ret <4 x i32> < i32 8192, i32 8192, i32 8192, i32 8192 >
}
define <4 x i32> @v_movQi32c() nounwind {
-;CHECK: v_movQi32c:
+;CHECK-LABEL: v_movQi32c:
;CHECK: vmov.i32 q{{.*}}, #0x200000
ret <4 x i32> < i32 2097152, i32 2097152, i32 2097152, i32 2097152 >
}
define <4 x i32> @v_movQi32d() nounwind {
-;CHECK: v_movQi32d:
+;CHECK-LABEL: v_movQi32d:
;CHECK: vmov.i32 q{{.*}}, #0x20000000
ret <4 x i32> < i32 536870912, i32 536870912, i32 536870912, i32 536870912 >
}
define <4 x i32> @v_movQi32e() nounwind {
-;CHECK: v_movQi32e:
+;CHECK-LABEL: v_movQi32e:
;CHECK: vmov.i32 q{{.*}}, #0x20ff
ret <4 x i32> < i32 8447, i32 8447, i32 8447, i32 8447 >
}
define <4 x i32> @v_movQi32f() nounwind {
-;CHECK: v_movQi32f:
+;CHECK-LABEL: v_movQi32f:
;CHECK: vmov.i32 q{{.*}}, #0x20ffff
ret <4 x i32> < i32 2162687, i32 2162687, i32 2162687, i32 2162687 >
}
define <2 x i64> @v_movQi64() nounwind {
-;CHECK: v_movQi64:
+;CHECK-LABEL: v_movQi64:
;CHECK: vmov.i64 q{{.*}}, #0xff0000ff0000ffff
ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
}
@@ -172,7 +172,7 @@ define <2 x i64> @v_movQi64() nounwind {
%struct.int8x8_t = type { <8 x i8> }
define void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
-;CHECK: vdupn128:
+;CHECK-LABEL: vdupn128:
;CHECK: vmov.i8 d{{.*}}, #0x80
%0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>, <8 x i8>* %0, align 8
@@ -181,7 +181,7 @@ entry:
define void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
-;CHECK: vdupnneg75:
+;CHECK-LABEL: vdupnneg75:
;CHECK: vmov.i8 d{{.*}}, #0xb5
%0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75>, <8 x i8>* %0, align 8
@@ -189,7 +189,7 @@ entry:
}
define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
-;CHECK: vmovls8:
+;CHECK-LABEL: vmovls8:
;CHECK: vmovl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
@@ -197,7 +197,7 @@ define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
}
define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
-;CHECK: vmovls16:
+;CHECK-LABEL: vmovls16:
;CHECK: vmovl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
@@ -205,7 +205,7 @@ define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
}
define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
-;CHECK: vmovls32:
+;CHECK-LABEL: vmovls32:
;CHECK: vmovl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
@@ -213,7 +213,7 @@ define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
}
define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
-;CHECK: vmovlu8:
+;CHECK-LABEL: vmovlu8:
;CHECK: vmovl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
@@ -221,7 +221,7 @@ define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
}
define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
-;CHECK: vmovlu16:
+;CHECK-LABEL: vmovlu16:
;CHECK: vmovl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
@@ -229,7 +229,7 @@ define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
}
define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
-;CHECK: vmovlu32:
+;CHECK-LABEL: vmovlu32:
;CHECK: vmovl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
@@ -237,7 +237,7 @@ define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
}
define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
-;CHECK: vmovni16:
+;CHECK-LABEL: vmovni16:
;CHECK: vmovn.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = trunc <8 x i16> %tmp1 to <8 x i8>
@@ -245,7 +245,7 @@ define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vmovni32(<4 x i32>* %A) nounwind {
-;CHECK: vmovni32:
+;CHECK-LABEL: vmovni32:
;CHECK: vmovn.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
@@ -253,7 +253,7 @@ define <4 x i16> @vmovni32(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vmovni64(<2 x i64>* %A) nounwind {
-;CHECK: vmovni64:
+;CHECK-LABEL: vmovni64:
;CHECK: vmovn.i64
%tmp1 = load <2 x i64>* %A
%tmp2 = trunc <2 x i64> %tmp1 to <2 x i32>
@@ -261,7 +261,7 @@ define <2 x i32> @vmovni64(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovns16:
+;CHECK-LABEL: vqmovns16:
;CHECK: vqmovn.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %tmp1)
@@ -269,7 +269,7 @@ define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqmovns32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovns32:
+;CHECK-LABEL: vqmovns32:
;CHECK: vqmovn.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %tmp1)
@@ -277,7 +277,7 @@ define <4 x i16> @vqmovns32(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqmovns64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovns64:
+;CHECK-LABEL: vqmovns64:
;CHECK: vqmovn.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %tmp1)
@@ -285,7 +285,7 @@ define <2 x i32> @vqmovns64(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqmovnu16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovnu16:
+;CHECK-LABEL: vqmovnu16:
;CHECK: vqmovn.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %tmp1)
@@ -293,7 +293,7 @@ define <8 x i8> @vqmovnu16(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqmovnu32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovnu32:
+;CHECK-LABEL: vqmovnu32:
;CHECK: vqmovn.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %tmp1)
@@ -301,7 +301,7 @@ define <4 x i16> @vqmovnu32(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqmovnu64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovnu64:
+;CHECK-LABEL: vqmovnu64:
;CHECK: vqmovn.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %tmp1)
@@ -309,7 +309,7 @@ define <2 x i32> @vqmovnu64(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqmovuns16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovuns16:
+;CHECK-LABEL: vqmovuns16:
;CHECK: vqmovun.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %tmp1)
@@ -317,7 +317,7 @@ define <8 x i8> @vqmovuns16(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqmovuns32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovuns32:
+;CHECK-LABEL: vqmovuns32:
;CHECK: vqmovun.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %tmp1)
@@ -325,7 +325,7 @@ define <4 x i16> @vqmovuns32(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqmovuns64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovuns64:
+;CHECK-LABEL: vqmovuns64:
;CHECK: vqmovun.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %tmp1)
@@ -358,7 +358,7 @@ define void @noTruncStore(<4 x i32>* %a, <4 x i16>* %b) nounwind {
; rdar://10437054
define void @v_mov_v2f32(<2 x float>* nocapture %p) nounwind {
entry:
-;CHECK: v_mov_v2f32:
+;CHECK-LABEL: v_mov_v2f32:
;CHECK: vmov.f32 d{{.*}}, #-1.600000e+01
store <2 x float> <float -1.600000e+01, float -1.600000e+01>, <2 x float>* %p, align 4
ret void
@@ -366,7 +366,7 @@ entry:
define void @v_mov_v4f32(<4 x float>* nocapture %p) nounwind {
entry:
-;CHECK: v_mov_v4f32:
+;CHECK-LABEL: v_mov_v4f32:
;CHECK: vmov.f32 q{{.*}}, #3.100000e+01
store <4 x float> <float 3.100000e+01, float 3.100000e+01, float 3.100000e+01, float 3.100000e+01>, <4 x float>* %p, align 4
ret void
@@ -374,7 +374,7 @@ entry:
define void @v_mov_v4f32_undef(<4 x float> * nocapture %p) nounwind {
entry:
-;CHECK: v_mov_v4f32_undef:
+;CHECK-LABEL: v_mov_v4f32_undef:
;CHECK: vmov.f32 q{{.*}}, #1.000000e+00
%a = load <4 x float> *%p
%b = fadd <4 x float> %a, <float undef, float 1.0, float 1.0, float 1.0>
@@ -386,7 +386,7 @@ entry:
; rdar://10723651
define void @any_extend(<4 x i1> %x, <4 x i32> %y) nounwind ssp {
entry:
-;CHECK: any_extend
+;CHECK-LABEL: any_extend:
;CHECK: vmovl
%and.i186 = zext <4 x i1> %x to <4 x i32>
%add.i185 = sub <4 x i32> %and.i186, %y
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index eb5ad8f0c3d0..de329acdf3c7 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmuli8:
+;CHECK-LABEL: vmuli8:
;CHECK: vmul.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmuli16:
+;CHECK-LABEL: vmuli16:
;CHECK: vmul.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmuli32:
+;CHECK-LABEL: vmuli32:
;CHECK: vmul.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vmulf32:
+;CHECK-LABEL: vmulf32:
;CHECK: vmul.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -37,7 +37,7 @@ define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmulp8:
+;CHECK-LABEL: vmulp8:
;CHECK: vmul.p8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -46,7 +46,7 @@ define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmulQi8:
+;CHECK-LABEL: vmulQi8:
;CHECK: vmul.i8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -55,7 +55,7 @@ define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmulQi16:
+;CHECK-LABEL: vmulQi16:
;CHECK: vmul.i16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -64,7 +64,7 @@ define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmulQi32:
+;CHECK-LABEL: vmulQi32:
;CHECK: vmul.i32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -73,7 +73,7 @@ define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vmulQf32:
+;CHECK-LABEL: vmulQf32:
;CHECK: vmul.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -82,7 +82,7 @@ define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
}
define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmulQp8:
+;CHECK-LABEL: vmulQp8:
;CHECK: vmul.p8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -95,7 +95,7 @@ declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>) nounwind rea
define arm_aapcs_vfpcc <2 x float> @test_vmul_lanef32(<2 x float> %arg0_float32x2_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
-; CHECK: test_vmul_lanef32:
+; CHECK-LABEL: test_vmul_lanef32:
; CHECK: vmul.f32 d0, d0, d1[0]
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <2 x i32> zeroinitializer ; <<2 x float>> [#uses=1]
%1 = fmul <2 x float> %0, %arg0_float32x2_t ; <<2 x float>> [#uses=1]
@@ -104,7 +104,7 @@ entry:
define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
-; CHECK: test_vmul_lanes16:
+; CHECK-LABEL: test_vmul_lanes16:
; CHECK: vmul.i16 d0, d0, d1[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
%1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
@@ -113,7 +113,7 @@ entry:
define arm_aapcs_vfpcc <2 x i32> @test_vmul_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
-; CHECK: test_vmul_lanes32:
+; CHECK-LABEL: test_vmul_lanes32:
; CHECK: vmul.i32 d0, d0, d1[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
%1 = mul <2 x i32> %0, %arg0_int32x2_t ; <<2 x i32>> [#uses=1]
@@ -122,7 +122,7 @@ entry:
define arm_aapcs_vfpcc <4 x float> @test_vmulQ_lanef32(<4 x float> %arg0_float32x4_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
-; CHECK: test_vmulQ_lanef32:
+; CHECK-LABEL: test_vmulQ_lanef32:
; CHECK: vmul.f32 q0, q0, d2[1]
%0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
%1 = fmul <4 x float> %0, %arg0_float32x4_t ; <<4 x float>> [#uses=1]
@@ -131,7 +131,7 @@ entry:
define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
-; CHECK: test_vmulQ_lanes16:
+; CHECK-LABEL: test_vmulQ_lanes16:
; CHECK: vmul.i16 q0, q0, d2[1]
%0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
%1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
@@ -140,7 +140,7 @@ entry:
define arm_aapcs_vfpcc <4 x i32> @test_vmulQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
-; CHECK: test_vmulQ_lanes32:
+; CHECK-LABEL: test_vmulQ_lanes32:
; CHECK: vmul.i32 q0, q0, d2[1]
%0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
%1 = mul <4 x i32> %0, %arg0_int32x4_t ; <<4 x i32>> [#uses=1]
@@ -148,7 +148,7 @@ entry:
}
define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmulls8:
+;CHECK-LABEL: vmulls8:
;CHECK: vmull.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -159,7 +159,7 @@ define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmulls8_int:
+;CHECK-LABEL: vmulls8_int:
;CHECK: vmull.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -168,7 +168,7 @@ define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmulls16:
+;CHECK-LABEL: vmulls16:
;CHECK: vmull.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -179,7 +179,7 @@ define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmulls16_int:
+;CHECK-LABEL: vmulls16_int:
;CHECK: vmull.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -188,7 +188,7 @@ define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmulls32:
+;CHECK-LABEL: vmulls32:
;CHECK: vmull.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -199,7 +199,7 @@ define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x i64> @vmulls32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmulls32_int:
+;CHECK-LABEL: vmulls32_int:
;CHECK: vmull.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -208,7 +208,7 @@ define <2 x i64> @vmulls32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmullu8:
+;CHECK-LABEL: vmullu8:
;CHECK: vmull.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -219,7 +219,7 @@ define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmullu8_int:
+;CHECK-LABEL: vmullu8_int:
;CHECK: vmull.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -228,7 +228,7 @@ define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmullu16:
+;CHECK-LABEL: vmullu16:
;CHECK: vmull.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -239,7 +239,7 @@ define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmullu16_int:
+;CHECK-LABEL: vmullu16_int:
;CHECK: vmull.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -248,7 +248,7 @@ define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmullu32:
+;CHECK-LABEL: vmullu32:
;CHECK: vmull.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -259,7 +259,7 @@ define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x i64> @vmullu32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmullu32_int:
+;CHECK-LABEL: vmullu32_int:
;CHECK: vmull.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -268,7 +268,7 @@ define <2 x i64> @vmullu32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmullp8:
+;CHECK-LABEL: vmullp8:
;CHECK: vmull.p8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -441,7 +441,7 @@ define <2 x i64> @vmull_extvec_u32(<2 x i32> %arg) nounwind {
; rdar://9197392
define void @distribute(i16* %dst, i8* %src, i32 %mul) nounwind {
entry:
-; CHECK: distribute:
+; CHECK-LABEL: distribute:
; CHECK: vmull.u8 [[REG1:(q[0-9]+)]], d{{.*}}, [[REG2:(d[0-9]+)]]
; CHECK: vmlal.u8 [[REG1]], d{{.*}}, [[REG2]]
%0 = trunc i32 %mul to i8
@@ -515,6 +515,17 @@ entry:
ret void
}
+define <8 x i8> @no_distribute(<8 x i8> %a, <8 x i8> %b) nounwind {
+entry:
+; CHECK: no_distribute
+; CHECK: vadd.i8
+; CHECK: vmul.i8
+; CHECK-NOT: vmla.i8
+ %0 = add <8 x i8> %a, %b
+ %1 = mul <8 x i8> %0, %0
+ ret <8 x i8> %1
+}
+
; If one operand has a zero-extend and the other a sign-extend, vmull
; cannot be used.
define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
@@ -623,3 +634,21 @@ entry:
store <4 x i32> %predphi290.v.i, <4 x i32>* undef, align 4
ret void
}
+
+define void @foo(<4 x float> * %a, <4 x float>* nocapture %dst, float* nocapture readonly %src) nounwind {
+; Look for doing a normal scalar FP load rather than a to-all-lanes load.
+; e.g., "ldr s0, [r2]" rather than "vld1.32 {d18[], d19[]}, [r2:32]"
+; Then check that the vector multiply has folded the splat to all lanes
+; and used a vector * scalar instruction.
+; CHECK: vldr {{s[0-9]+}}, [r2]
+; CHECK: vmul.f32 q8, q8, d0[0]
+ %tmp = load float* %src, align 4
+ %tmp5 = load <4 x float>* %a, align 4
+ %tmp6 = insertelement <4 x float> undef, float %tmp, i32 0
+ %tmp7 = insertelement <4 x float> %tmp6, float %tmp, i32 1
+ %tmp8 = insertelement <4 x float> %tmp7, float %tmp, i32 2
+ %tmp9 = insertelement <4 x float> %tmp8, float %tmp, i32 3
+ %tmp10 = fmul <4 x float> %tmp9, %tmp5
+ store <4 x float> %tmp10, <4 x float>* %dst, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/vneg.ll b/test/CodeGen/ARM/vneg.ll
index 4a10732458e8..1be4f748213a 100644
--- a/test/CodeGen/ARM/vneg.ll
+++ b/test/CodeGen/ARM/vneg.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
-;CHECK: vnegs8:
+;CHECK-LABEL: vnegs8:
;CHECK: vneg.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = sub <8 x i8> zeroinitializer, %tmp1
@@ -9,7 +9,7 @@ define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
-;CHECK: vnegs16:
+;CHECK-LABEL: vnegs16:
;CHECK: vneg.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = sub <4 x i16> zeroinitializer, %tmp1
@@ -17,7 +17,7 @@ define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
-;CHECK: vnegs32:
+;CHECK-LABEL: vnegs32:
;CHECK: vneg.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = sub <2 x i32> zeroinitializer, %tmp1
@@ -25,7 +25,7 @@ define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
}
define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
-;CHECK: vnegf32:
+;CHECK-LABEL: vnegf32:
;CHECK: vneg.f32
%tmp1 = load <2 x float>* %A
%tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
@@ -33,7 +33,7 @@ define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
}
define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
-;CHECK: vnegQs8:
+;CHECK-LABEL: vnegQs8:
;CHECK: vneg.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = sub <16 x i8> zeroinitializer, %tmp1
@@ -41,7 +41,7 @@ define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
-;CHECK: vnegQs16:
+;CHECK-LABEL: vnegQs16:
;CHECK: vneg.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = sub <8 x i16> zeroinitializer, %tmp1
@@ -49,7 +49,7 @@ define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
-;CHECK: vnegQs32:
+;CHECK-LABEL: vnegQs32:
;CHECK: vneg.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = sub <4 x i32> zeroinitializer, %tmp1
@@ -57,7 +57,7 @@ define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
}
define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
-;CHECK: vnegQf32:
+;CHECK-LABEL: vnegQf32:
;CHECK: vneg.f32
%tmp1 = load <4 x float>* %A
%tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
@@ -65,7 +65,7 @@ define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
}
define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
-;CHECK: vqnegs8:
+;CHECK-LABEL: vqnegs8:
;CHECK: vqneg.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
@@ -73,7 +73,7 @@ define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
-;CHECK: vqnegs16:
+;CHECK-LABEL: vqnegs16:
;CHECK: vqneg.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
@@ -81,7 +81,7 @@ define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
-;CHECK: vqnegs32:
+;CHECK-LABEL: vqnegs32:
;CHECK: vqneg.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
@@ -89,7 +89,7 @@ define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
}
define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
-;CHECK: vqnegQs8:
+;CHECK-LABEL: vqnegQs8:
;CHECK: vqneg.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
@@ -97,7 +97,7 @@ define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
-;CHECK: vqnegQs16:
+;CHECK-LABEL: vqnegQs16:
;CHECK: vqneg.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
@@ -105,7 +105,7 @@ define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
-;CHECK: vqnegQs32:
+;CHECK-LABEL: vqnegQs32:
;CHECK: vqneg.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
diff --git a/test/CodeGen/ARM/vpadal.ll b/test/CodeGen/ARM/vpadal.ll
index 7296e936cd73..a616a8d270a7 100644
--- a/test/CodeGen/ARM/vpadal.ll
+++ b/test/CodeGen/ARM/vpadal.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpadals8:
+;CHECK-LABEL: vpadals8:
;CHECK: vpadal.s8
%tmp1 = load <4 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
}
define <2 x i32> @vpadals16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpadals16:
+;CHECK-LABEL: vpadals16:
;CHECK: vpadal.s16
%tmp1 = load <2 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <2 x i32> @vpadals16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
}
define <1 x i64> @vpadals32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpadals32:
+;CHECK-LABEL: vpadals32:
;CHECK: vpadal.s32
%tmp1 = load <1 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <1 x i64> @vpadals32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
}
define <4 x i16> @vpadalu8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpadalu8:
+;CHECK-LABEL: vpadalu8:
;CHECK: vpadal.u8
%tmp1 = load <4 x i16>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <4 x i16> @vpadalu8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
}
define <2 x i32> @vpadalu16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpadalu16:
+;CHECK-LABEL: vpadalu16:
;CHECK: vpadal.u16
%tmp1 = load <2 x i32>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <2 x i32> @vpadalu16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
}
define <1 x i64> @vpadalu32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpadalu32:
+;CHECK-LABEL: vpadalu32:
;CHECK: vpadal.u32
%tmp1 = load <1 x i64>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <1 x i64> @vpadalu32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vpadalQs8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vpadalQs8:
+;CHECK-LABEL: vpadalQs8:
;CHECK: vpadal.s8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <16 x i8>* %B
@@ -64,7 +64,7 @@ define <8 x i16> @vpadalQs8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
}
define <4 x i32> @vpadalQs16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vpadalQs16:
+;CHECK-LABEL: vpadalQs16:
;CHECK: vpadal.s16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <8 x i16>* %B
@@ -73,7 +73,7 @@ define <4 x i32> @vpadalQs16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
}
define <2 x i64> @vpadalQs32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vpadalQs32:
+;CHECK-LABEL: vpadalQs32:
;CHECK: vpadal.s32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <4 x i32>* %B
@@ -82,7 +82,7 @@ define <2 x i64> @vpadalQs32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
}
define <8 x i16> @vpadalQu8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vpadalQu8:
+;CHECK-LABEL: vpadalQu8:
;CHECK: vpadal.u8
%tmp1 = load <8 x i16>* %A
%tmp2 = load <16 x i8>* %B
@@ -91,7 +91,7 @@ define <8 x i16> @vpadalQu8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
}
define <4 x i32> @vpadalQu16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vpadalQu16:
+;CHECK-LABEL: vpadalQu16:
;CHECK: vpadal.u16
%tmp1 = load <4 x i32>* %A
%tmp2 = load <8 x i16>* %B
@@ -100,7 +100,7 @@ define <4 x i32> @vpadalQu16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
}
define <2 x i64> @vpadalQu32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vpadalQu32:
+;CHECK-LABEL: vpadalQu32:
;CHECK: vpadal.u32
%tmp1 = load <2 x i64>* %A
%tmp2 = load <4 x i32>* %B
diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll
index 1ba68f552385..f84721f996cd 100644
--- a/test/CodeGen/ARM/vpadd.ll
+++ b/test/CodeGen/ARM/vpadd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpaddi8:
+;CHECK-LABEL: vpaddi8:
;CHECK: vpadd.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpaddi16:
+;CHECK-LABEL: vpaddi16:
;CHECK: vpadd.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpaddi32:
+;CHECK-LABEL: vpaddi32:
;CHECK: vpadd.i32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpaddf32:
+;CHECK-LABEL: vpaddf32:
;CHECK: vpadd.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -43,7 +43,7 @@ declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind read
declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
-;CHECK: vpaddls8:
+;CHECK-LABEL: vpaddls8:
;CHECK: vpaddl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
@@ -51,7 +51,7 @@ define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
}
define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
-;CHECK: vpaddls16:
+;CHECK-LABEL: vpaddls16:
;CHECK: vpaddl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
@@ -59,7 +59,7 @@ define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
}
define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
-;CHECK: vpaddls32:
+;CHECK-LABEL: vpaddls32:
;CHECK: vpaddl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
@@ -67,7 +67,7 @@ define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
}
define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
-;CHECK: vpaddlu8:
+;CHECK-LABEL: vpaddlu8:
;CHECK: vpaddl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
@@ -75,7 +75,7 @@ define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
}
define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
-;CHECK: vpaddlu16:
+;CHECK-LABEL: vpaddlu16:
;CHECK: vpaddl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
@@ -83,7 +83,7 @@ define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
}
define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
-;CHECK: vpaddlu32:
+;CHECK-LABEL: vpaddlu32:
;CHECK: vpaddl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
@@ -91,7 +91,7 @@ define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
}
define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
-;CHECK: vpaddlQs8:
+;CHECK-LABEL: vpaddlQs8:
;CHECK: vpaddl.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
@@ -99,7 +99,7 @@ define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
}
define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
-;CHECK: vpaddlQs16:
+;CHECK-LABEL: vpaddlQs16:
;CHECK: vpaddl.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
@@ -107,7 +107,7 @@ define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
}
define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
-;CHECK: vpaddlQs32:
+;CHECK-LABEL: vpaddlQs32:
;CHECK: vpaddl.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
@@ -115,7 +115,7 @@ define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
}
define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
-;CHECK: vpaddlQu8:
+;CHECK-LABEL: vpaddlQu8:
;CHECK: vpaddl.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
@@ -123,7 +123,7 @@ define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
}
define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
-;CHECK: vpaddlQu16:
+;CHECK-LABEL: vpaddlQu16:
;CHECK: vpaddl.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
@@ -131,7 +131,7 @@ define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
}
define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
-;CHECK: vpaddlQu32:
+;CHECK-LABEL: vpaddlQu32:
;CHECK: vpaddl.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
diff --git a/test/CodeGen/ARM/vpminmax.ll b/test/CodeGen/ARM/vpminmax.ll
index b75bcc99f6b6..c68b3193c19a 100644
--- a/test/CodeGen/ARM/vpminmax.ll
+++ b/test/CodeGen/ARM/vpminmax.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vpmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmins8:
+;CHECK-LABEL: vpmins8:
;CHECK: vpmin.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vpmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vpmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmins16:
+;CHECK-LABEL: vpmins16:
;CHECK: vpmin.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vpmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vpmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmins32:
+;CHECK-LABEL: vpmins32:
;CHECK: vpmin.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vpmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vpminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpminu8:
+;CHECK-LABEL: vpminu8:
;CHECK: vpmin.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -37,7 +37,7 @@ define <8 x i8> @vpminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vpminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpminu16:
+;CHECK-LABEL: vpminu16:
;CHECK: vpmin.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -46,7 +46,7 @@ define <4 x i16> @vpminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vpminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpminu32:
+;CHECK-LABEL: vpminu32:
;CHECK: vpmin.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -55,7 +55,7 @@ define <2 x i32> @vpminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vpminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpminf32:
+;CHECK-LABEL: vpminf32:
;CHECK: vpmin.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -74,7 +74,7 @@ declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind rea
declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
define <8 x i8> @vpmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmaxs8:
+;CHECK-LABEL: vpmaxs8:
;CHECK: vpmax.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -83,7 +83,7 @@ define <8 x i8> @vpmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vpmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmaxs16:
+;CHECK-LABEL: vpmaxs16:
;CHECK: vpmax.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -92,7 +92,7 @@ define <4 x i16> @vpmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vpmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmaxs32:
+;CHECK-LABEL: vpmaxs32:
;CHECK: vpmax.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -101,7 +101,7 @@ define <2 x i32> @vpmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i8> @vpmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmaxu8:
+;CHECK-LABEL: vpmaxu8:
;CHECK: vpmax.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -110,7 +110,7 @@ define <8 x i8> @vpmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vpmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmaxu16:
+;CHECK-LABEL: vpmaxu16:
;CHECK: vpmax.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -119,7 +119,7 @@ define <4 x i16> @vpmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vpmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmaxu32:
+;CHECK-LABEL: vpmaxu32:
;CHECK: vpmax.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -128,7 +128,7 @@ define <2 x i32> @vpmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <2 x float> @vpmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpmaxf32:
+;CHECK-LABEL: vpmaxf32:
;CHECK: vpmax.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
diff --git a/test/CodeGen/ARM/vqadd.ll b/test/CodeGen/ARM/vqadd.ll
index a1669b60ab56..784076685462 100644
--- a/test/CodeGen/ARM/vqadd.ll
+++ b/test/CodeGen/ARM/vqadd.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqadds8:
+;CHECK-LABEL: vqadds8:
;CHECK: vqadd.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqadds16:
+;CHECK-LABEL: vqadds16:
;CHECK: vqadd.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vqadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqadds32:
+;CHECK-LABEL: vqadds32:
;CHECK: vqadd.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vqadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqadds64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqadds64:
+;CHECK-LABEL: vqadds64:
;CHECK: vqadd.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @vqadds64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <8 x i8> @vqaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqaddu8:
+;CHECK-LABEL: vqaddu8:
;CHECK: vqadd.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -46,7 +46,7 @@ define <8 x i8> @vqaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqaddu16:
+;CHECK-LABEL: vqaddu16:
;CHECK: vqadd.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -55,7 +55,7 @@ define <4 x i16> @vqaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqaddu32:
+;CHECK-LABEL: vqaddu32:
;CHECK: vqadd.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -64,7 +64,7 @@ define <2 x i32> @vqaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqaddu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqaddu64:
+;CHECK-LABEL: vqaddu64:
;CHECK: vqadd.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -73,7 +73,7 @@ define <1 x i64> @vqaddu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @vqaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqaddQs8:
+;CHECK-LABEL: vqaddQs8:
;CHECK: vqadd.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -82,7 +82,7 @@ define <16 x i8> @vqaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqaddQs16:
+;CHECK-LABEL: vqaddQs16:
;CHECK: vqadd.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -91,7 +91,7 @@ define <8 x i16> @vqaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqaddQs32:
+;CHECK-LABEL: vqaddQs32:
;CHECK: vqadd.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -100,7 +100,7 @@ define <4 x i32> @vqaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqaddQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqaddQs64:
+;CHECK-LABEL: vqaddQs64:
;CHECK: vqadd.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -109,7 +109,7 @@ define <2 x i64> @vqaddQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <16 x i8> @vqaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqaddQu8:
+;CHECK-LABEL: vqaddQu8:
;CHECK: vqadd.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -118,7 +118,7 @@ define <16 x i8> @vqaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqaddQu16:
+;CHECK-LABEL: vqaddQu16:
;CHECK: vqadd.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -127,7 +127,7 @@ define <8 x i16> @vqaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqaddQu32:
+;CHECK-LABEL: vqaddQu32:
;CHECK: vqadd.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -136,7 +136,7 @@ define <4 x i32> @vqaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqaddQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqaddQu64:
+;CHECK-LABEL: vqaddQu64:
;CHECK: vqadd.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
diff --git a/test/CodeGen/ARM/vqdmul.ll b/test/CodeGen/ARM/vqdmul.ll
index 08e7d2b2c0d4..d298167d3a91 100644
--- a/test/CodeGen/ARM/vqdmul.ll
+++ b/test/CodeGen/ARM/vqdmul.ll
@@ -3,7 +3,7 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-
target triple = "thumbv7-elf"
define <4 x i16> @vqdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqdmulhs16:
+;CHECK-LABEL: vqdmulhs16:
;CHECK: vqdmulh.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -12,7 +12,7 @@ define <4 x i16> @vqdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqdmulhs32:
+;CHECK-LABEL: vqdmulhs32:
;CHECK: vqdmulh.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -21,7 +21,7 @@ define <2 x i32> @vqdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vqdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqdmulhQs16:
+;CHECK-LABEL: vqdmulhQs16:
;CHECK: vqdmulh.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -30,7 +30,7 @@ define <8 x i16> @vqdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqdmulhQs32:
+;CHECK-LABEL: vqdmulhQs32:
;CHECK: vqdmulh.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -81,7 +81,7 @@ declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind re
declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i16> @vqrdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrdmulhs16:
+;CHECK-LABEL: vqrdmulhs16:
;CHECK: vqrdmulh.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -90,7 +90,7 @@ define <4 x i16> @vqrdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqrdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrdmulhs32:
+;CHECK-LABEL: vqrdmulhs32:
;CHECK: vqrdmulh.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -99,7 +99,7 @@ define <2 x i32> @vqrdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <8 x i16> @vqrdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrdmulhQs16:
+;CHECK-LABEL: vqrdmulhQs16:
;CHECK: vqrdmulh.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -108,7 +108,7 @@ define <8 x i16> @vqrdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqrdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrdmulhQs32:
+;CHECK-LABEL: vqrdmulhQs32:
;CHECK: vqrdmulh.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -159,7 +159,7 @@ declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind r
declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x i32> @vqdmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqdmulls16:
+;CHECK-LABEL: vqdmulls16:
;CHECK: vqdmull.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -168,7 +168,7 @@ define <4 x i32> @vqdmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i64> @vqdmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqdmulls32:
+;CHECK-LABEL: vqdmulls32:
;CHECK: vqdmull.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -197,84 +197,92 @@ entry:
declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-define <4 x i32> @vqdmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vqdmlals16:
+define <4 x i32> @vqdmlals16_natural(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+;CHECK-LABEL: vqdmlals16_natural:
;CHECK: vqdmlal.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i16>* %C
+ %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
+ %tmp5 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
}
-define <2 x i64> @vqdmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vqdmlals32:
+define <2 x i64> @vqdmlals32_natural(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+;CHECK-LABEL: vqdmlals32_natural:
;CHECK: vqdmlal.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i32>* %C
+ %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
+ %tmp5 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
}
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmlal_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlal_lanes16_natural(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
entry:
-; CHECK: test_vqdmlal_lanes16
+; CHECK-LABEL: test_vqdmlal_lanes16_natural:
; CHECK: vqdmlal.s16 q0, d2, d3[1]
%0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
+ %1 = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %arg1_int16x4_t, <4 x i16> %0)
+ %2 = tail call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i32> %1)
+ ret <4 x i32> %2
}
-define arm_aapcs_vfpcc <2 x i64> @test_vqdmlal_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
+define arm_aapcs_vfpcc <2 x i64> @test_vqdmlal_lanes32_natural(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
entry:
-; CHECK: test_vqdmlal_lanes32
+; CHECK-LABEL: test_vqdmlal_lanes32_natural:
; CHECK: vqdmlal.s32 q0, d2, d3[1]
%0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
+ %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg1_int32x2_t, <2 x i32> %0)
+ %2 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i64> %1)
+ ret <2 x i64> %2
}
-declare <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-define <4 x i32> @vqdmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vqdmlsls16:
+define <4 x i32> @vqdmlsls16_natural(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
+;CHECK-LABEL: vqdmlsls16_natural:
;CHECK: vqdmlsl.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = load <4 x i16>* %C
+ %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp2, <4 x i16> %tmp3)
+ %tmp5 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp4)
+ ret <4 x i32> %tmp5
}
-define <2 x i64> @vqdmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vqdmlsls32:
+define <2 x i64> @vqdmlsls32_natural(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
+;CHECK-LABEL: vqdmlsls32_natural:
;CHECK: vqdmlsl.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = load <2 x i32>* %C
+ %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp2, <2 x i32> %tmp3)
+ %tmp5 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp4)
+ ret <2 x i64> %tmp5
}
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsl_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
+define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsl_lanes16_natural(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
entry:
-; CHECK: test_vqdmlsl_lanes16
+; CHECK-LABEL: test_vqdmlsl_lanes16_natural:
; CHECK: vqdmlsl.s16 q0, d2, d3[1]
%0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
+ %1 = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %arg1_int16x4_t, <4 x i16> %0)
+ %2 = tail call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i32> %1)
+ ret <4 x i32> %2
}
-define arm_aapcs_vfpcc <2 x i64> @test_vqdmlsl_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
+define arm_aapcs_vfpcc <2 x i64> @test_vqdmlsl_lanes32_natural(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
entry:
-; CHECK: test_vqdmlsl_lanes32
+; CHECK-LABEL: test_vqdmlsl_lanes32_natural:
; CHECK: vqdmlsl.s32 q0, d2, d3[1]
%0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
+ %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg1_int32x2_t, <2 x i32> %0)
+ %2 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i64> %1)
+ ret <2 x i64> %2
}
-declare <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/test/CodeGen/ARM/vqshl.ll b/test/CodeGen/ARM/vqshl.ll
index e4d29a337cf0..b5cd71613d4a 100644
--- a/test/CodeGen/ARM/vqshl.ll
+++ b/test/CodeGen/ARM/vqshl.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqshls8:
+;CHECK-LABEL: vqshls8:
;CHECK: vqshl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqshls16:
+;CHECK-LABEL: vqshls16:
;CHECK: vqshl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vqshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqshls32:
+;CHECK-LABEL: vqshls32:
;CHECK: vqshl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vqshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqshls64:
+;CHECK-LABEL: vqshls64:
;CHECK: vqshl.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @vqshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <8 x i8> @vqshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqshlu8:
+;CHECK-LABEL: vqshlu8:
;CHECK: vqshl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -46,7 +46,7 @@ define <8 x i8> @vqshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqshlu16:
+;CHECK-LABEL: vqshlu16:
;CHECK: vqshl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -55,7 +55,7 @@ define <4 x i16> @vqshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqshlu32:
+;CHECK-LABEL: vqshlu32:
;CHECK: vqshl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -64,7 +64,7 @@ define <2 x i32> @vqshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqshlu64:
+;CHECK-LABEL: vqshlu64:
;CHECK: vqshl.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -73,7 +73,7 @@ define <1 x i64> @vqshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @vqshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqshlQs8:
+;CHECK-LABEL: vqshlQs8:
;CHECK: vqshl.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -82,7 +82,7 @@ define <16 x i8> @vqshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqshlQs16:
+;CHECK-LABEL: vqshlQs16:
;CHECK: vqshl.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -91,7 +91,7 @@ define <8 x i16> @vqshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqshlQs32:
+;CHECK-LABEL: vqshlQs32:
;CHECK: vqshl.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -100,7 +100,7 @@ define <4 x i32> @vqshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqshlQs64:
+;CHECK-LABEL: vqshlQs64:
;CHECK: vqshl.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -109,7 +109,7 @@ define <2 x i64> @vqshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <16 x i8> @vqshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqshlQu8:
+;CHECK-LABEL: vqshlQu8:
;CHECK: vqshl.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -118,7 +118,7 @@ define <16 x i8> @vqshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqshlQu16:
+;CHECK-LABEL: vqshlQu16:
;CHECK: vqshl.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -127,7 +127,7 @@ define <8 x i16> @vqshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqshlQu32:
+;CHECK-LABEL: vqshlQu32:
;CHECK: vqshl.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -136,7 +136,7 @@ define <4 x i32> @vqshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqshlQu64:
+;CHECK-LABEL: vqshlQu64:
;CHECK: vqshl.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -145,7 +145,7 @@ define <2 x i64> @vqshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <8 x i8> @vqshls_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshls_n8:
+;CHECK-LABEL: vqshls_n8:
;CHECK: vqshl.s8{{.*#7}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -153,7 +153,7 @@ define <8 x i8> @vqshls_n8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vqshls_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshls_n16:
+;CHECK-LABEL: vqshls_n16:
;CHECK: vqshl.s16{{.*#15}}
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
@@ -161,7 +161,7 @@ define <4 x i16> @vqshls_n16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vqshls_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshls_n32:
+;CHECK-LABEL: vqshls_n32:
;CHECK: vqshl.s32{{.*#31}}
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
@@ -169,7 +169,7 @@ define <2 x i32> @vqshls_n32(<2 x i32>* %A) nounwind {
}
define <1 x i64> @vqshls_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshls_n64:
+;CHECK-LABEL: vqshls_n64:
;CHECK: vqshl.s64{{.*#63}}
%tmp1 = load <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
@@ -177,7 +177,7 @@ define <1 x i64> @vqshls_n64(<1 x i64>* %A) nounwind {
}
define <8 x i8> @vqshlu_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshlu_n8:
+;CHECK-LABEL: vqshlu_n8:
;CHECK: vqshl.u8{{.*#7}}
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -185,7 +185,7 @@ define <8 x i8> @vqshlu_n8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vqshlu_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshlu_n16:
+;CHECK-LABEL: vqshlu_n16:
;CHECK: vqshl.u16{{.*#15}}
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
@@ -193,7 +193,7 @@ define <4 x i16> @vqshlu_n16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vqshlu_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshlu_n32:
+;CHECK-LABEL: vqshlu_n32:
;CHECK: vqshl.u32{{.*#31}}
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
@@ -201,7 +201,7 @@ define <2 x i32> @vqshlu_n32(<2 x i32>* %A) nounwind {
}
define <1 x i64> @vqshlu_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshlu_n64:
+;CHECK-LABEL: vqshlu_n64:
;CHECK: vqshl.u64{{.*#63}}
%tmp1 = load <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
@@ -209,7 +209,7 @@ define <1 x i64> @vqshlu_n64(<1 x i64>* %A) nounwind {
}
define <8 x i8> @vqshlsu_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshlsu_n8:
+;CHECK-LABEL: vqshlsu_n8:
;CHECK: vqshlu.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -217,7 +217,7 @@ define <8 x i8> @vqshlsu_n8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vqshlsu_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshlsu_n16:
+;CHECK-LABEL: vqshlsu_n16:
;CHECK: vqshlu.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
@@ -225,7 +225,7 @@ define <4 x i16> @vqshlsu_n16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @vqshlsu_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshlsu_n32:
+;CHECK-LABEL: vqshlsu_n32:
;CHECK: vqshlu.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
@@ -233,7 +233,7 @@ define <2 x i32> @vqshlsu_n32(<2 x i32>* %A) nounwind {
}
define <1 x i64> @vqshlsu_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshlsu_n64:
+;CHECK-LABEL: vqshlsu_n64:
;CHECK: vqshlu.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
@@ -241,7 +241,7 @@ define <1 x i64> @vqshlsu_n64(<1 x i64>* %A) nounwind {
}
define <16 x i8> @vqshlQs_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQs_n8:
+;CHECK-LABEL: vqshlQs_n8:
;CHECK: vqshl.s8{{.*#7}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -249,7 +249,7 @@ define <16 x i8> @vqshlQs_n8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vqshlQs_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQs_n16:
+;CHECK-LABEL: vqshlQs_n16:
;CHECK: vqshl.s16{{.*#15}}
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
@@ -257,7 +257,7 @@ define <8 x i16> @vqshlQs_n16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vqshlQs_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQs_n32:
+;CHECK-LABEL: vqshlQs_n32:
;CHECK: vqshl.s32{{.*#31}}
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
@@ -265,7 +265,7 @@ define <4 x i32> @vqshlQs_n32(<4 x i32>* %A) nounwind {
}
define <2 x i64> @vqshlQs_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQs_n64:
+;CHECK-LABEL: vqshlQs_n64:
;CHECK: vqshl.s64{{.*#63}}
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
@@ -273,7 +273,7 @@ define <2 x i64> @vqshlQs_n64(<2 x i64>* %A) nounwind {
}
define <16 x i8> @vqshlQu_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQu_n8:
+;CHECK-LABEL: vqshlQu_n8:
;CHECK: vqshl.u8{{.*#7}}
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -281,7 +281,7 @@ define <16 x i8> @vqshlQu_n8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vqshlQu_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQu_n16:
+;CHECK-LABEL: vqshlQu_n16:
;CHECK: vqshl.u16{{.*#15}}
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
@@ -289,7 +289,7 @@ define <8 x i16> @vqshlQu_n16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vqshlQu_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQu_n32:
+;CHECK-LABEL: vqshlQu_n32:
;CHECK: vqshl.u32{{.*#31}}
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
@@ -297,7 +297,7 @@ define <4 x i32> @vqshlQu_n32(<4 x i32>* %A) nounwind {
}
define <2 x i64> @vqshlQu_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQu_n64:
+;CHECK-LABEL: vqshlQu_n64:
;CHECK: vqshl.u64{{.*#63}}
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
@@ -305,7 +305,7 @@ define <2 x i64> @vqshlQu_n64(<2 x i64>* %A) nounwind {
}
define <16 x i8> @vqshlQsu_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQsu_n8:
+;CHECK-LABEL: vqshlQsu_n8:
;CHECK: vqshlu.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
@@ -313,7 +313,7 @@ define <16 x i8> @vqshlQsu_n8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @vqshlQsu_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQsu_n16:
+;CHECK-LABEL: vqshlQsu_n16:
;CHECK: vqshlu.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
@@ -321,7 +321,7 @@ define <8 x i16> @vqshlQsu_n16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @vqshlQsu_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQsu_n32:
+;CHECK-LABEL: vqshlQsu_n32:
;CHECK: vqshlu.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
@@ -329,7 +329,7 @@ define <4 x i32> @vqshlQsu_n32(<4 x i32>* %A) nounwind {
}
define <2 x i64> @vqshlQsu_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQsu_n64:
+;CHECK-LABEL: vqshlQsu_n64:
;CHECK: vqshlu.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
@@ -367,7 +367,7 @@ declare <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32>, <4 x i32>) nounwind
declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i8> @vqrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqrshls8:
+;CHECK-LABEL: vqrshls8:
;CHECK: vqrshl.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -376,7 +376,7 @@ define <8 x i8> @vqrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrshls16:
+;CHECK-LABEL: vqrshls16:
;CHECK: vqrshl.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -385,7 +385,7 @@ define <4 x i16> @vqrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrshls32:
+;CHECK-LABEL: vqrshls32:
;CHECK: vqrshl.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -394,7 +394,7 @@ define <2 x i32> @vqrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqrshls64:
+;CHECK-LABEL: vqrshls64:
;CHECK: vqrshl.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -403,7 +403,7 @@ define <1 x i64> @vqrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <8 x i8> @vqrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqrshlu8:
+;CHECK-LABEL: vqrshlu8:
;CHECK: vqrshl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -412,7 +412,7 @@ define <8 x i8> @vqrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrshlu16:
+;CHECK-LABEL: vqrshlu16:
;CHECK: vqrshl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -421,7 +421,7 @@ define <4 x i16> @vqrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrshlu32:
+;CHECK-LABEL: vqrshlu32:
;CHECK: vqrshl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -430,7 +430,7 @@ define <2 x i32> @vqrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqrshlu64:
+;CHECK-LABEL: vqrshlu64:
;CHECK: vqrshl.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -439,7 +439,7 @@ define <1 x i64> @vqrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @vqrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqrshlQs8:
+;CHECK-LABEL: vqrshlQs8:
;CHECK: vqrshl.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -448,7 +448,7 @@ define <16 x i8> @vqrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrshlQs16:
+;CHECK-LABEL: vqrshlQs16:
;CHECK: vqrshl.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -457,7 +457,7 @@ define <8 x i16> @vqrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrshlQs32:
+;CHECK-LABEL: vqrshlQs32:
;CHECK: vqrshl.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -466,7 +466,7 @@ define <4 x i32> @vqrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqrshlQs64:
+;CHECK-LABEL: vqrshlQs64:
;CHECK: vqrshl.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -475,7 +475,7 @@ define <2 x i64> @vqrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <16 x i8> @vqrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqrshlQu8:
+;CHECK-LABEL: vqrshlQu8:
;CHECK: vqrshl.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -484,7 +484,7 @@ define <16 x i8> @vqrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrshlQu16:
+;CHECK-LABEL: vqrshlQu16:
;CHECK: vqrshl.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -493,7 +493,7 @@ define <8 x i16> @vqrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrshlQu32:
+;CHECK-LABEL: vqrshlQu32:
;CHECK: vqrshl.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -502,7 +502,7 @@ define <4 x i32> @vqrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqrshlQu64:
+;CHECK-LABEL: vqrshlQu64:
;CHECK: vqrshl.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
diff --git a/test/CodeGen/ARM/vqshrn.ll b/test/CodeGen/ARM/vqshrn.ll
index 5da79432bb42..4abae700f877 100644
--- a/test/CodeGen/ARM/vqshrn.ll
+++ b/test/CodeGen/ARM/vqshrn.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vqshrns8:
+;CHECK-LABEL: vqshrns8:
;CHECK: vqshrn.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -9,7 +9,7 @@ define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vqshrns16:
+;CHECK-LABEL: vqshrns16:
;CHECK: vqshrn.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -17,7 +17,7 @@ define <4 x i16> @vqshrns16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vqshrns32:
+;CHECK-LABEL: vqshrns32:
;CHECK: vqshrn.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
@@ -25,7 +25,7 @@ define <2 x i32> @vqshrns32(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqshrnu8(<8 x i16>* %A) nounwind {
-;CHECK: vqshrnu8:
+;CHECK-LABEL: vqshrnu8:
;CHECK: vqshrn.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -33,7 +33,7 @@ define <8 x i8> @vqshrnu8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqshrnu16(<4 x i32>* %A) nounwind {
-;CHECK: vqshrnu16:
+;CHECK-LABEL: vqshrnu16:
;CHECK: vqshrn.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -41,7 +41,7 @@ define <4 x i16> @vqshrnu16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqshrnu32(<2 x i64>* %A) nounwind {
-;CHECK: vqshrnu32:
+;CHECK-LABEL: vqshrnu32:
;CHECK: vqshrn.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
@@ -49,7 +49,7 @@ define <2 x i32> @vqshrnu32(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqshruns8(<8 x i16>* %A) nounwind {
-;CHECK: vqshruns8:
+;CHECK-LABEL: vqshruns8:
;CHECK: vqshrun.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -57,7 +57,7 @@ define <8 x i8> @vqshruns8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqshruns16(<4 x i32>* %A) nounwind {
-;CHECK: vqshruns16:
+;CHECK-LABEL: vqshruns16:
;CHECK: vqshrun.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -65,7 +65,7 @@ define <4 x i16> @vqshruns16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqshruns32(<2 x i64>* %A) nounwind {
-;CHECK: vqshruns32:
+;CHECK-LABEL: vqshruns32:
;CHECK: vqshrun.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
@@ -85,7 +85,7 @@ declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind
declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
define <8 x i8> @vqrshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshrns8:
+;CHECK-LABEL: vqrshrns8:
;CHECK: vqrshrn.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -93,7 +93,7 @@ define <8 x i8> @vqrshrns8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqrshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshrns16:
+;CHECK-LABEL: vqrshrns16:
;CHECK: vqrshrn.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -101,7 +101,7 @@ define <4 x i16> @vqrshrns16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqrshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshrns32:
+;CHECK-LABEL: vqrshrns32:
;CHECK: vqrshrn.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
@@ -109,7 +109,7 @@ define <2 x i32> @vqrshrns32(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqrshrnu8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshrnu8:
+;CHECK-LABEL: vqrshrnu8:
;CHECK: vqrshrn.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -117,7 +117,7 @@ define <8 x i8> @vqrshrnu8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqrshrnu16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshrnu16:
+;CHECK-LABEL: vqrshrnu16:
;CHECK: vqrshrn.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -125,7 +125,7 @@ define <4 x i16> @vqrshrnu16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqrshrnu32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshrnu32:
+;CHECK-LABEL: vqrshrnu32:
;CHECK: vqrshrn.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
@@ -133,7 +133,7 @@ define <2 x i32> @vqrshrnu32(<2 x i64>* %A) nounwind {
}
define <8 x i8> @vqrshruns8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshruns8:
+;CHECK-LABEL: vqrshruns8:
;CHECK: vqrshrun.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
@@ -141,7 +141,7 @@ define <8 x i8> @vqrshruns8(<8 x i16>* %A) nounwind {
}
define <4 x i16> @vqrshruns16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshruns16:
+;CHECK-LABEL: vqrshruns16:
;CHECK: vqrshrun.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
@@ -149,7 +149,7 @@ define <4 x i16> @vqrshruns16(<4 x i32>* %A) nounwind {
}
define <2 x i32> @vqrshruns32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshruns32:
+;CHECK-LABEL: vqrshruns32:
;CHECK: vqrshrun.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
diff --git a/test/CodeGen/ARM/vqsub.ll b/test/CodeGen/ARM/vqsub.ll
index 4231fca37e37..90bc3492fc53 100644
--- a/test/CodeGen/ARM/vqsub.ll
+++ b/test/CodeGen/ARM/vqsub.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vqsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqsubs8:
+;CHECK-LABEL: vqsubs8:
;CHECK: vqsub.s8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vqsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqsubs16:
+;CHECK-LABEL: vqsubs16:
;CHECK: vqsub.s16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vqsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqsubs32:
+;CHECK-LABEL: vqsubs32:
;CHECK: vqsub.s32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vqsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqsubs64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqsubs64:
+;CHECK-LABEL: vqsubs64:
;CHECK: vqsub.s64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @vqsubs64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <8 x i8> @vqsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqsubu8:
+;CHECK-LABEL: vqsubu8:
;CHECK: vqsub.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -46,7 +46,7 @@ define <8 x i8> @vqsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vqsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqsubu16:
+;CHECK-LABEL: vqsubu16:
;CHECK: vqsub.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -55,7 +55,7 @@ define <4 x i16> @vqsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vqsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqsubu32:
+;CHECK-LABEL: vqsubu32:
;CHECK: vqsub.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -64,7 +64,7 @@ define <2 x i32> @vqsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vqsubu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqsubu64:
+;CHECK-LABEL: vqsubu64:
;CHECK: vqsub.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -73,7 +73,7 @@ define <1 x i64> @vqsubu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <16 x i8> @vqsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqsubQs8:
+;CHECK-LABEL: vqsubQs8:
;CHECK: vqsub.s8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -82,7 +82,7 @@ define <16 x i8> @vqsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqsubQs16:
+;CHECK-LABEL: vqsubQs16:
;CHECK: vqsub.s16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -91,7 +91,7 @@ define <8 x i16> @vqsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqsubQs32:
+;CHECK-LABEL: vqsubQs32:
;CHECK: vqsub.s32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -100,7 +100,7 @@ define <4 x i32> @vqsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqsubQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqsubQs64:
+;CHECK-LABEL: vqsubQs64:
;CHECK: vqsub.s64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
@@ -109,7 +109,7 @@ define <2 x i64> @vqsubQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
}
define <16 x i8> @vqsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqsubQu8:
+;CHECK-LABEL: vqsubQu8:
;CHECK: vqsub.u8
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
@@ -118,7 +118,7 @@ define <16 x i8> @vqsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
}
define <8 x i16> @vqsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqsubQu16:
+;CHECK-LABEL: vqsubQu16:
;CHECK: vqsub.u16
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
@@ -127,7 +127,7 @@ define <8 x i16> @vqsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
}
define <4 x i32> @vqsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqsubQu32:
+;CHECK-LABEL: vqsubQu32:
;CHECK: vqsub.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
@@ -136,7 +136,7 @@ define <4 x i32> @vqsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
}
define <2 x i64> @vqsubQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqsubQu64:
+;CHECK-LABEL: vqsubQu64:
;CHECK: vqsub.u64
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
diff --git a/test/CodeGen/ARM/vrec.ll b/test/CodeGen/ARM/vrec.ll
index 99989e9d6144..c0deca995764 100644
--- a/test/CodeGen/ARM/vrec.ll
+++ b/test/CodeGen/ARM/vrec.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
-;CHECK: vrecpei32:
+;CHECK-LABEL: vrecpei32:
;CHECK: vrecpe.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
@@ -9,7 +9,7 @@ define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
}
define <4 x i32> @vrecpeQi32(<4 x i32>* %A) nounwind {
-;CHECK: vrecpeQi32:
+;CHECK-LABEL: vrecpeQi32:
;CHECK: vrecpe.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
@@ -17,7 +17,7 @@ define <4 x i32> @vrecpeQi32(<4 x i32>* %A) nounwind {
}
define <2 x float> @vrecpef32(<2 x float>* %A) nounwind {
-;CHECK: vrecpef32:
+;CHECK-LABEL: vrecpef32:
;CHECK: vrecpe.f32
%tmp1 = load <2 x float>* %A
%tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
@@ -25,7 +25,7 @@ define <2 x float> @vrecpef32(<2 x float>* %A) nounwind {
}
define <4 x float> @vrecpeQf32(<4 x float>* %A) nounwind {
-;CHECK: vrecpeQf32:
+;CHECK-LABEL: vrecpeQf32:
;CHECK: vrecpe.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
@@ -39,7 +39,7 @@ declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
define <2 x float> @vrecpsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vrecpsf32:
+;CHECK-LABEL: vrecpsf32:
;CHECK: vrecps.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -48,7 +48,7 @@ define <2 x float> @vrecpsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <4 x float> @vrecpsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vrecpsQf32:
+;CHECK-LABEL: vrecpsQf32:
;CHECK: vrecps.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
@@ -60,7 +60,7 @@ declare <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float>, <2 x float>) nounwi
declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
define <2 x i32> @vrsqrtei32(<2 x i32>* %A) nounwind {
-;CHECK: vrsqrtei32:
+;CHECK-LABEL: vrsqrtei32:
;CHECK: vrsqrte.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32> %tmp1)
@@ -68,7 +68,7 @@ define <2 x i32> @vrsqrtei32(<2 x i32>* %A) nounwind {
}
define <4 x i32> @vrsqrteQi32(<4 x i32>* %A) nounwind {
-;CHECK: vrsqrteQi32:
+;CHECK-LABEL: vrsqrteQi32:
;CHECK: vrsqrte.u32
%tmp1 = load <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32> %tmp1)
@@ -76,7 +76,7 @@ define <4 x i32> @vrsqrteQi32(<4 x i32>* %A) nounwind {
}
define <2 x float> @vrsqrtef32(<2 x float>* %A) nounwind {
-;CHECK: vrsqrtef32:
+;CHECK-LABEL: vrsqrtef32:
;CHECK: vrsqrte.f32
%tmp1 = load <2 x float>* %A
%tmp2 = call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %tmp1)
@@ -84,7 +84,7 @@ define <2 x float> @vrsqrtef32(<2 x float>* %A) nounwind {
}
define <4 x float> @vrsqrteQf32(<4 x float>* %A) nounwind {
-;CHECK: vrsqrteQf32:
+;CHECK-LABEL: vrsqrteQf32:
;CHECK: vrsqrte.f32
%tmp1 = load <4 x float>* %A
%tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
@@ -98,7 +98,7 @@ declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
define <2 x float> @vrsqrtsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vrsqrtsf32:
+;CHECK-LABEL: vrsqrtsf32:
;CHECK: vrsqrts.f32
%tmp1 = load <2 x float>* %A
%tmp2 = load <2 x float>* %B
@@ -107,7 +107,7 @@ define <2 x float> @vrsqrtsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
}
define <4 x float> @vrsqrtsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vrsqrtsQf32:
+;CHECK-LABEL: vrsqrtsQf32:
;CHECK: vrsqrts.f32
%tmp1 = load <4 x float>* %A
%tmp2 = load <4 x float>* %B
diff --git a/test/CodeGen/ARM/vrev.ll b/test/CodeGen/ARM/vrev.ll
index 122ec0357fbe..b6da694e1805 100644
--- a/test/CodeGen/ARM/vrev.ll
+++ b/test/CodeGen/ARM/vrev.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev64D8:
+;CHECK-LABEL: test_vrev64D8:
;CHECK: vrev64.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -9,7 +9,7 @@ define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
-;CHECK: test_vrev64D16:
+;CHECK-LABEL: test_vrev64D16:
;CHECK: vrev64.16
%tmp1 = load <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -17,7 +17,7 @@ define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
}
define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
-;CHECK: test_vrev64D32:
+;CHECK-LABEL: test_vrev64D32:
;CHECK: vrev64.32
%tmp1 = load <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
@@ -25,7 +25,7 @@ define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
}
define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
-;CHECK: test_vrev64Df:
+;CHECK-LABEL: test_vrev64Df:
;CHECK: vrev64.32
%tmp1 = load <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
@@ -33,7 +33,7 @@ define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
}
define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev64Q8:
+;CHECK-LABEL: test_vrev64Q8:
;CHECK: vrev64.8
%tmp1 = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
@@ -41,7 +41,7 @@ define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
-;CHECK: test_vrev64Q16:
+;CHECK-LABEL: test_vrev64Q16:
;CHECK: vrev64.16
%tmp1 = load <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -49,7 +49,7 @@ define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
}
define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
-;CHECK: test_vrev64Q32:
+;CHECK-LABEL: test_vrev64Q32:
;CHECK: vrev64.32
%tmp1 = load <4 x i32>* %A
%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -57,7 +57,7 @@ define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
}
define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
-;CHECK: test_vrev64Qf:
+;CHECK-LABEL: test_vrev64Qf:
;CHECK: vrev64.32
%tmp1 = load <4 x float>* %A
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -65,7 +65,7 @@ define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
}
define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev32D8:
+;CHECK-LABEL: test_vrev32D8:
;CHECK: vrev32.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
@@ -73,7 +73,7 @@ define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
-;CHECK: test_vrev32D16:
+;CHECK-LABEL: test_vrev32D16:
;CHECK: vrev32.16
%tmp1 = load <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
@@ -81,7 +81,7 @@ define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
}
define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev32Q8:
+;CHECK-LABEL: test_vrev32Q8:
;CHECK: vrev32.8
%tmp1 = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
@@ -89,7 +89,7 @@ define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
}
define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
-;CHECK: test_vrev32Q16:
+;CHECK-LABEL: test_vrev32Q16:
;CHECK: vrev32.16
%tmp1 = load <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -97,7 +97,7 @@ define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
}
define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev16D8:
+;CHECK-LABEL: test_vrev16D8:
;CHECK: vrev16.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
@@ -105,7 +105,7 @@ define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
}
define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev16Q8:
+;CHECK-LABEL: test_vrev16Q8:
;CHECK: vrev16.8
%tmp1 = load <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
@@ -115,7 +115,7 @@ define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
; Undef shuffle indices should not prevent matching to VREV:
define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev64D8_undef:
+;CHECK-LABEL: test_vrev64D8_undef:
;CHECK: vrev64.8
%tmp1 = load <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -123,7 +123,7 @@ define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
}
define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
-;CHECK: test_vrev32Q16_undef:
+;CHECK-LABEL: test_vrev32Q16_undef:
;CHECK: vrev32.16
%tmp1 = load <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
@@ -133,7 +133,7 @@ define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
; A vcombine feeding a VREV should not obscure things. Radar 8597007.
define void @test_with_vcombine(<4 x float>* %v) nounwind {
-;CHECK: test_with_vcombine:
+;CHECK-LABEL: test_with_vcombine:
;CHECK-NOT: vext
;CHECK: vrev64.32
%tmp1 = load <4 x float>* %v, align 16
@@ -151,7 +151,7 @@ define void @test_with_vcombine(<4 x float>* %v) nounwind {
; The type <2 x i16> is legalized to <2 x i32> and needs to be trunc-stored
; to <2 x i16> when stored to memory.
define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
-; CHECK: test_vrev64:
+; CHECK-LABEL: test_vrev64:
; CHECK: vst1.32
entry:
%0 = bitcast <4 x i16>* %source to <8 x i16>*
diff --git a/test/CodeGen/ARM/vsel.ll b/test/CodeGen/ARM/vsel.ll
new file mode 100644
index 000000000000..7e1f7146fd1c
--- /dev/null
+++ b/test/CodeGen/ARM/vsel.ll
@@ -0,0 +1,309 @@
+; RUN: llc < %s -mtriple=armv8-linux-gnueabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s
+@varfloat = global float 0.0
+@vardouble = global double 0.0
+define void @test_vsel32sgt(i32 %lhs32, i32 %rhs32, float %a, float %b) {
+; CHECK: test_vsel32sgt
+ %tst1 = icmp sgt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: cmp r0, r1
+; CHECK: vselgt.f32 s0, s0, s1
+ ret void
+}
+define void @test_vsel64sgt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
+; CHECK: test_vsel64sgt
+ %tst1 = icmp sgt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: cmp r0, r1
+; CHECK: vselgt.f64 d16, d0, d1
+ ret void
+}
+define void @test_vsel32sge(i32 %lhs32, i32 %rhs32, float %a, float %b) {
+; CHECK: test_vsel32sge
+ %tst1 = icmp sge i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: cmp r0, r1
+; CHECK: vselge.f32 s0, s0, s1
+ ret void
+}
+define void @test_vsel64sge(i32 %lhs32, i32 %rhs32, double %a, double %b) {
+; CHECK: test_vsel64sge
+ %tst1 = icmp sge i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: cmp r0, r1
+; CHECK: vselge.f64 d16, d0, d1
+ ret void
+}
+define void @test_vsel32eq(i32 %lhs32, i32 %rhs32, float %a, float %b) {
+; CHECK: test_vsel32eq
+ %tst1 = icmp eq i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: cmp r0, r1
+; CHECK: vseleq.f32 s0, s0, s1
+ ret void
+}
+define void @test_vsel64eq(i32 %lhs32, i32 %rhs32, double %a, double %b) {
+; CHECK: test_vsel64eq
+ %tst1 = icmp eq i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: cmp r0, r1
+; CHECK: vseleq.f64 d16, d0, d1
+ ret void
+}
+define void @test_vsel32slt(i32 %lhs32, i32 %rhs32, float %a, float %b) {
+; CHECK: test_vsel32slt
+ %tst1 = icmp slt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: cmp r0, r1
+; CHECK: vselgt.f32 s0, s1, s0
+ ret void
+}
+define void @test_vsel64slt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
+; CHECK: test_vsel64slt
+ %tst1 = icmp slt i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: cmp r0, r1
+; CHECK: vselgt.f64 d16, d1, d0
+ ret void
+}
+define void @test_vsel32sle(i32 %lhs32, i32 %rhs32, float %a, float %b) {
+; CHECK: test_vsel32sle
+ %tst1 = icmp sle i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: cmp r0, r1
+; CHECK: vselge.f32 s0, s1, s0
+ ret void
+}
+define void @test_vsel64sle(i32 %lhs32, i32 %rhs32, double %a, double %b) {
+; CHECK: test_vsel64sle
+ %tst1 = icmp sle i32 %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: cmp r0, r1
+; CHECK: vselge.f64 d16, d1, d0
+ ret void
+}
+define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ogt
+ %tst1 = fcmp ogt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselgt.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64ogt(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ogt
+ %tst1 = fcmp ogt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselgt.f64 d16, d1, d2
+ ret void
+}
+define void @test_vsel32oge(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32oge
+ %tst1 = fcmp oge float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselge.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64oge(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64oge
+ %tst1 = fcmp oge float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselge.f64 d16, d1, d2
+ ret void
+}
+define void @test_vsel32oeq(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32oeq
+ %tst1 = fcmp oeq float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vseleq.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64oeq(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64oeq
+ %tst1 = fcmp oeq float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vseleq.f64 d16, d1, d2
+ ret void
+}
+define void @test_vsel32ugt(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ugt
+ %tst1 = fcmp ugt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselge.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64ugt(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ugt
+ %tst1 = fcmp ugt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselge.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32uge(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32uge
+ %tst1 = fcmp uge float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselgt.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64uge(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64uge
+ %tst1 = fcmp uge float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselgt.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32olt(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32olt
+ %tst1 = fcmp olt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselgt.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64olt(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64olt
+ %tst1 = fcmp olt float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselgt.f64 d16, d1, d2
+ ret void
+}
+define void @test_vsel32ult(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ult
+ %tst1 = fcmp ult float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselge.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64ult(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ult
+ %tst1 = fcmp ult float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselge.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32ole(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ole
+ %tst1 = fcmp ole float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselge.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64ole(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ole
+ %tst1 = fcmp ole float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s1, s0
+; CHECK: vselge.f64 d16, d1, d2
+ ret void
+}
+define void @test_vsel32ule(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ule
+ %tst1 = fcmp ule float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselgt.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64ule(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ule
+ %tst1 = fcmp ule float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselgt.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32ord(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32ord
+ %tst1 = fcmp ord float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselvs.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64ord(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64ord
+ %tst1 = fcmp ord float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselvs.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32une(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32une
+ %tst1 = fcmp une float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vseleq.f32 s0, s3, s2
+ ret void
+}
+define void @test_vsel64une(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64une
+ %tst1 = fcmp une float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vseleq.f64 d16, d2, d1
+ ret void
+}
+define void @test_vsel32uno(float %lhs32, float %rhs32, float %a, float %b) {
+; CHECK: test_vsel32uno
+ %tst1 = fcmp uno float %lhs32, %rhs32
+ %val1 = select i1 %tst1, float %a, float %b
+ store float %val1, float* @varfloat
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselvs.f32 s0, s2, s3
+ ret void
+}
+define void @test_vsel64uno(float %lhs32, float %rhs32, double %a, double %b) {
+; CHECK: test_vsel64uno
+ %tst1 = fcmp uno float %lhs32, %rhs32
+ %val1 = select i1 %tst1, double %a, double %b
+ store double %val1, double* @vardouble
+; CHECK: vcmpe.f32 s0, s1
+; CHECK: vselvs.f64 d16, d1, d2
+ ret void
+}
diff --git a/test/CodeGen/ARM/vselect_imax.ll b/test/CodeGen/ARM/vselect_imax.ll
index 7e79d6c68c2b..9ea56a47bd23 100644
--- a/test/CodeGen/ARM/vselect_imax.ll
+++ b/test/CodeGen/ARM/vselect_imax.ll
@@ -1,3 +1,4 @@
+; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; Make sure that ARM backend with NEON handles vselect.
@@ -14,17 +15,14 @@ define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) {
; lowering we also need to adjust the cost.
%T0_10 = type <16 x i16>
%T1_10 = type <16 x i1>
-; CHECK: func_blend10:
+; CHECK-LABEL: func_blend10:
define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2,
%T1_10* %blend, %T0_10* %storeaddr) {
%v0 = load %T0_10* %loadaddr
%v1 = load %T0_10* %loadaddr2
%c = icmp slt %T0_10 %v0, %v1
-; CHECK: vst1
-; CHECK: vst1
-; CHECK: vst1
-; CHECK: vst1
-; CHECK: vld
+; CHECK: vbsl
+; CHECK: vbsl
; COST: func_blend10
; COST: cost of 40 {{.*}} select
%r = select %T1_10 %c, %T0_10 %v0, %T0_10 %v1
@@ -33,16 +31,14 @@ define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2,
}
%T0_14 = type <8 x i32>
%T1_14 = type <8 x i1>
-; CHECK: func_blend14:
+; CHECK-LABEL: func_blend14:
define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
%T1_14* %blend, %T0_14* %storeaddr) {
%v0 = load %T0_14* %loadaddr
%v1 = load %T0_14* %loadaddr2
%c = icmp slt %T0_14 %v0, %v1
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
+; CHECK: vbsl
+; CHECK: vbsl
; COST: func_blend14
; COST: cost of 41 {{.*}} select
%r = select %T1_14 %c, %T0_14 %v0, %T0_14 %v1
@@ -51,16 +47,14 @@ define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
}
%T0_15 = type <16 x i32>
%T1_15 = type <16 x i1>
-; CHECK: func_blend15:
+; CHECK-LABEL: func_blend15:
define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2,
%T1_15* %blend, %T0_15* %storeaddr) {
+; CHECK: vbsl
+; CHECK: vbsl
%v0 = load %T0_15* %loadaddr
%v1 = load %T0_15* %loadaddr2
%c = icmp slt %T0_15 %v0, %v1
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
; COST: func_blend15
; COST: cost of 82 {{.*}} select
%r = select %T1_15 %c, %T0_15 %v0, %T0_15 %v1
@@ -69,16 +63,14 @@ define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2,
}
%T0_18 = type <4 x i64>
%T1_18 = type <4 x i1>
-; CHECK: func_blend18:
+; CHECK-LABEL: func_blend18:
define void @func_blend18(%T0_18* %loadaddr, %T0_18* %loadaddr2,
%T1_18* %blend, %T0_18* %storeaddr) {
+; CHECK: vbsl
+; CHECK: vbsl
%v0 = load %T0_18* %loadaddr
%v1 = load %T0_18* %loadaddr2
%c = icmp slt %T0_18 %v0, %v1
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
; COST: func_blend18
; COST: cost of 19 {{.*}} select
%r = select %T1_18 %c, %T0_18 %v0, %T0_18 %v1
@@ -87,16 +79,16 @@ define void @func_blend18(%T0_18* %loadaddr, %T0_18* %loadaddr2,
}
%T0_19 = type <8 x i64>
%T1_19 = type <8 x i1>
-; CHECK: func_blend19:
+; CHECK-LABEL: func_blend19:
define void @func_blend19(%T0_19* %loadaddr, %T0_19* %loadaddr2,
%T1_19* %blend, %T0_19* %storeaddr) {
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
%v0 = load %T0_19* %loadaddr
%v1 = load %T0_19* %loadaddr2
%c = icmp slt %T0_19 %v0, %v1
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
; COST: func_blend19
; COST: cost of 50 {{.*}} select
%r = select %T1_19 %c, %T0_19 %v0, %T0_19 %v1
@@ -105,16 +97,20 @@ define void @func_blend19(%T0_19* %loadaddr, %T0_19* %loadaddr2,
}
%T0_20 = type <16 x i64>
%T1_20 = type <16 x i1>
-; CHECK: func_blend20:
+; CHECK-LABEL: func_blend20:
define void @func_blend20(%T0_20* %loadaddr, %T0_20* %loadaddr2,
%T1_20* %blend, %T0_20* %storeaddr) {
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
+; CHECK: vbsl
%v0 = load %T0_20* %loadaddr
%v1 = load %T0_20* %loadaddr2
%c = icmp slt %T0_20 %v0, %v1
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
; COST: func_blend20
; COST: cost of 100 {{.*}} select
%r = select %T1_20 %c, %T0_20 %v0, %T0_20 %v1
diff --git a/test/CodeGen/ARM/vshift.ll b/test/CodeGen/ARM/vshift.ll
index f3cbec7457d0..de380d3d12b3 100644
--- a/test/CodeGen/ARM/vshift.ll
+++ b/test/CodeGen/ARM/vshift.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vshls8:
+;CHECK-LABEL: vshls8:
;CHECK: vshl.u8
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
@@ -10,7 +10,7 @@ define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
}
define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vshls16:
+;CHECK-LABEL: vshls16:
;CHECK: vshl.u16
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
@@ -19,7 +19,7 @@ define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
}
define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vshls32:
+;CHECK-LABEL: vshls32:
;CHECK: vshl.u32
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
@@ -28,7 +28,7 @@ define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
}
define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vshls64:
+;CHECK-LABEL: vshls64:
;CHECK: vshl.u64
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
@@ -37,7 +37,7 @@ define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
}
define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
-;CHECK: vshli8:
+;CHECK-LABEL: vshli8:
;CHECK: vshl.i8
%tmp1 = load <8 x i8>* %A
%tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
@@ -45,7 +45,7 @@ define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
}
define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
-;CHECK: vshli16:
+;CHECK-LABEL: vshli16:
;CHECK: vshl.i16
%tmp1 = load <4 x i16>* %A
%tmp2 = shl <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15