Diffstat (limited to 'test/CodeGen')
-rw-r--r-- test/CodeGen/AArch64/adrp-relocation.ll | 22
-rw-r--r-- test/CodeGen/AArch64/atomic-ops-not-barriers.ll | 6
-rw-r--r-- test/CodeGen/AArch64/atomic-ops.ll | 381
-rw-r--r-- test/CodeGen/AArch64/blockaddress.ll | 9
-rw-r--r-- test/CodeGen/AArch64/code-model-large-abs.ll | 61
-rw-r--r-- test/CodeGen/AArch64/elf-extern.ll | 16
-rw-r--r-- test/CodeGen/AArch64/extern-weak.ll | 19
-rw-r--r-- test/CodeGen/AArch64/jump-table.ll | 28
-rw-r--r-- test/CodeGen/AArch64/literal_pools.ll | 40
-rw-r--r-- test/CodeGen/ARM/2010-08-04-StackVariable.ll | 2
-rw-r--r-- test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll | 59
-rw-r--r-- test/CodeGen/ARM/2010-11-30-reloc-movt.ll | 39
-rw-r--r-- test/CodeGen/ARM/2010-12-08-tpsoft.ll | 24
-rw-r--r-- test/CodeGen/ARM/2010-12-15-elf-lcomm.ll | 27
-rw-r--r-- test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll | 2
-rw-r--r-- test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll | 2
-rw-r--r-- test/CodeGen/ARM/2011-12-14-machine-sink.ll | 9
-rw-r--r-- test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll | 18
-rw-r--r-- test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll | 5
-rw-r--r-- test/CodeGen/ARM/2012-01-26-CopyPropKills.ll | 9
-rw-r--r-- test/CodeGen/ARM/2012-04-02-TwoAddrInstrCrash.ll | 11
-rw-r--r-- test/CodeGen/ARM/2012-04-10-DAGCombine.ll | 5
-rw-r--r-- test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll | 12
-rw-r--r-- test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll | 8
-rw-r--r-- test/CodeGen/ARM/2013-01-21-PR14992.ll | 10
-rw-r--r-- test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll | 73
-rw-r--r-- test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll | 95
-rw-r--r-- test/CodeGen/ARM/2013-04-16-AAPCS-C5-vs-VFP.ll | 61
-rw-r--r-- test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll (renamed from test/CodeGen/ARM/2013-04-05-overridden-loads-PR14824.ll) | 58
-rw-r--r-- test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll | 28
-rw-r--r-- test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll | 48
-rw-r--r-- test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll | 45
-rw-r--r-- test/CodeGen/ARM/2013-05-05-IfConvertBug.ll | 71
-rw-r--r-- test/CodeGen/ARM/avoid-cpsr-rmw.ll | 2
-rw-r--r-- test/CodeGen/ARM/commute-movcc.ll | 8
-rw-r--r-- test/CodeGen/ARM/dagcombine-concatvector.ll | 23
-rw-r--r-- test/CodeGen/ARM/debug-info-arg.ll | 2
-rw-r--r-- test/CodeGen/ARM/debug-info-branch-folding.ll | 2
-rw-r--r-- test/CodeGen/ARM/debug-info-d16-reg.ll | 2
-rw-r--r-- test/CodeGen/ARM/debug-info-qreg.ll | 2
-rw-r--r-- test/CodeGen/ARM/debug-info-s16-reg.ll | 2
-rw-r--r-- test/CodeGen/ARM/debug-info-sreg2.ll | 2
-rw-r--r-- test/CodeGen/ARM/ehabi-filters.ll | 6
-rw-r--r-- test/CodeGen/ARM/ehabi-mc-compact-pr0.ll | 49
-rw-r--r-- test/CodeGen/ARM/ehabi-mc-compact-pr1.ll | 62
-rw-r--r-- test/CodeGen/ARM/ehabi-mc-section-group.ll | 29
-rw-r--r-- test/CodeGen/ARM/ehabi-mc-section.ll | 18
-rw-r--r-- test/CodeGen/ARM/ehabi-mc-sh_link.ll | 63
-rw-r--r-- test/CodeGen/ARM/ehabi-mc.ll | 18
-rw-r--r-- test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll | 30
-rw-r--r-- test/CodeGen/ARM/gpr-paired-spill.ll | 44
-rw-r--r-- test/CodeGen/ARM/lsr-unfolded-offset.ll | 12
-rw-r--r-- test/CodeGen/ARM/misched-copy-arm.ll | 30
-rw-r--r-- test/CodeGen/ARM/neon_vabs.ll | 91
-rw-r--r-- test/CodeGen/ARM/nop_concat_vectors.ll | 13
-rw-r--r-- test/CodeGen/ARM/private.ll | 11
-rw-r--r-- test/CodeGen/ARM/returned-ext.ll | 178
-rw-r--r-- test/CodeGen/ARM/tail-dup.ll | 10
-rw-r--r-- test/CodeGen/ARM/this-return.ll | 105
-rw-r--r-- test/CodeGen/ARM/v1-constant-fold.ll | 18
-rw-r--r-- test/CodeGen/ARM/vcvt-cost.ll | 153
-rw-r--r-- test/CodeGen/ARM/vcvt.ll | 172
-rw-r--r-- test/CodeGen/ARM/vcvt_combine.ll | 16
-rw-r--r-- test/CodeGen/ARM/vdiv_combine.ll | 17
-rw-r--r-- test/CodeGen/ARM/vmul.ll | 24
-rw-r--r-- test/CodeGen/Generic/annotate.ll | 15
-rw-r--r-- test/CodeGen/Generic/crash.ll | 5
-rw-r--r-- test/CodeGen/Generic/ptr-annotate.ll | 18
-rw-r--r-- test/CodeGen/Hexagon/absimm.ll | 18
-rw-r--r-- test/CodeGen/Hexagon/always-ext.ll | 45
-rw-r--r-- test/CodeGen/Hexagon/cmp_pred2.ll | 87
-rw-r--r-- test/CodeGen/Hexagon/cmpb_pred.ll | 9
-rw-r--r-- test/CodeGen/Hexagon/combine_ir.ll | 7
-rw-r--r-- test/CodeGen/Hexagon/hwloop-const.ll | 8
-rw-r--r-- test/CodeGen/Hexagon/hwloop-dbg.ll | 7
-rw-r--r-- test/CodeGen/Hexagon/memops2.ll | 12
-rw-r--r-- test/CodeGen/Hexagon/memops3.ll | 11
-rw-r--r-- test/CodeGen/Hexagon/remove_lsr.ll | 12
-rw-r--r-- test/CodeGen/Hexagon/union-1.ll | 23
-rw-r--r-- test/CodeGen/Mips/alloca.ll | 18
-rw-r--r-- test/CodeGen/Mips/divrem.ll | 8
-rw-r--r-- test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll | 641
-rw-r--r-- test/CodeGen/Mips/dsp-patterns.ll | 244
-rw-r--r-- test/CodeGen/Mips/dsp-r1.ll | 11
-rw-r--r-- test/CodeGen/Mips/eh.ll | 8
-rw-r--r-- test/CodeGen/Mips/fpneeded.ll | 149
-rw-r--r-- test/CodeGen/Mips/fpnotneeded.ll | 77
-rw-r--r-- test/CodeGen/Mips/inlineasmmemop.ll | 40
-rw-r--r-- test/CodeGen/Mips/mips16_32_1.ll | 14
-rw-r--r-- test/CodeGen/Mips/mips16_32_10.ll | 59
-rw-r--r-- test/CodeGen/Mips/mips16_32_3.ll | 70
-rw-r--r-- test/CodeGen/Mips/mips16_32_4.ll | 65
-rw-r--r-- test/CodeGen/Mips/mips16_32_5.ll | 80
-rw-r--r-- test/CodeGen/Mips/mips16_32_6.ll | 86
-rw-r--r-- test/CodeGen/Mips/mips16_32_7.ll | 76
-rw-r--r-- test/CodeGen/Mips/mips16_32_8.ll | 74
-rw-r--r-- test/CodeGen/Mips/mips16_32_9.ll | 51
-rw-r--r-- test/CodeGen/Mips/select.ll | 16
-rw-r--r-- test/CodeGen/Mips/spill-copy-acreg.ll | 41
-rw-r--r-- test/CodeGen/Mips/tnaked.ll | 29
-rw-r--r-- test/CodeGen/Mips/zeroreg.ll | 8
-rw-r--r-- test/CodeGen/NVPTX/generic-to-nvvm.ll | 25
-rw-r--r-- test/CodeGen/NVPTX/i1-global.ll | 19
-rw-r--r-- test/CodeGen/NVPTX/i1-param.ll | 18
-rw-r--r-- test/CodeGen/NVPTX/intrinsics.ll | 7
-rw-r--r-- test/CodeGen/NVPTX/refl1.ll | 37
-rw-r--r-- test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll | 6
-rw-r--r-- test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll | 35
-rw-r--r-- test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll | 41
-rw-r--r-- test/CodeGen/PowerPC/bdzlr.ll | 64
-rw-r--r-- test/CodeGen/PowerPC/crsave.ll | 30
-rw-r--r-- test/CodeGen/PowerPC/ctrloop-s000.ll | 68
-rw-r--r-- test/CodeGen/PowerPC/ctrloop-sums.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/ctrloops.ll | 16
-rw-r--r-- test/CodeGen/PowerPC/early-ret.ll | 48
-rw-r--r-- test/CodeGen/PowerPC/early-ret2.ll | 25
-rw-r--r-- test/CodeGen/PowerPC/fma.ll | 27
-rw-r--r-- test/CodeGen/PowerPC/fold-zero.ll | 14
-rw-r--r-- test/CodeGen/PowerPC/fsel.ll | 137
-rw-r--r-- test/CodeGen/PowerPC/ifcvt.ll | 34
-rw-r--r-- test/CodeGen/PowerPC/lbzux.ll | 6
-rw-r--r-- test/CodeGen/PowerPC/lsa.ll | 43
-rw-r--r-- test/CodeGen/PowerPC/mcm-obj-2.ll | 45
-rw-r--r-- test/CodeGen/PowerPC/mcm-obj.ll | 151
-rw-r--r-- test/CodeGen/PowerPC/optcmp.ll | 143
-rw-r--r-- test/CodeGen/PowerPC/pr15359.ll | 12
-rw-r--r-- test/CodeGen/PowerPC/rounding-ops.ll | 10
-rw-r--r-- test/CodeGen/PowerPC/s000-alias-misched.ll | 20
-rw-r--r-- test/CodeGen/PowerPC/stubs.ll | 10
-rw-r--r-- test/CodeGen/PowerPC/stwu-gta.ll | 8
-rw-r--r-- test/CodeGen/PowerPC/stwu8.ll | 6
-rw-r--r-- test/CodeGen/PowerPC/tls-gd-obj.ll | 28
-rw-r--r-- test/CodeGen/PowerPC/tls-ie-obj.ll | 23
-rw-r--r-- test/CodeGen/PowerPC/tls-ld-obj.ll | 38
-rw-r--r-- test/CodeGen/R600/README | 21
-rw-r--r-- test/CodeGen/R600/add.ll (renamed from test/CodeGen/R600/add.v4i32.ll) | 6
-rw-r--r-- test/CodeGen/R600/alu-split.ll | 1
-rw-r--r-- test/CodeGen/R600/and.ll (renamed from test/CodeGen/R600/and.v4i32.ll) | 6
-rw-r--r-- test/CodeGen/R600/bfe_uint.ll | 26
-rw-r--r-- test/CodeGen/R600/bfi_int.ll | 52
-rw-r--r-- test/CodeGen/R600/call_fs.ll | 15
-rw-r--r-- test/CodeGen/R600/cf_end.ll | 9
-rw-r--r-- test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll | 4
-rw-r--r-- test/CodeGen/R600/disconnected-predset-break-bug.ll | 2
-rw-r--r-- test/CodeGen/R600/elf.ll | 20
-rw-r--r-- test/CodeGen/R600/elf.r600.ll | 17
-rw-r--r-- test/CodeGen/R600/fabs.ll | 2
-rw-r--r-- test/CodeGen/R600/fadd.ll | 21
-rw-r--r-- test/CodeGen/R600/fadd.v4f32.ll | 15
-rw-r--r-- test/CodeGen/R600/fcmp-cnd.ll | 2
-rw-r--r-- test/CodeGen/R600/fcmp.ll | 5
-rw-r--r-- test/CodeGen/R600/fdiv.ll (renamed from test/CodeGen/R600/fdiv.v4f32.ll) | 14
-rw-r--r-- test/CodeGen/R600/floor.ll | 2
-rw-r--r-- test/CodeGen/R600/fmad.ll | 2
-rw-r--r-- test/CodeGen/R600/fmax.ll | 2
-rw-r--r-- test/CodeGen/R600/fmin.ll | 2
-rw-r--r-- test/CodeGen/R600/fmul.ll | 21
-rw-r--r-- test/CodeGen/R600/fmul.v4f32.ll | 6
-rw-r--r-- test/CodeGen/R600/fp_to_sint.ll | 14
-rw-r--r-- test/CodeGen/R600/fp_to_uint.ll | 14
-rw-r--r-- test/CodeGen/R600/fsub.ll | 21
-rw-r--r-- test/CodeGen/R600/fsub.v4f32.ll | 15
-rw-r--r-- test/CodeGen/R600/i8-to-double-to-float.ll (renamed from test/CodeGen/R600/i8_to_double_to_float.ll) | 2
-rw-r--r-- test/CodeGen/R600/icmp-select-sete-reverse-args.ll | 2
-rw-r--r-- test/CodeGen/R600/imm.ll | 23
-rw-r--r-- test/CodeGen/R600/jump-address.ll (renamed from test/CodeGen/R600/jump_address.ll) | 4
-rw-r--r-- test/CodeGen/R600/literals.ll | 171
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.mul.ll | 2
-rw-r--r-- test/CodeGen/R600/llvm.AMDGPU.trunc.ll | 2
-rw-r--r-- test/CodeGen/R600/llvm.SI.fs.interp.constant.ll | 2
-rw-r--r-- test/CodeGen/R600/llvm.SI.sample.ll | 132
-rw-r--r-- test/CodeGen/R600/llvm.cos.ll | 2
-rw-r--r-- test/CodeGen/R600/llvm.pow.ll | 6
-rw-r--r-- test/CodeGen/R600/llvm.sin.ll | 2
-rw-r--r-- test/CodeGen/R600/load.constant_addrspace.f32.ll | 9
-rw-r--r-- test/CodeGen/R600/load.i8.ll | 10
-rw-r--r-- test/CodeGen/R600/load.ll | 20
-rw-r--r-- test/CodeGen/R600/loop-address.ll | 41
-rw-r--r-- test/CodeGen/R600/lshl.ll | 2
-rw-r--r-- test/CodeGen/R600/lshr.ll | 2
-rw-r--r-- test/CodeGen/R600/mul.ll | 16
-rw-r--r-- test/CodeGen/R600/mulhu.ll | 2
-rw-r--r-- test/CodeGen/R600/or.ll | 13
-rw-r--r-- test/CodeGen/R600/predicates.ll | 24
-rw-r--r-- test/CodeGen/R600/pv.ll | 244
-rw-r--r-- test/CodeGen/R600/r600-encoding.ll | 24
-rw-r--r-- test/CodeGen/R600/reciprocal.ll | 2
-rw-r--r-- test/CodeGen/R600/sdiv.ll | 2
-rw-r--r-- test/CodeGen/R600/selectcc-cnd.ll (renamed from test/CodeGen/R600/selectcc_cnde.ll) | 3
-rw-r--r-- test/CodeGen/R600/selectcc-cnde-int.ll (renamed from test/CodeGen/R600/selectcc_cnde_int.ll) | 3
-rw-r--r-- test/CodeGen/R600/selectcc-icmp-select-float.ll | 3
-rw-r--r-- test/CodeGen/R600/set-dx10.ll | 36
-rw-r--r-- test/CodeGen/R600/setcc.ll (renamed from test/CodeGen/R600/setcc.v4i32.ll) | 0
-rw-r--r-- test/CodeGen/R600/seto.ll | 2
-rw-r--r-- test/CodeGen/R600/setuo.ll | 2
-rw-r--r-- test/CodeGen/R600/shl.ll | 13
-rw-r--r-- test/CodeGen/R600/sint_to_fp.ll | 14
-rw-r--r-- test/CodeGen/R600/sra.ll | 13
-rw-r--r-- test/CodeGen/R600/srl.ll | 13
-rw-r--r-- test/CodeGen/R600/store.ll | 13
-rw-r--r-- test/CodeGen/R600/store.r600.ll | 22
-rw-r--r-- test/CodeGen/R600/store.v4f32.ll | 9
-rw-r--r-- test/CodeGen/R600/store.v4i32.ll | 9
-rw-r--r-- test/CodeGen/R600/sub.ll | 15
-rw-r--r-- test/CodeGen/R600/udiv.ll (renamed from test/CodeGen/R600/udiv.v4i32.ll) | 2
-rw-r--r-- test/CodeGen/R600/uint_to_fp.ll | 14
-rw-r--r-- test/CodeGen/R600/unsupported-cc.ll | 24
-rw-r--r-- test/CodeGen/R600/urecip.ll | 12
-rw-r--r-- test/CodeGen/R600/urem.ll (renamed from test/CodeGen/R600/urem.v4i32.ll) | 2
-rw-r--r-- test/CodeGen/R600/vec4-expand.ll | 53
-rw-r--r-- test/CodeGen/R600/vselect.ll | 17
-rw-r--r-- test/CodeGen/R600/xor.ll | 13
-rw-r--r-- test/CodeGen/SPARC/64abi.ll | 378
-rw-r--r-- test/CodeGen/SPARC/64bit.ll | 37
-rw-r--r-- test/CodeGen/SPARC/constpool.ll | 48
-rw-r--r-- test/CodeGen/SPARC/globals.ll | 50
-rw-r--r-- test/CodeGen/SPARC/varargs.ll | 75
-rw-r--r-- test/CodeGen/SystemZ/addr-01.ll | 107
-rw-r--r-- test/CodeGen/SystemZ/addr-02.ll | 116
-rw-r--r-- test/CodeGen/SystemZ/addr-03.ll | 48
-rw-r--r-- test/CodeGen/SystemZ/alloca-01.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/alloca-02.ll | 49
-rw-r--r-- test/CodeGen/SystemZ/and-01.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/and-02.ll | 93
-rw-r--r-- test/CodeGen/SystemZ/and-03.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/and-04.ll | 180
-rw-r--r-- test/CodeGen/SystemZ/and-05.ll | 165
-rw-r--r-- test/CodeGen/SystemZ/and-06.ll | 108
-rw-r--r-- test/CodeGen/SystemZ/args-01.ll | 74
-rw-r--r-- test/CodeGen/SystemZ/args-02.ll | 76
-rw-r--r-- test/CodeGen/SystemZ/args-03.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/args-04.ll | 126
-rw-r--r-- test/CodeGen/SystemZ/args-05.ll | 47
-rw-r--r-- test/CodeGen/SystemZ/args-06.ll | 76
-rw-r--r-- test/CodeGen/SystemZ/asm-01.ll | 61
-rw-r--r-- test/CodeGen/SystemZ/asm-02.ll | 52
-rw-r--r-- test/CodeGen/SystemZ/asm-03.ll | 16
-rw-r--r-- test/CodeGen/SystemZ/asm-04.ll | 16
-rw-r--r-- test/CodeGen/SystemZ/asm-05.ll | 15
-rw-r--r-- test/CodeGen/SystemZ/asm-06.ll | 39
-rw-r--r-- test/CodeGen/SystemZ/asm-07.ll | 39
-rw-r--r-- test/CodeGen/SystemZ/asm-08.ll | 39
-rw-r--r-- test/CodeGen/SystemZ/asm-09.ll | 83
-rw-r--r-- test/CodeGen/SystemZ/asm-10.ll | 30
-rw-r--r-- test/CodeGen/SystemZ/asm-11.ll | 41
-rw-r--r-- test/CodeGen/SystemZ/asm-12.ll | 41
-rw-r--r-- test/CodeGen/SystemZ/asm-13.ll | 41
-rw-r--r-- test/CodeGen/SystemZ/asm-14.ll | 41
-rw-r--r-- test/CodeGen/SystemZ/asm-15.ll | 32
-rw-r--r-- test/CodeGen/SystemZ/asm-16.ll | 32
-rw-r--r-- test/CodeGen/SystemZ/atomic-load-01.ll | 13
-rw-r--r-- test/CodeGen/SystemZ/atomic-load-02.ll | 13
-rw-r--r-- test/CodeGen/SystemZ/atomic-load-03.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/atomic-load-04.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/atomic-store-01.ll | 13
-rw-r--r-- test/CodeGen/SystemZ/atomic-store-02.ll | 13
-rw-r--r-- test/CodeGen/SystemZ/atomic-store-03.ll | 16
-rw-r--r-- test/CodeGen/SystemZ/atomic-store-04.ll | 16
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-add-01.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-add-02.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-add-03.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-add-04.ll | 112
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-and-01.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-and-02.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-and-03.ll | 85
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-and-04.ll | 157
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-minmax-01.ll | 228
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-minmax-02.ll | 228
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-minmax-03.ll | 176
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-minmax-04.ll | 143
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-nand-01.ll | 139
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-nand-02.ll | 139
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-nand-03.ll | 93
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-nand-04.ll | 183
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-or-01.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-or-02.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-or-03.ll | 85
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-or-04.ll | 158
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-sub-01.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-sub-02.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-sub-03.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-sub-04.ll | 112
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xchg-01.ll | 55
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xchg-02.ll | 55
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xchg-03.ll | 122
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xchg-04.ll | 88
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xor-01.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xor-02.ll | 132
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xor-03.ll | 49
-rw-r--r-- test/CodeGen/SystemZ/atomicrmw-xor-04.ll | 77
-rw-r--r-- test/CodeGen/SystemZ/branch-01.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/branch-02.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/branch-03.ll | 63
-rw-r--r-- test/CodeGen/SystemZ/branch-04.ll | 218
-rw-r--r-- test/CodeGen/SystemZ/branch-05.ll | 58
-rw-r--r-- test/CodeGen/SystemZ/bswap-01.ll | 24
-rw-r--r-- test/CodeGen/SystemZ/bswap-02.ll | 87
-rw-r--r-- test/CodeGen/SystemZ/bswap-03.ll | 87
-rw-r--r-- test/CodeGen/SystemZ/bswap-04.ll | 87
-rw-r--r-- test/CodeGen/SystemZ/bswap-05.ll | 87
-rw-r--r-- test/CodeGen/SystemZ/call-01.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/call-02.ll | 16
-rw-r--r-- test/CodeGen/SystemZ/cmpxchg-01.ll | 56
-rw-r--r-- test/CodeGen/SystemZ/cmpxchg-02.ll | 56
-rw-r--r-- test/CodeGen/SystemZ/cmpxchg-03.ll | 131
-rw-r--r-- test/CodeGen/SystemZ/cmpxchg-04.ll | 98
-rw-r--r-- test/CodeGen/SystemZ/fp-abs-01.ll | 40
-rw-r--r-- test/CodeGen/SystemZ/fp-abs-02.ll | 43
-rw-r--r-- test/CodeGen/SystemZ/fp-add-01.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-add-02.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-add-03.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-cmp-01.ll | 89
-rw-r--r-- test/CodeGen/SystemZ/fp-cmp-02.ll | 89
-rw-r--r-- test/CodeGen/SystemZ/fp-cmp-03.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-const-01.ll | 30
-rw-r--r-- test/CodeGen/SystemZ/fp-const-02.ll | 31
-rw-r--r-- test/CodeGen/SystemZ/fp-const-03.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/fp-const-04.ll | 15
-rw-r--r-- test/CodeGen/SystemZ/fp-const-05.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/fp-const-06.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/fp-const-07.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/fp-const-08.ll | 21
-rw-r--r-- test/CodeGen/SystemZ/fp-const-09.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-01.ll | 61
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-02.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-03.ll | 89
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-04.ll | 89
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-05.ll | 33
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-06.ll | 37
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-07.ll | 33
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-08.ll | 35
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-09.ll | 33
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-10.ll | 45
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-11.ll | 33
-rw-r--r-- test/CodeGen/SystemZ/fp-conv-12.ll | 44
-rw-r--r-- test/CodeGen/SystemZ/fp-copysign-01.ll | 128
-rw-r--r-- test/CodeGen/SystemZ/fp-div-01.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-div-02.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-div-03.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-move-01.ll | 30
-rw-r--r-- test/CodeGen/SystemZ/fp-move-02.ll | 103
-rw-r--r-- test/CodeGen/SystemZ/fp-move-03.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-move-04.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-move-05.ll | 151
-rw-r--r-- test/CodeGen/SystemZ/fp-move-06.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-move-07.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-move-08.ll | 151
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-01.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-02.ll | 83
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-03.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-04.ll | 103
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-05.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-06.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-07.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-08.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-mul-09.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/fp-neg-01.ll | 38
-rw-r--r-- test/CodeGen/SystemZ/fp-round-01.ll | 36
-rw-r--r-- test/CodeGen/SystemZ/fp-sqrt-01.ll | 73
-rw-r--r-- test/CodeGen/SystemZ/fp-sqrt-02.ll | 73
-rw-r--r-- test/CodeGen/SystemZ/fp-sqrt-03.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/fp-sub-01.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-sub-02.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/fp-sub-03.ll | 20
-rw-r--r-- test/CodeGen/SystemZ/frame-01.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/frame-02.ll | 257
-rw-r--r-- test/CodeGen/SystemZ/frame-03.ll | 259
-rw-r--r-- test/CodeGen/SystemZ/frame-04.ll | 187
-rw-r--r-- test/CodeGen/SystemZ/frame-05.ll | 219
-rw-r--r-- test/CodeGen/SystemZ/frame-06.ll | 216
-rw-r--r-- test/CodeGen/SystemZ/frame-07.ll | 249
-rw-r--r-- test/CodeGen/SystemZ/frame-08.ll | 277
-rw-r--r-- test/CodeGen/SystemZ/frame-09.ll | 153
-rw-r--r-- test/CodeGen/SystemZ/frame-10.ll | 14
-rw-r--r-- test/CodeGen/SystemZ/frame-11.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/frame-13.ll | 299
-rw-r--r-- test/CodeGen/SystemZ/frame-14.ll | 322
-rw-r--r-- test/CodeGen/SystemZ/frame-15.ll | 352
-rw-r--r-- test/CodeGen/SystemZ/frame-16.ll | 327
-rw-r--r-- test/CodeGen/SystemZ/frame-17.ll | 177
-rw-r--r-- test/CodeGen/SystemZ/frame-18.ll | 91
-rw-r--r-- test/CodeGen/SystemZ/insert-01.ll | 230
-rw-r--r-- test/CodeGen/SystemZ/insert-02.ll | 230
-rw-r--r-- test/CodeGen/SystemZ/insert-03.ll | 71
-rw-r--r-- test/CodeGen/SystemZ/insert-04.ll | 137
-rw-r--r-- test/CodeGen/SystemZ/insert-05.ll | 224
-rw-r--r-- test/CodeGen/SystemZ/insert-06.ll | 167
-rw-r--r-- test/CodeGen/SystemZ/int-add-01.ll | 131
-rw-r--r-- test/CodeGen/SystemZ/int-add-02.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/int-add-03.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-add-04.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-add-05.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/int-add-06.ll | 93
-rw-r--r-- test/CodeGen/SystemZ/int-add-07.ll | 131
-rw-r--r-- test/CodeGen/SystemZ/int-add-08.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/int-add-09.ll | 56
-rw-r--r-- test/CodeGen/SystemZ/int-add-10.ll | 165
-rw-r--r-- test/CodeGen/SystemZ/int-add-11.ll | 128
-rw-r--r-- test/CodeGen/SystemZ/int-add-12.ll | 128
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-01.ll | 151
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-02.ll | 162
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-03.ll | 162
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-04.ll | 107
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-05.ll | 203
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-06.ll | 253
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-07.ll | 118
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-08.ll | 118
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-09.ll | 135
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-10.ll | 28
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-11.ll | 135
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-12.ll | 40
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-13.ll | 147
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-14.ll | 147
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-15.ll | 241
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-16.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-17.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-18.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-19.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-20.ll | 220
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-21.ll | 220
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-22.ll | 128
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-23.ll | 89
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-24.ll | 55
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-25.ll | 55
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-26.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-27.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-28.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-29.ll | 133
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-30.ll | 225
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-31.ll | 225
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-32.ll | 237
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-33.ll | 139
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-34.ll | 237
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-35.ll | 139
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-36.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-37.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-38.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-39.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-40.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-41.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-42.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-cmp-43.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/int-const-01.ll | 91
-rw-r--r-- test/CodeGen/SystemZ/int-const-02.ll | 251
-rw-r--r-- test/CodeGen/SystemZ/int-const-03.ll | 166
-rw-r--r-- test/CodeGen/SystemZ/int-const-04.ll | 111
-rw-r--r-- test/CodeGen/SystemZ/int-const-05.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-const-06.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-conv-01.ll | 105
-rw-r--r-- test/CodeGen/SystemZ/int-conv-02.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/int-conv-03.ll | 105
-rw-r--r-- test/CodeGen/SystemZ/int-conv-04.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/int-conv-05.ll | 140
-rw-r--r-- test/CodeGen/SystemZ/int-conv-06.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/int-conv-07.ll | 105
-rw-r--r-- test/CodeGen/SystemZ/int-conv-08.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/int-conv-09.ll | 104
-rw-r--r-- test/CodeGen/SystemZ/int-conv-10.ll | 113
-rw-r--r-- test/CodeGen/SystemZ/int-div-01.ll | 190
-rw-r--r-- test/CodeGen/SystemZ/int-div-02.ll | 166
-rw-r--r-- test/CodeGen/SystemZ/int-div-03.ll | 189
-rw-r--r-- test/CodeGen/SystemZ/int-div-04.ll | 154
-rw-r--r-- test/CodeGen/SystemZ/int-div-05.ll | 166
-rw-r--r-- test/CodeGen/SystemZ/int-move-01.ll | 35
-rw-r--r-- test/CodeGen/SystemZ/int-move-02.ll | 110
-rw-r--r-- test/CodeGen/SystemZ/int-move-03.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/int-move-04.ll | 130
-rw-r--r-- test/CodeGen/SystemZ/int-move-05.ll | 130
-rw-r--r-- test/CodeGen/SystemZ/int-move-06.ll | 117
-rw-r--r-- test/CodeGen/SystemZ/int-move-07.ll | 78
-rw-r--r-- test/CodeGen/SystemZ/int-move-08.ll | 49
-rw-r--r-- test/CodeGen/SystemZ/int-move-09.ll | 81
-rw-r--r-- test/CodeGen/SystemZ/int-mul-01.ll | 131
-rw-r--r-- test/CodeGen/SystemZ/int-mul-02.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/int-mul-03.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-mul-04.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/int-mul-05.ll | 159
-rw-r--r-- test/CodeGen/SystemZ/int-mul-06.ll | 159
-rw-r--r-- test/CodeGen/SystemZ/int-mul-07.ll | 64
-rw-r--r-- test/CodeGen/SystemZ/int-mul-08.ll | 188
-rw-r--r-- test/CodeGen/SystemZ/int-neg-01.ll | 42
-rw-r--r-- test/CodeGen/SystemZ/int-sub-01.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/int-sub-02.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-sub-03.ll | 102
-rw-r--r-- test/CodeGen/SystemZ/int-sub-04.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/int-sub-05.ll | 118
-rw-r--r-- test/CodeGen/SystemZ/int-sub-06.ll | 165
-rw-r--r-- test/CodeGen/SystemZ/la-01.ll | 80
-rw-r--r-- test/CodeGen/SystemZ/la-02.ll | 87
-rw-r--r-- test/CodeGen/SystemZ/la-03.ll | 85
-rw-r--r-- test/CodeGen/SystemZ/la-04.ll | 18
-rw-r--r-- test/CodeGen/SystemZ/lit.local.cfg | 6
-rw-r--r-- test/CodeGen/SystemZ/or-01.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/or-02.ll | 66
-rw-r--r-- test/CodeGen/SystemZ/or-03.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/or-04.ll | 182
-rw-r--r-- test/CodeGen/SystemZ/or-05.ll | 165
-rw-r--r-- test/CodeGen/SystemZ/or-06.ll | 108
-rw-r--r-- test/CodeGen/SystemZ/shift-01.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/shift-02.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/shift-03.ll | 114
-rw-r--r-- test/CodeGen/SystemZ/shift-04.ll | 189
-rw-r--r-- test/CodeGen/SystemZ/shift-05.ll | 149
-rw-r--r-- test/CodeGen/SystemZ/shift-06.ll | 149
-rw-r--r-- test/CodeGen/SystemZ/shift-07.ll | 149
-rw-r--r-- test/CodeGen/SystemZ/shift-08.ll | 190
-rw-r--r-- test/CodeGen/SystemZ/tls-01.ll | 22
-rw-r--r-- test/CodeGen/SystemZ/xor-01.ll | 129
-rw-r--r-- test/CodeGen/SystemZ/xor-02.ll | 40
-rw-r--r-- test/CodeGen/SystemZ/xor-03.ll | 94
-rw-r--r-- test/CodeGen/SystemZ/xor-04.ll | 69
-rw-r--r-- test/CodeGen/SystemZ/xor-05.ll | 165
-rw-r--r-- test/CodeGen/SystemZ/xor-06.ll | 108
-rw-r--r-- test/CodeGen/Thumb/large-stack.ll | 4
-rw-r--r-- test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll | 10
-rw-r--r-- test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll | 4
-rw-r--r-- test/CodeGen/X86/2006-07-31-SingleRegClass.ll | 7
-rw-r--r-- test/CodeGen/X86/2006-11-27-SelectLegalize.ll | 4
-rw-r--r-- test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll | 5
-rw-r--r-- test/CodeGen/X86/2007-04-24-Huge-Stack.ll | 4
-rw-r--r-- test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll | 5
-rw-r--r-- test/CodeGen/X86/2007-06-15-IntToMMX.ll | 5
-rw-r--r-- test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll | 4
-rw-r--r-- test/CodeGen/X86/2007-10-19-SpillerUnfold.ll | 6
-rw-r--r-- test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll | 4
-rw-r--r-- test/CodeGen/X86/2008-01-09-LongDoubleSin.ll | 4
-rw-r--r-- test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll | 10
-rw-r--r-- test/CodeGen/X86/2008-11-06-testb.ll | 4
-rw-r--r-- test/CodeGen/X86/2009-02-25-CommuteBug.ll | 4
-rw-r--r-- test/CodeGen/X86/2009-03-25-TestBug.ll | 7
-rw-r--r-- test/CodeGen/X86/2009-04-16-SpillerUnfold.ll | 4
-rw-r--r-- test/CodeGen/X86/2009-04-24.ll | 7
-rw-r--r-- test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll | 7
-rw-r--r-- test/CodeGen/X86/2009-05-23-available_externally.ll | 5
-rw-r--r-- test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll | 4
-rw-r--r-- test/CodeGen/X86/2009-08-08-CastError.ll | 4
-rw-r--r-- test/CodeGen/X86/2010-05-25-DotDebugLoc.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-05-26-DotDebugLoc.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-05-28-Crash.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll | 3
-rw-r--r-- test/CodeGen/X86/2010-08-04-StackVariable.ll | 2
-rw-r--r-- test/CodeGen/X86/2010-11-02-DbgParameter.ll | 2
-rw-r--r-- test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll | 2
-rw-r--r-- test/CodeGen/X86/2011-09-14-valcoalesce.ll | 6
-rw-r--r-- test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll | 8
-rw-r--r-- test/CodeGen/X86/2012-11-30-handlemove-dbg.ll | 5
-rw-r--r-- test/CodeGen/X86/2013-03-13-VEX-DestReg.ll | 7
-rw-r--r-- test/CodeGen/X86/2013-05-06-ConactVectorCrash.ll | 14
-rw-r--r-- test/CodeGen/X86/MachineSink-DbgValue.ll | 2
-rw-r--r-- test/CodeGen/X86/add.ll | 4
-rw-r--r-- test/CodeGen/X86/asm-invalid-register-class-crasher.ll | 9
-rw-r--r-- test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll | 15
-rw-r--r-- test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll | 14
-rw-r--r-- test/CodeGen/X86/atom-fixup-lea1.ll | 38
-rw-r--r-- test/CodeGen/X86/atom-fixup-lea2.ll | 84
-rw-r--r-- test/CodeGen/X86/atom-fixup-lea3.ll | 51
-rw-r--r-- test/CodeGen/X86/atomic-dagsched.ll | 18
-rw-r--r-- test/CodeGen/X86/avx-basic.ll | 10
-rw-r--r-- test/CodeGen/X86/avx-brcond.ll | 150
-rw-r--r-- test/CodeGen/X86/block-placement.ll | 4
-rw-r--r-- test/CodeGen/X86/brcond.ll | 147
-rw-r--r-- test/CodeGen/X86/bswap-inline-asm.ll | 7
-rw-r--r-- test/CodeGen/X86/bt.ll | 5
-rw-r--r-- test/CodeGen/X86/call-imm.ll | 13
-rw-r--r-- test/CodeGen/X86/coalescer-identity.ll | 12
-rw-r--r-- test/CodeGen/X86/code_placement_align_all.ll | 22
-rw-r--r-- test/CodeGen/X86/codegen-prepare.ll | 44
-rw-r--r-- test/CodeGen/X86/commute-intrinsic.ll | 4
-rw-r--r-- test/CodeGen/X86/compact-unwind.ll | 30
-rw-r--r-- test/CodeGen/X86/compiler_used.ll | 7
-rw-r--r-- test/CodeGen/X86/crash.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-byval-parameter.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-const-int.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-const.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-i128-const.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-large-unsigned-const.ll | 7
-rw-r--r-- test/CodeGen/X86/dbg-merge-loc-entry.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-prolog-end.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-subrange.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-value-dag-combine.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-value-isel.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-value-location.ll | 2
-rw-r--r-- test/CodeGen/X86/dbg-value-range.ll | 7
-rw-r--r-- test/CodeGen/X86/fast-cc-merge-stack-adj.ll | 4
-rw-r--r-- test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll | 3
-rw-r--r-- test/CodeGen/X86/fast-isel-constpool.ll | 4
-rw-r--r-- test/CodeGen/X86/fast-isel-divrem-x86-64.ll | 41
-rw-r--r-- test/CodeGen/X86/fast-isel-divrem.ll | 122
-rw-r--r-- test/CodeGen/X86/fast-isel-fneg.ll | 6
-rw-r--r-- test/CodeGen/X86/fast-isel-gv.ll | 4
-rw-r--r-- test/CodeGen/X86/fast-isel-tailcall.ll | 3
-rw-r--r-- test/CodeGen/X86/fast-isel-unaligned-store.ll | 18
-rw-r--r-- test/CodeGen/X86/fastcall-correct-mangling.ll | 5
-rw-r--r-- test/CodeGen/X86/fastcc-2.ll | 5
-rw-r--r-- test/CodeGen/X86/fastcc-byval.ll | 6
-rw-r--r-- test/CodeGen/X86/fastcc-sret.ll | 6
-rw-r--r-- test/CodeGen/X86/fastcc3struct.ll | 9
-rw-r--r-- test/CodeGen/X86/fold-imm.ll | 11
-rw-r--r-- test/CodeGen/X86/fp-elim-and-no-fp-elim.ll | 32
-rw-r--r-- test/CodeGen/X86/fp-immediate-shorten.ll | 5
-rw-r--r-- test/CodeGen/X86/fp_load_cast_fold.ll | 8
-rw-r--r-- test/CodeGen/X86/long-setcc.ll | 19
-rw-r--r-- test/CodeGen/X86/lsr-normalization.ll | 5
-rw-r--r-- test/CodeGen/X86/lsr-static-addr.ll | 2
-rw-r--r-- test/CodeGen/X86/misched-copy.ll | 49
-rw-r--r-- test/CodeGen/X86/misched-matmul.ll | 68
-rw-r--r-- test/CodeGen/X86/misched-matrix.ll | 76
-rw-r--r-- test/CodeGen/X86/mmx-pinsrw.ll | 4
-rw-r--r-- test/CodeGen/X86/mul-legalize.ll | 4
-rw-r--r-- test/CodeGen/X86/negative_zero.ll | 4
-rw-r--r-- test/CodeGen/X86/no-compact-unwind.ll | 56
-rw-r--r-- test/CodeGen/X86/nosse-error1.ll | 11
-rw-r--r-- test/CodeGen/X86/nosse-error2.ll | 11
-rw-r--r-- test/CodeGen/X86/optimize-max-2.ll | 8
-rw-r--r-- test/CodeGen/X86/peep-test-2.ll | 4
-rw-r--r-- test/CodeGen/X86/phys_subreg_coalesce.ll | 4
-rw-r--r-- test/CodeGen/X86/pr12889.ll | 5
-rw-r--r-- test/CodeGen/X86/pr2656.ll | 5
-rw-r--r-- test/CodeGen/X86/private-2.ll | 4
-rw-r--r-- test/CodeGen/X86/rd-mod-wr-eflags.ll | 21
-rw-r--r-- test/CodeGen/X86/select-with-and-or.ll | 72
-rw-r--r-- test/CodeGen/X86/sincos-opt.ll | 4
-rw-r--r-- test/CodeGen/X86/stdcall.ll | 24
-rw-r--r-- test/CodeGen/X86/store-fp-constant.ll | 7
-rw-r--r-- test/CodeGen/X86/subreg-to-reg-1.ll | 5
-rw-r--r-- test/CodeGen/X86/subreg-to-reg-3.ll | 4
-rw-r--r-- test/CodeGen/X86/subtarget-feature-change.ll | 16
-rw-r--r-- test/CodeGen/X86/switch-crit-edge-constant.ll | 6
-rw-r--r-- test/CodeGen/X86/tailcall-64.ll | 96
-rw-r--r-- test/CodeGen/X86/this-return-64.ll | 89
-rw-r--r-- test/CodeGen/X86/unwindraise.ll | 53
-rw-r--r-- test/CodeGen/X86/v4f32-immediate.ll | 4
-rw-r--r-- test/CodeGen/X86/vararg_tailcall.ll | 32
-rw-r--r-- test/CodeGen/X86/vec_compare.ll | 156
-rw-r--r-- test/CodeGen/X86/vec_set-9.ll | 9
-rw-r--r-- test/CodeGen/X86/vec_set-B.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-D.ll | 4
-rw-r--r-- test/CodeGen/X86/vec_set-I.ll | 7
-rw-r--r-- test/CodeGen/X86/vec_shuffle-28.ll | 6
-rw-r--r-- test/CodeGen/X86/vec_zero_cse.ll | 10
-rw-r--r-- test/CodeGen/X86/vector.ll | 4
-rw-r--r-- test/CodeGen/X86/viabs.ll | 183
-rw-r--r-- test/CodeGen/X86/win32_sret.ll | 13
-rw-r--r-- test/CodeGen/X86/x86-64-frameaddr.ll | 7
-rw-r--r-- test/CodeGen/X86/x86-64-pic-3.ll | 9
-rw-r--r-- test/CodeGen/X86/x86-64-shortint.ll | 4
-rw-r--r-- test/CodeGen/X86/zext-extract_subreg.ll | 6
-rw-r--r-- test/CodeGen/X86/zext-inreg-0.ll | 15
-rw-r--r-- test/CodeGen/XCore/offset_folding.ll (renamed from test/CodeGen/XCore/global_negative_offset.ll) | 31
-rw-r--r-- test/CodeGen/XCore/unaligned_load.ll | 25
-rw-r--r-- test/CodeGen/XCore/unaligned_store.ll | 14
651 files changed, 39640 insertions(+), 1817 deletions(-)
diff --git a/test/CodeGen/AArch64/adrp-relocation.ll b/test/CodeGen/AArch64/adrp-relocation.ll
index c33b442624a5..cf411166a3a0 100644
--- a/test/CodeGen/AArch64/adrp-relocation.ll
+++ b/test/CodeGen/AArch64/adrp-relocation.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -filetype=obj < %s | elf-dump | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -filetype=obj < %s | llvm-readobj -s -r | FileCheck %s
define i64 @testfn() nounwind {
entry:
@@ -19,17 +19,9 @@ entry:
; relative offsets of testfn and foo) because its value depends on where this
; object file's .text section gets relocated in memory.
-; CHECK: .rela.text
-
-; CHECK: # Relocation 0
-; CHECK-NEXT: (('r_offset', 0x0000000000000010)
-; CHECK-NEXT: ('r_sym', 0x00000007)
-; CHECK-NEXT: ('r_type', 0x00000113)
-; CHECK-NEXT: ('r_addend', 0x0000000000000000)
-; CHECK-NEXT: ),
-; CHECK-NEXT: Relocation 1
-; CHECK-NEXT: (('r_offset', 0x0000000000000014)
-; CHECK-NEXT: ('r_sym', 0x00000007)
-; CHECK-NEXT: ('r_type', 0x00000115)
-; CHECK-NEXT: ('r_addend', 0x0000000000000000)
-; CHECK-NEXT: ),
+; CHECK: Relocations [
+; CHECK-NEXT: Section (1) .text {
+; CHECK-NEXT: 0x10 R_AARCH64_ADR_PREL_PG_HI21 testfn 0x0
+; CHECK-NEXT: 0x14 R_AARCH64_ADD_ABS_LO12_NC testfn 0x0
+; CHECK-NEXT: }
+; CHECK-NEXT: ]
diff --git a/test/CodeGen/AArch64/atomic-ops-not-barriers.ll b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
index 3c03e47147b0..9888a742e32b 100644
--- a/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
+++ b/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
define i32 @foo(i32* %var, i1 %cond) {
; CHECK: foo:
@@ -9,7 +9,9 @@ simple_ver:
store i32 %newval, i32* %var
br label %somewhere
atomic_ver:
- %val = atomicrmw add i32* %var, i32 -1 seq_cst
+ fence seq_cst
+ %val = atomicrmw add i32* %var, i32 -1 monotonic
+ fence seq_cst
br label %somewhere
; CHECK: dmb
; CHECK: ldxr
diff --git a/test/CodeGen/AArch64/atomic-ops.ll b/test/CodeGen/AArch64/atomic-ops.ll
index f3c16171cc83..5e87f21a217d 100644
--- a/test/CodeGen/AArch64/atomic-ops.ll
+++ b/test/CodeGen/AArch64/atomic-ops.ll
@@ -8,18 +8,18 @@
define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_add_i8:
%old = atomicrmw add i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -27,19 +27,19 @@ define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_add_i16:
- %old = atomicrmw add i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw add i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -47,8 +47,8 @@ define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_add_i32:
- %old = atomicrmw add i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw add i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -57,9 +57,9 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -67,8 +67,8 @@ define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_add_i64:
- %old = atomicrmw add i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw add i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -79,7 +79,7 @@ define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
; CHECK-NEXT: add [[NEW:x[0-9]+]], x[[OLD]], x0
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -87,8 +87,8 @@ define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_sub_i8:
- %old = atomicrmw sub i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw sub i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
@@ -99,7 +99,7 @@ define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -107,8 +107,8 @@ define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_sub_i16:
- %old = atomicrmw sub i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw sub i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -117,9 +117,9 @@ define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -127,19 +127,19 @@ define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_sub_i32:
- %old = atomicrmw sub i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw sub i32* @var32, i32 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -148,18 +148,18 @@ define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_sub_i64:
%old = atomicrmw sub i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: sub [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -167,8 +167,8 @@ define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_and_i8:
- %old = atomicrmw and i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw and i8* @var8, i8 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
@@ -177,9 +177,9 @@ define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -187,8 +187,8 @@ define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_and_i16:
- %old = atomicrmw and i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw and i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -199,7 +199,7 @@ define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -208,18 +208,18 @@ define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_and_i32:
%old = atomicrmw and i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -227,19 +227,19 @@ define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_and_i64:
- %old = atomicrmw and i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw and i64* @var64, i64 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: and [[NEW:x[0-9]+]], x[[OLD]], x0
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -248,18 +248,18 @@ define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_or_i8:
%old = atomicrmw or i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -267,8 +267,8 @@ define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_or_i16:
- %old = atomicrmw or i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw or i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -279,7 +279,7 @@ define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -287,19 +287,19 @@ define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_or_i32:
- %old = atomicrmw or i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw or i32* @var32, i32 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -307,8 +307,8 @@ define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_or_i64:
- %old = atomicrmw or i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw or i64* @var64, i64 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -317,9 +317,9 @@ define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: orr [[NEW:x[0-9]+]], x[[OLD]], x0
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -327,19 +327,19 @@ define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_xor_i8:
- %old = atomicrmw xor i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xor i8* @var8, i8 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -347,8 +347,8 @@ define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_xor_i16:
- %old = atomicrmw xor i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xor i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -357,9 +357,9 @@ define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -368,18 +368,18 @@ define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_xor_i32:
%old = atomicrmw xor i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -387,8 +387,8 @@ define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_xor_i64:
- %old = atomicrmw xor i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xor i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -399,7 +399,7 @@ define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
; CHECK-NEXT: eor [[NEW:x[0-9]+]], x[[OLD]], x0
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -407,8 +407,8 @@ define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_xchg_i8:
- %old = atomicrmw xchg i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
@@ -418,7 +418,7 @@ define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
; function there.
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -427,17 +427,17 @@ define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_xchg_i16:
%old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
+; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -445,8 +445,8 @@ define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_xchg_i32:
- %old = atomicrmw xchg i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xchg i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -454,9 +454,9 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -464,18 +464,18 @@ define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_xchg_i64:
- %old = atomicrmw xchg i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw xchg i64* @var64, i64 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], x0, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -484,20 +484,20 @@ define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_min_i8:
- %old = atomicrmw min i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw min i8* @var8, i8 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -505,8 +505,8 @@ define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_min_i16:
- %old = atomicrmw min i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw min i16* @var16, i16 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -516,9 +516,9 @@ define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], sxth
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
-; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -526,8 +526,8 @@ define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_min_i32:
- %old = atomicrmw min i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw min i32* @var32, i32 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -539,7 +539,7 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -548,19 +548,19 @@ define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_min_i64:
%old = atomicrmw min i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp x0, x[[OLD]]
; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -569,19 +569,19 @@ define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_max_i8:
%old = atomicrmw max i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
-; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -589,20 +589,20 @@ define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_max_i16:
- %old = atomicrmw max i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw max i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], sxth
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -610,8 +610,8 @@ define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_max_i32:
- %old = atomicrmw max i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw max i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -621,9 +621,9 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]]
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -631,8 +631,8 @@ define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_max_i64:
- %old = atomicrmw max i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw max i64* @var64, i64 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -644,7 +644,7 @@ define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lt
; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -652,8 +652,8 @@ define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_umin_i8:
- %old = atomicrmw umin i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umin i8* @var8, i8 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
@@ -665,7 +665,7 @@ define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -673,20 +673,20 @@ define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_umin_i16:
- %old = atomicrmw umin i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umin i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], uxth
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -695,19 +695,19 @@ define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_umin_i32:
%old = atomicrmw umin i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]]
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -715,20 +715,20 @@ define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_umin_i64:
- %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
; x0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp x0, x[[OLD]]
; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -736,20 +736,20 @@ define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
; CHECK: test_atomic_load_umax_i8:
- %old = atomicrmw umax i8* @var8, i8 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
-; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -757,8 +757,8 @@ define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
; CHECK: test_atomic_load_umax_i16:
- %old = atomicrmw umax i16* @var16, i16 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umax i16* @var16, i16 %offset monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
@@ -770,7 +770,7 @@ define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -779,19 +779,19 @@ define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
; CHECK: test_atomic_load_umax_i32:
%old = atomicrmw umax i32* @var32, i32 %offset seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
; CHECK: .LBB{{[0-9]+}}_1:
-; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w0, w[[OLD]]
; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -799,8 +799,8 @@ define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
; CHECK: test_atomic_load_umax_i64:
- %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
-; CHECK: dmb ish
+ %old = atomicrmw umax i64* @var64, i64 %offset release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -810,9 +810,9 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
; function there.
; CHECK-NEXT: cmp x0, x[[OLD]]
; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lo
-; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
+; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -820,13 +820,13 @@ define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; CHECK: test_atomic_cmpxchg_i8:
- %old = cmpxchg i8* @var8, i8 %wanted, i8 %new seq_cst
-; CHECK: dmb ish
+ %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
@@ -834,7 +834,7 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; As above, w1 is a reasonable guess.
; CHECK: stxrb [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i8 %old
@@ -843,20 +843,20 @@ define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; CHECK: test_atomic_cmpxchg_i16:
%old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
-; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
; w0 below is a reasonable guess but could change: it certainly comes into the
; function there.
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
; As above, w1 is a reasonable guess.
-; CHECK: stxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i16 %old
@@ -864,8 +864,8 @@ define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK: test_atomic_cmpxchg_i32:
- %old = cmpxchg i32* @var32, i32 %wanted, i32 %new seq_cst
-; CHECK: dmb ish
+ %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
@@ -876,9 +876,9 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; CHECK-NEXT: cmp w[[OLD]], w0
; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
; As above, w1 is a reasonable guess.
-; CHECK: stxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
+; CHECK: stlxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i32 %old
@@ -886,8 +886,8 @@ define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; CHECK: test_atomic_cmpxchg_i64:
- %old = cmpxchg i64* @var64, i64 %wanted, i64 %new seq_cst
-; CHECK: dmb ish
+ %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
@@ -900,7 +900,7 @@ define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; As above, w1 is a reasonable guess.
; CHECK: stxr [[STATUS:w[0-9]+]], x1, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
; CHECK: mov x0, x[[OLD]]
ret i64 %old
@@ -933,19 +933,26 @@ define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
define i8 @test_atomic_load_acquire_i8() nounwind {
; CHECK: test_atomic_load_acquire_i8:
%val = load atomic i8* @var8 acquire, align 1
+; CHECK-NOT: dmb
; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
+; CHECK-NOT: dmb
; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
-
+; CHECK-NOT: dmb
; CHECK: ldarb w0, [x[[ADDR]]]
+; CHECK-NOT: dmb
ret i8 %val
}
define i8 @test_atomic_load_seq_cst_i8() nounwind {
; CHECK: test_atomic_load_seq_cst_i8:
%val = load atomic i8* @var8 seq_cst, align 1
-; CHECK: adrp x[[HIADDR:[0-9]+]], var8
-; CHECK: ldrb w0, [x[[HIADDR]], #:lo12:var8]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
+; CHECK: adrp [[HIADDR:x[0-9]+]], var8
+; CHECK-NOT: dmb
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK-NOT: dmb
+; CHECK: ldarb w0, [x[[ADDR]]]
+; CHECK-NOT: dmb
ret i8 %val
}
@@ -954,6 +961,7 @@ define i16 @test_atomic_load_monotonic_i16() nounwind {
%val = load atomic i16* @var16 monotonic, align 2
; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
+; CHECK-NOT: dmb
; CHECK: ldrh w0, [x[[HIADDR]], #:lo12:var16]
; CHECK-NOT: dmb
@@ -976,9 +984,13 @@ define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind
define i64 @test_atomic_load_seq_cst_i64() nounwind {
; CHECK: test_atomic_load_seq_cst_i64:
%val = load atomic i64* @var64 seq_cst, align 8
-; CHECK: adrp x[[HIADDR:[0-9]+]], var64
-; CHECK: ldr x0, [x[[HIADDR]], #:lo12:var64]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
+; CHECK: adrp [[HIADDR:x[0-9]+]], var64
+; CHECK-NOT: dmb
+; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
+; CHECK-NOT: dmb
+; CHECK: ldar x0, [x[[ADDR]]]
+; CHECK-NOT: dmb
ret i64 %val
}
@@ -1005,20 +1017,26 @@ define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val)
define void @test_atomic_store_release_i8(i8 %val) nounwind {
; CHECK: test_atomic_store_release_i8:
store atomic i8 %val, i8* @var8 release, align 1
+; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
+; CHECK-NOT: dmb
; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK-NOT: dmb
; CHECK: stlrb w0, [x[[ADDR]]]
-
+; CHECK-NOT: dmb
ret void
}
define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
; CHECK: test_atomic_store_seq_cst_i8:
store atomic i8 %val, i8* @var8 seq_cst, align 1
+; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var8
+; CHECK-NOT: dmb
; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
+; CHECK-NOT: dmb
; CHECK: stlrb w0, [x[[ADDR]]]
-; CHECK: dmb ish
+; CHECK-NOT: dmb
ret void
}
@@ -1026,9 +1044,11 @@ define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
; CHECK: test_atomic_store_monotonic_i16:
store atomic i16 %val, i16* @var16 monotonic, align 2
+; CHECK-NOT: dmb
; CHECK: adrp x[[HIADDR:[0-9]+]], var16
+; CHECK-NOT: dmb
; CHECK: strh w0, [x[[HIADDR]], #:lo12:var16]
-
+; CHECK-NOT: dmb
ret void
}
@@ -1039,7 +1059,9 @@ define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %va
%addr = inttoptr i64 %addr_int to i32*
store atomic i32 %val, i32* %addr monotonic, align 4
+; CHECK-NOT: dmb
; CHECK: str w2, [x0, x1]
+; CHECK-NOT: dmb
ret void
}
@@ -1047,9 +1069,12 @@ define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %va
define void @test_atomic_store_release_i64(i64 %val) nounwind {
; CHECK: test_atomic_store_release_i64:
store atomic i64 %val, i64* @var64 release, align 8
+; CHECK-NOT: dmb
; CHECK: adrp [[HIADDR:x[0-9]+]], var64
+; CHECK-NOT: dmb
; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
+; CHECK-NOT: dmb
; CHECK: stlr x0, [x[[ADDR]]]
-
+; CHECK-NOT: dmb
ret void
}
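
The atomic-ops changes above all follow one pattern: the old lowering bracketed every atomic with dmb ish barriers, while the new CHECK lines pin each LLVM ordering to a dedicated AArch64 instruction instead (acquire orderings to ldaxr/ldar, release orderings to stlxr/stlr, seq_cst to both halves), with CHECK-NOT: dmb guarding against stray barriers. A minimal C11 sketch of the source-level orderings these tests exercise, with hypothetical function names (not part of the test suite):

#include <stdatomic.h>

/* Under the lowering the tests above assert, none of these should emit a
 * dmb: the acquire RMW uses a load-acquire exclusive (ldaxr) with a plain
 * store-exclusive, the release store uses stlr, and the seq_cst load uses
 * ldar on its own. */
int xchg_acquire(_Atomic int *p, int v) {
    return atomic_exchange_explicit(p, v, memory_order_acquire); /* ldaxr + stxr */
}

void store_release(_Atomic int *p, int v) {
    atomic_store_explicit(p, v, memory_order_release);           /* stlr */
}

int load_seq_cst(_Atomic int *p) {
    return atomic_load_explicit(p, memory_order_seq_cst);        /* ldar, no dmb */
}
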
diff --git a/test/CodeGen/AArch64/blockaddress.ll b/test/CodeGen/AArch64/blockaddress.ll
index 3d0a5cf96bcd..5e85057a3c3b 100644
--- a/test/CodeGen/AArch64/blockaddress.ll
+++ b/test/CodeGen/AArch64/blockaddress.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -code-model=large -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK-LARGE %s
@addr = global i8* null
@@ -13,6 +14,14 @@ define void @test_blockaddress() {
; CHECK: ldr [[NEWDEST:x[0-9]+]]
; CHECK: br [[NEWDEST]]
+; CHECK-LARGE: movz [[ADDR_REG:x[0-9]+]], #:abs_g3:[[DEST_LBL:.Ltmp[0-9]+]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g2_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g1_nc:[[DEST_LBL]]
+; CHECK-LARGE: movk [[ADDR_REG]], #:abs_g0_nc:[[DEST_LBL]]
+; CHECK-LARGE: str [[ADDR_REG]],
+; CHECK-LARGE: ldr [[NEWDEST:x[0-9]+]]
+; CHECK-LARGE: br [[NEWDEST]]
+
block:
ret void
}
diff --git a/test/CodeGen/AArch64/code-model-large-abs.ll b/test/CodeGen/AArch64/code-model-large-abs.ll
new file mode 100644
index 000000000000..a365568e11ee
--- /dev/null
+++ b/test/CodeGen/AArch64/code-model-large-abs.ll
@@ -0,0 +1,61 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large < %s | FileCheck %s
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
+
+define i8* @global_addr() {
+; CHECK: global_addr:
+ ret i8* @var8
+ ; The movz/movk calculation should end up returned directly in x0.
+; CHECK: movz x0, #:abs_g3:var8
+; CHECK: movk x0, #:abs_g2_nc:var8
+; CHECK: movk x0, #:abs_g1_nc:var8
+; CHECK: movk x0, #:abs_g0_nc:var8
+; CHECK-NEXT: ret
+}
+
+define i8 @global_i8() {
+; CHECK: global_i8:
+ %val = load i8* @var8
+ ret i8 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var8
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var8
+; CHECK: ldrb w0, [x[[ADDR_REG]]]
+}
+
+define i16 @global_i16() {
+; CHECK: global_i16:
+ %val = load i16* @var16
+ ret i16 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var16
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var16
+; CHECK: ldrh w0, [x[[ADDR_REG]]]
+}
+
+define i32 @global_i32() {
+; CHECK: global_i32:
+ %val = load i32* @var32
+ ret i32 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var32
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var32
+; CHECK: ldr w0, [x[[ADDR_REG]]]
+}
+
+define i64 @global_i64() {
+; CHECK: global_i64:
+ %val = load i64* @var64
+ ret i64 %val
+; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g1_nc:var64
+; CHECK: movk x[[ADDR_REG]], #:abs_g0_nc:var64
+; CHECK: ldr x0, [x[[ADDR_REG]]]
+}
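
Each test above checks the same large-code-model idiom: movz writes the top 16-bit chunk of the symbol's absolute address and zeroes the rest of the register, then three movk instructions patch in the remaining chunks (the _nc suffix means the relocation is not checked for overflow). A hypothetical C helper, purely for illustration, showing how the four :abs_gN: chunks reassemble the 64-bit address:

#include <stdint.h>

/* Hypothetical: compose a 64-bit absolute address from the four 16-bit
 * relocation chunks used in the movz/movk sequences above. */
uint64_t materialise_abs(uint16_t g3, uint16_t g2, uint16_t g1, uint16_t g0) {
    uint64_t addr = (uint64_t)g3 << 48; /* movz xN, #:abs_g3:sym    */
    addr |= (uint64_t)g2 << 32;         /* movk xN, #:abs_g2_nc:sym */
    addr |= (uint64_t)g1 << 16;         /* movk xN, #:abs_g1_nc:sym */
    addr |= (uint64_t)g0;               /* movk xN, #:abs_g0_nc:sym */
    return addr;
}
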
diff --git a/test/CodeGen/AArch64/elf-extern.ll b/test/CodeGen/AArch64/elf-extern.ll
index ee89d8d94ba4..8bf1b2ff4fa9 100644
--- a/test/CodeGen/AArch64/elf-extern.ll
+++ b/test/CodeGen/AArch64/elf-extern.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | elf-dump | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | llvm-readobj -r | FileCheck %s
; External symbols are a different concept to global variables but should still
; get relocations and so on when used.
@@ -10,12 +10,8 @@ define i32 @check_extern() {
ret i32 0
}
-; CHECK: .rela.text
-; CHECK: ('r_sym', 0x00000009)
-; CHECK-NEXT: ('r_type', 0x0000011b)
-
-; CHECK: .symtab
-; CHECK: Symbol 9
-; CHECK-NEXT: memcpy
-
-
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x{{[0-9,A-F]+}} R_AARCH64_CALL26 memcpy
+; CHECK: }
+; CHECK: ]
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index 3d3d8676818a..bc0acc253388 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -1,4 +1,5 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -o - < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu -code-model=large -o - < %s | FileCheck --check-prefix=CHECK-LARGE %s
declare extern_weak i32 @var()
@@ -11,6 +12,12 @@ define i32()* @foo() {
; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:.LCPI0_0]
+ ; In the large model, the usual relocations are absolute and can
+ ; materialise 0.
+; CHECK-LARGE: movz x0, #:abs_g3:var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:var
+; CHECK-LARGE: movk x0, #:abs_g1_nc:var
+; CHECK-LARGE: movk x0, #:abs_g0_nc:var
}
@@ -24,6 +31,13 @@ define i32* @bar() {
; CHECK: ldr [[BASE:x[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI1_0]
; CHECK: add x0, [[BASE]], #20
ret i32* %addr
+
+ ; In the large model, the usual relocations are absolute and can
+ ; materialise 0.
+; CHECK-LARGE: movz x0, #:abs_g3:arr_var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:arr_var
+; CHECK-LARGE: movk x0, #:abs_g1_nc:arr_var
+; CHECK-LARGE: movk x0, #:abs_g0_nc:arr_var
}
@defined_weak_var = internal unnamed_addr global i32 0
@@ -32,4 +46,9 @@ define i32* @wibble() {
ret i32* @defined_weak_var
; CHECK: adrp [[BASE:x[0-9]+]], defined_weak_var
; CHECK: add x0, [[BASE]], #:lo12:defined_weak_var
+
+; CHECK-LARGE: movz x0, #:abs_g3:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g2_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g1_nc:defined_weak_var
+; CHECK-LARGE: movk x0, #:abs_g0_nc:defined_weak_var
}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/jump-table.ll b/test/CodeGen/AArch64/jump-table.ll
index dcf9f4ed455c..3c7f5f9ec1b0 100644
--- a/test/CodeGen/AArch64/jump-table.ll
+++ b/test/CodeGen/AArch64/jump-table.ll
@@ -1,5 +1,6 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | elf-dump | FileCheck %s -check-prefix=CHECK-ELF
+; RUN: llc -code-model=large -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefix=CHECK-LARGE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -filetype=obj | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-ELF
define i32 @test_jumptable(i32 %in) {
; CHECK: test_jumptable
@@ -15,6 +16,13 @@ define i32 @test_jumptable(i32 %in) {
; CHECK: ldr [[DEST:x[0-9]+]], [x[[JT]], {{x[0-9]+}}, lsl #3]
; CHECK: br [[DEST]]
+; CHECK-LARGE: movz x[[JTADDR:[0-9]+]], #:abs_g3:.LJTI0_0
+; CHECK-LARGE: movk x[[JTADDR]], #:abs_g2_nc:.LJTI0_0
+; CHECK-LARGE: movk x[[JTADDR]], #:abs_g1_nc:.LJTI0_0
+; CHECK-LARGE: movk x[[JTADDR]], #:abs_g0_nc:.LJTI0_0
+; CHECK-LARGE: ldr [[DEST:x[0-9]+]], [x[[JTADDR]], {{x[0-9]+}}, lsl #3]
+; CHECK-LARGE: br [[DEST]]
+
def:
ret i32 0
@@ -44,13 +52,15 @@ lbl4:
; ELF tests:
; First make sure we get a page/lo12 pair in .text to pick up the jump-table
-; CHECK-ELF: .rela.text
-; CHECK-ELF: ('r_sym', 0x00000008)
-; CHECK-ELF-NEXT: ('r_type', 0x00000113)
-; CHECK-ELF: ('r_sym', 0x00000008)
-; CHECK-ELF-NEXT: ('r_type', 0x00000115)
+
+; CHECK-ELF: Relocations [
+; CHECK-ELF: Section ({{[0-9]+}}) .text {
+; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ADR_PREL_PG_HI21 .rodata
+; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ADD_ABS_LO12_NC .rodata
+; CHECK-ELF: }
; Also check the targets in .rodata are relocated
-; CHECK-ELF: .rela.rodata
-; CHECK-ELF: ('r_sym', 0x00000005)
-; CHECK-ELF-NEXT: ('r_type', 0x00000101)
\ No newline at end of file
+; CHECK-ELF: Section ({{[0-9]+}}) .rodata {
+; CHECK-ELF-NEXT: 0x{{[0-9,A-F]+}} R_AARCH64_ABS64 .text
+; CHECK-ELF: }
+; CHECK-ELF: ]
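
In the large model the jump-table base is materialised with the same movz/movk sequence, and each table entry is a full 8-byte absolute code address (hence the R_AARCH64_ABS64 relocations into .text and the lsl #3 scaling in the dispatch load). A hypothetical sketch of what the table amounts to, not part of the test:

/* .LJTI0_0 behaves like an array of 8-byte code addresses; indexing it
 * scales the case number by 8, matching the 'lsl #3' checked above. */
extern void (*jump_table[])(void);  /* stands in for .LJTI0_0 */
void dispatch(unsigned long idx) {
    jump_table[idx]();              /* ldr xD, [xJT, xIDX, lsl #3]; br xD */
}
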
diff --git a/test/CodeGen/AArch64/literal_pools.ll b/test/CodeGen/AArch64/literal_pools.ll
index e09084148fdf..9cfa8c5426e4 100644
--- a/test/CodeGen/AArch64/literal_pools.ll
+++ b/test/CodeGen/AArch64/literal_pools.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -code-model=large | FileCheck --check-prefix=CHECK-LARGE %s
@var32 = global i32 0
@var64 = global i64 0
@@ -13,21 +14,45 @@ define void @foo() {
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
+
%val64_lit32 = and i64 %val64, 305402420
store volatile i64 %val64_lit32, i64* @var64
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{w[0-9]+}}, [x[[LITADDR]]]
+
%val64_lit32signed = and i64 %val64, -12345678
store volatile i64 %val64_lit32signed, i64* @var64
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldrsw {{x[0-9]+}}, [x[[LITADDR]]]
+
%val64_lit64 = and i64 %val64, 1234567898765432
store volatile i64 %val64_lit64, i64* @var64
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI0_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{x[0-9]+}}, [x[[LITADDR]]]
+
ret void
}
@@ -42,6 +67,14 @@ define void @floating_lits() {
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
; CHECK: ldr {{s[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
; CHECK: fadd
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{s[0-9]+}}, [x[[LITADDR]]]
+; CHECK-LARGE: fadd
+
store float %newfloat, float* @varfloat
%doubleval = load double* @vardouble
@@ -49,6 +82,13 @@ define void @floating_lits() {
; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
; CHECK: ldr {{d[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
; CHECK: fadd
+
+; CHECK-LARGE: movz x[[LITADDR:[0-9]+]], #:abs_g3:[[CURLIT:.LCPI1_[0-9]+]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g2_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g1_nc:[[CURLIT]]
+; CHECK-LARGE: movk x[[LITADDR]], #:abs_g0_nc:[[CURLIT]]
+; CHECK-LARGE: ldr {{d[0-9]+}}, [x[[LITADDR]]]
+
store double %newdouble, double* @vardouble
ret void
diff --git a/test/CodeGen/ARM/2010-08-04-StackVariable.ll b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
index 91a9903f3852..112512ff59a5 100644
--- a/test/CodeGen/ARM/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/ARM/2010-08-04-StackVariable.ll
@@ -79,7 +79,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"", metadata !2, i32 11, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786451, metadata !2, metadata !"SVal", metadata !2, i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_structure_type ]
!2 = metadata !{i32 786473, metadata !48} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 4, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, metadata !47, metadata !47, metadata !46, metadata !47, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, i32 4, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, metadata !47, metadata !47, metadata !46, metadata !47, metadata !47, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{metadata !5, metadata !7, metadata !0, metadata !9}
!5 = metadata !{i32 786445, metadata !1, metadata !"Data", metadata !2, i32 7, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
!6 = metadata !{i32 786447, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
diff --git a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
index 36d15757c314..b253fefe87c4 100644
--- a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
+++ b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll
@@ -1,36 +1,47 @@
; RUN: llc %s -mtriple=arm-linux-gnueabi -filetype=obj -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=BASIC %s
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=BASIC %s
; RUN: llc %s -mtriple=armv7-linux-gnueabi -march=arm -mcpu=cortex-a8 \
; RUN: -mattr=-neon,-vfp3,+vfp2 \
; RUN: -arm-reserve-r9 -filetype=obj -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=CORTEXA8 %s
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=CORTEXA8 %s
; This tests that the expected ARM attributes are emitted.
;
-; BASIC: .ARM.attributes
-; BASIC-NEXT: 0x70000003
-; BASIC-NEXT: 0x00000000
-; BASIC-NEXT: 0x00000000
-; BASIC-NEXT: 0x0000003c
-; BASIC-NEXT: 0x00000022
-; BASIC-NEXT: 0x00000000
-; BASIC-NEXT: 0x00000000
-; BASIC-NEXT: 0x00000001
-; BASIC-NEXT: 0x00000000
-; BASIC-NEXT: '41210000 00616561 62690001 17000000 060a0741 08010902 14011501 17031801 1901'
+; BASIC: Section {
+; BASIC: Name: .ARM.attributes
+; BASIC-NEXT: Type: SHT_ARM_ATTRIBUTES
+; BASIC-NEXT: Flags [ (0x0)
+; BASIC-NEXT: ]
+; BASIC-NEXT: Address: 0x0
+; BASIC-NEXT: Offset: 0x3C
+; BASIC-NEXT: Size: 34
+; BASIC-NEXT: Link: 0
+; BASIC-NEXT: Info: 0
+; BASIC-NEXT: AddressAlignment: 1
+; BASIC-NEXT: EntrySize: 0
+; BASIC-NEXT: SectionData (
+; BASIC-NEXT: 0000: 41210000 00616561 62690001 17000000
+; BASIC-NEXT: 0010: 060A0741 08010902 14011501 17031801
+; BASIC-NEXT: 0020: 1901
+; BASIC-NEXT: )
-; CORTEXA8: .ARM.attributes
-; CORTEXA8-NEXT: 0x70000003
-; CORTEXA8-NEXT: 0x00000000
-; CORTEXA8-NEXT: 0x00000000
-; CORTEXA8-NEXT: 0x0000003c
-; CORTEXA8-NEXT: 0x0000002f
-; CORTEXA8-NEXT: 0x00000000
-; CORTEXA8-NEXT: 0x00000000
-; CORTEXA8-NEXT: 0x00000001
-; CORTEXA8-NEXT: 0x00000000
-; CORTEXA8-NEXT: '412e0000 00616561 62690001 24000000 05434f52 5445582d 41380006 0a074108 0109020a 02140115 01170318 011901'
+; CORTEXA8: Name: .ARM.attributes
+; CORTEXA8-NEXT: Type: SHT_ARM_ATTRIBUTES
+; CORTEXA8-NEXT: Flags [ (0x0)
+; CORTEXA8-NEXT: ]
+; CORTEXA8-NEXT: Address: 0x0
+; CORTEXA8-NEXT: Offset: 0x3C
+; CORTEXA8-NEXT: Size: 47
+; CORTEXA8-NEXT: Link: 0
+; CORTEXA8-NEXT: Info: 0
+; CORTEXA8-NEXT: AddressAlignment: 1
+; CORTEXA8-NEXT: EntrySize: 0
+; CORTEXA8-NEXT: SectionData (
+; CORTEXA8-NEXT: 0000: 412E0000 00616561 62690001 24000000
+; CORTEXA8-NEXT: 0010: 05434F52 5445582D 41380006 0A074108
+; CORTEXA8-NEXT: 0020: 0109020A 02140115 01170318 011901
+; CORTEXA8-NEXT: )
define i32 @f(i64 %z) {
ret i32 0
diff --git a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
index 94a05412f5d4..9eecd045bfa0 100644
--- a/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
+++ b/test/CodeGen/ARM/2010-11-30-reloc-movt.ll
@@ -1,5 +1,5 @@
; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=OBJ %s
+; RUN: llvm-readobj -s -sr -sd | FileCheck -check-prefix=OBJ %s
target triple = "armv7-none-linux-gnueabi"
@@ -9,32 +9,17 @@ define arm_aapcs_vfpcc i32 @barf() nounwind {
entry:
%0 = tail call arm_aapcs_vfpcc i32 @foo(i8* @a) nounwind
ret i32 %0
-; OBJ: '.text'
-; OBJ-NEXT: 'sh_type'
-; OBJ-NEXT: 'sh_flags'
-; OBJ-NEXT: 'sh_addr'
-; OBJ-NEXT: 'sh_offset'
-; OBJ-NEXT: 'sh_size'
-; OBJ-NEXT: 'sh_link'
-; OBJ-NEXT: 'sh_info'
-; OBJ-NEXT: 'sh_addralign'
-; OBJ-NEXT: 'sh_entsize'
-; OBJ-NEXT: '_section_data', '00482de9 000000e3 000040e3 feffffeb 0088bde8'
-
-; OBJ: Relocation 0
-; OBJ-NEXT: 'r_offset', 0x00000004
-; OBJ-NEXT: 'r_sym', 0x000009
-; OBJ-NEXT: 'r_type', 0x2b
-
-; OBJ: Relocation 1
-; OBJ-NEXT: 'r_offset', 0x00000008
-; OBJ-NEXT: 'r_sym'
-; OBJ-NEXT: 'r_type', 0x2c
-
-; OBJ: # Relocation 2
-; OBJ-NEXT: 'r_offset', 0x0000000c
-; OBJ-NEXT: 'r_sym', 0x00000a
-; OBJ-NEXT: 'r_type', 0x1c
+; OBJ: Section {
+; OBJ: Name: .text
+; OBJ: Relocations [
+; OBJ-NEXT: 0x4 R_ARM_MOVW_ABS_NC a
+; OBJ-NEXT: 0x8 R_ARM_MOVT_ABS
+; OBJ-NEXT: 0xC R_ARM_CALL foo
+; OBJ-NEXT: ]
+; OBJ-NEXT: SectionData (
+; OBJ-NEXT: 0000: 00482DE9 000000E3 000040E3 FEFFFFEB
+; OBJ-NEXT: 0010: 0088BDE8
+; OBJ-NEXT: )
}
diff --git a/test/CodeGen/ARM/2010-12-08-tpsoft.ll b/test/CodeGen/ARM/2010-12-08-tpsoft.ll
index b8ed8199d398..1351a26756ef 100644
--- a/test/CodeGen/ARM/2010-12-08-tpsoft.ll
+++ b/test/CodeGen/ARM/2010-12-08-tpsoft.ll
@@ -1,9 +1,9 @@
; RUN: llc %s -mtriple=armv7-linux-gnueabi -o - | \
; RUN: FileCheck -check-prefix=ELFASM %s
; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=ELFOBJ %s
+; RUN: llvm-readobj -s -sd | FileCheck -check-prefix=ELFOBJ %s
-;; Make sure that bl __aeabi_read_tp is materiazlied and fixed up correctly
+;; Make sure that bl __aeabi_read_tp is materialized and fixed up correctly
;; in the obj case.
@i = external thread_local global i32
@@ -24,19 +24,13 @@ bb: ; preds = %entry
; ELFASM: bl __aeabi_read_tp
-; ELFOBJ: '.text'
-; ELFOBJ-NEXT: 'sh_type'
-; ELFOBJ-NEXT: 'sh_flags'
-; ELFOBJ-NEXT: 'sh_addr'
-; ELFOBJ-NEXT: 'sh_offset'
-; ELFOBJ-NEXT: 'sh_size'
-; ELFOBJ-NEXT: 'sh_link'
-; ELFOBJ-NEXT: 'sh_info'
-; ELFOBJ-NEXT: 'sh_addralign'
-; ELFOBJ-NEXT: 'sh_entsize'
-;;; BL __aeabi_read_tp is ---+
-;;; V
-; ELFOBJ-NEXT: 00482de9 3c009fe5 00109fe7 feffffeb
+; ELFOBJ: Sections [
+; ELFOBJ: Section {
+; ELFOBJ: Name: .text
+; ELFOBJ: SectionData (
+;;; BL __aeabi_read_tp is ---------+
+;;; V
+; ELFOBJ-NEXT: 0000: 00482DE9 3C009FE5 00109FE7 FEFFFFEB
bb1: ; preds = %entry
diff --git a/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
index 1272a257931d..f13bc1214a5a 100644
--- a/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
+++ b/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
@@ -1,5 +1,5 @@
; RUN: llc %s -mtriple=armv7-linux-gnueabi -filetype=obj -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=OBJ %s
+; RUN: llvm-readobj -s -t | FileCheck -check-prefix=OBJ %s
; RUN: llc %s -mtriple=armv7-linux-gnueabi -o - | \
; RUN: FileCheck -check-prefix=ASM %s
@@ -15,17 +15,20 @@
; ASM-NEXT: .type _MergedGlobals,%object @ @_MergedGlobals
-
-; OBJ: Section 4
-; OBJ-NEXT: '.bss'
-
-; OBJ: 'array00'
-; OBJ-NEXT: 'st_value', 0x00000000
-; OBJ-NEXT: 'st_size', 0x00000050
-; OBJ-NEXT: 'st_bind', 0x0
-; OBJ-NEXT: 'st_type', 0x1
-; OBJ-NEXT: 'st_other', 0x00
-; OBJ-NEXT: 'st_shndx', 0x0004
+; OBJ: Sections [
+; OBJ: Section {
+; OBJ: Index: 4
+; OBJ-NEXT: Name: .bss
+
+; OBJ: Symbols [
+; OBJ: Symbol {
+; OBJ: Name: array00
+; OBJ-NEXT: Value: 0x0
+; OBJ-NEXT: Size: 80
+; OBJ-NEXT: Binding: Local
+; OBJ-NEXT: Type: Object
+; OBJ-NEXT: Other: 0
+; OBJ-NEXT: Section: .bss
define i32 @main(i32 %argc) nounwind {
%1 = load i32* @sum, align 4
diff --git a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
index 1d1b89a34f9a..98c0af35ef9a 100644
--- a/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
@@ -79,7 +79,7 @@ entry:
!0 = metadata !{i32 786478, metadata !1, metadata !"get1", metadata !"get1", metadata !"get1", metadata !1, i32 4, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i8 (i8)* @get1, null, null, metadata !42, i32 4} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !47, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2369.8)", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, metadata !41, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5, metadata !5}
!5 = metadata !{i32 786468, metadata !1, metadata !1, metadata !"_Bool", i32 0, i64 8, i64 8, i64 0, i32 0, i32 2} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
index 266609b8ce69..7a7ca8e0d8d9 100644
--- a/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
+++ b/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
@@ -74,7 +74,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !47, i32 12, metadata !"clang", i1 true, metadata !"", i32 0, null, null, metadata !40, metadata !41, metadata !41, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !"get1", metadata !"get1", metadata !"", metadata !2, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32)* @get1, null, null, metadata !42, i32 5} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !47} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !2, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/ARM/2011-12-14-machine-sink.ll b/test/CodeGen/ARM/2011-12-14-machine-sink.ll
index 1b21f7571d8e..9334bf36d805 100644
--- a/test/CodeGen/ARM/2011-12-14-machine-sink.ll
+++ b/test/CodeGen/ARM/2011-12-14-machine-sink.ll
@@ -15,13 +15,13 @@ for.cond: ; preds = %for.body, %entry
for.body: ; preds = %for.cond
%v.5 = select i1 undef, i32 undef, i32 0
- %0 = load i8* undef, align 1, !tbaa !0
+ %0 = load i8* undef, align 1
%conv88 = zext i8 %0 to i32
%sub89 = sub nsw i32 0, %conv88
%v.8 = select i1 undef, i32 undef, i32 %sub89
- %1 = load i8* null, align 1, !tbaa !0
+ %1 = load i8* null, align 1
%conv108 = zext i8 %1 to i32
- %2 = load i8* undef, align 1, !tbaa !0
+ %2 = load i8* undef, align 1
%conv110 = zext i8 %2 to i32
%sub111 = sub nsw i32 %conv108, %conv110
%cmp112 = icmp slt i32 %sub111, 0
@@ -44,6 +44,3 @@ if.end299: ; preds = %for.body, %for.cond
%s.10 = phi i32 [ %add172, %for.body ], [ 0, %for.cond ]
ret i32 %s.10
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll b/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll
index 926daafbb7f1..0f1c452b8678 100644
--- a/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll
+++ b/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll
@@ -18,7 +18,7 @@ bb3: ; preds = %bb4, %bb2
br i1 %tmp, label %bb4, label %bb67
bb4: ; preds = %bb3
- %tmp5 = load <4 x i32>* undef, align 16, !tbaa !0
+ %tmp5 = load <4 x i32>* undef, align 16
%tmp6 = and <4 x i32> %tmp5, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
%tmp7 = or <4 x i32> %tmp6, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
%tmp8 = bitcast <4 x i32> %tmp7 to <4 x float>
@@ -41,9 +41,9 @@ bb4: ; preds = %bb3
%tmp24 = trunc i128 %tmp23 to i64
%tmp25 = insertvalue [2 x i64] undef, i64 %tmp24, 0
%tmp26 = insertvalue [2 x i64] %tmp25, i64 0, 1
- %tmp27 = load float* undef, align 4, !tbaa !2
+ %tmp27 = load float* undef, align 4
%tmp28 = insertelement <4 x float> undef, float %tmp27, i32 3
- %tmp29 = load <4 x i32>* undef, align 16, !tbaa !0
+ %tmp29 = load <4 x i32>* undef, align 16
%tmp30 = and <4 x i32> %tmp29, <i32 8388607, i32 8388607, i32 8388607, i32 8388607>
%tmp31 = or <4 x i32> %tmp30, <i32 1065353216, i32 1065353216, i32 1065353216, i32 1065353216>
%tmp32 = bitcast <4 x i32> %tmp31 to <4 x float>
@@ -52,10 +52,10 @@ bb4: ; preds = %bb3
%tmp35 = fmul <4 x float> %tmp34, undef
%tmp36 = fmul <4 x float> %tmp35, undef
%tmp37 = call arm_aapcs_vfpcc i8* undef(i8* undef) nounwind
- %tmp38 = load float* undef, align 4, !tbaa !2
+ %tmp38 = load float* undef, align 4
%tmp39 = insertelement <2 x float> undef, float %tmp38, i32 0
%tmp40 = call arm_aapcs_vfpcc i8* undef(i8* undef) nounwind
- %tmp41 = load float* undef, align 4, !tbaa !2
+ %tmp41 = load float* undef, align 4
%tmp42 = insertelement <4 x float> undef, float %tmp41, i32 3
%tmp43 = shufflevector <2 x float> %tmp39, <2 x float> undef, <4 x i32> zeroinitializer
%tmp44 = fmul <4 x float> %tmp33, %tmp43
@@ -64,10 +64,10 @@ bb4: ; preds = %bb3
%tmp47 = fmul <4 x float> %tmp46, %tmp36
%tmp48 = fadd <4 x float> undef, %tmp47
%tmp49 = call arm_aapcs_vfpcc i8* undef(i8* undef) nounwind
- %tmp50 = load float* undef, align 4, !tbaa !2
+ %tmp50 = load float* undef, align 4
%tmp51 = insertelement <4 x float> undef, float %tmp50, i32 3
%tmp52 = call arm_aapcs_vfpcc float* null(i8* undef) nounwind
- %tmp54 = load float* %tmp52, align 4, !tbaa !2
+ %tmp54 = load float* %tmp52, align 4
%tmp55 = insertelement <4 x float> undef, float %tmp54, i32 3
%tmp56 = fsub <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, %tmp22
%tmp57 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp56, <4 x float> %tmp55) nounwind
@@ -99,7 +99,3 @@ declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwin
declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!2 = metadata !{metadata !"float", metadata !0}
diff --git a/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll b/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll
index f1c85f1b41f5..61623ec1b6a4 100644
--- a/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll
+++ b/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll
@@ -7,7 +7,7 @@ target triple = "armv7-none-linux-eabi"
; This test case is exercising REG_SEQUENCE, and chains of REG_SEQUENCE.
define arm_aapcs_vfpcc void @foo(i8* nocapture %arg, i8* %arg1) nounwind align 2 {
bb:
- %tmp = load <2 x float>* undef, align 8, !tbaa !0
+ %tmp = load <2 x float>* undef, align 8
%tmp2 = extractelement <2 x float> %tmp, i32 0
%tmp3 = insertelement <4 x float> undef, float %tmp2, i32 0
%tmp4 = insertelement <4 x float> %tmp3, float 0.000000e+00, i32 1
@@ -70,6 +70,3 @@ entry:
declare arm_aapcs_vfpcc void @bar(i8*, float, float, float)
declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll b/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll
index 5f24e427c229..a9e2ebb7fe12 100644
--- a/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll
+++ b/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll
@@ -56,9 +56,9 @@ bb3: ; preds = %bb2
%tmp39 = shufflevector <2 x i64> %tmp38, <2 x i64> undef, <1 x i32> zeroinitializer
%tmp40 = bitcast <1 x i64> %tmp39 to <2 x float>
%tmp41 = shufflevector <2 x float> %tmp40, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %tmp42 = load <4 x float>* null, align 16, !tbaa !0
+ %tmp42 = load <4 x float>* null, align 16
%tmp43 = fmul <4 x float> %tmp42, %tmp41
- %tmp44 = load <4 x float>* undef, align 16, !tbaa !0
+ %tmp44 = load <4 x float>* undef, align 16
%tmp45 = fadd <4 x float> undef, %tmp43
%tmp46 = fadd <4 x float> undef, %tmp45
%tmp47 = bitcast <4 x float> %tmp36 to <2 x i64>
@@ -108,7 +108,7 @@ bb3: ; preds = %bb2
%tmp89 = fmul <4 x float> undef, %tmp88
%tmp90 = fadd <4 x float> %tmp89, undef
%tmp91 = fadd <4 x float> undef, %tmp90
- store <4 x float> %tmp91, <4 x float>* undef, align 16, !tbaa !0
+ store <4 x float> %tmp91, <4 x float>* undef, align 16
unreachable
bb92: ; preds = %bb2
@@ -116,6 +116,3 @@ bb92: ; preds = %bb2
}
declare arm_aapcs_vfpcc void @bar(i8* noalias nocapture sret, [8 x i64]) nounwind uwtable inlinehint
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/2012-04-02-TwoAddrInstrCrash.ll b/test/CodeGen/ARM/2012-04-02-TwoAddrInstrCrash.ll
index 33ad187926bf..0843fdc4e75e 100644
--- a/test/CodeGen/ARM/2012-04-02-TwoAddrInstrCrash.ll
+++ b/test/CodeGen/ARM/2012-04-02-TwoAddrInstrCrash.ll
@@ -9,16 +9,13 @@ define arm_aapcs_vfpcc void @foo() nounwind align 2 {
; <label>:1 ; preds = %0
%2 = shufflevector <1 x i64> zeroinitializer, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
%3 = bitcast <2 x i64> %2 to <4 x float>
- store <4 x float> zeroinitializer, <4 x float>* undef, align 16, !tbaa !0
- store <4 x float> zeroinitializer, <4 x float>* undef, align 16, !tbaa !0
- store <4 x float> %3, <4 x float>* undef, align 16, !tbaa !0
+ store <4 x float> zeroinitializer, <4 x float>* undef, align 16
+ store <4 x float> zeroinitializer, <4 x float>* undef, align 16
+ store <4 x float> %3, <4 x float>* undef, align 16
%4 = insertelement <4 x float> %3, float 8.000000e+00, i32 2
- store <4 x float> %4, <4 x float>* undef, align 16, !tbaa !0
+ store <4 x float> %4, <4 x float>* undef, align 16
unreachable
; <label>:5 ; preds = %0
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/2012-04-10-DAGCombine.ll b/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
index 6f50f279b5de..089dc9153afa 100644
--- a/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
+++ b/test/CodeGen/ARM/2012-04-10-DAGCombine.ll
@@ -20,12 +20,9 @@ bb5: ; preds = %bb4
%tmp15 = shufflevector <2 x float> %tmp14, <2 x float> undef, <4 x i32> zeroinitializer
%tmp16 = fmul <4 x float> zeroinitializer, %tmp15
%tmp17 = fadd <4 x float> %tmp16, %arg
- store <4 x float> %tmp17, <4 x float>* undef, align 8, !tbaa !0
+ store <4 x float> %tmp17, <4 x float>* undef, align 8
br label %bb18
bb18: ; preds = %bb5, %bb4
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll b/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
index ca0964a05933..a288015d6016 100644
--- a/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
+++ b/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
@@ -26,18 +26,14 @@
; CHECK: Successors:
define i32 @f1(i32* nocapture %p1, i32* nocapture %p2) nounwind {
entry:
- store volatile i32 65540, i32* %p1, align 4, !tbaa !0
- %0 = load volatile i32* %p2, align 4, !tbaa !0
+ store volatile i32 65540, i32* %p1, align 4
+ %0 = load volatile i32* %p2, align 4
ret i32 %0
}
define i32 @f2(i32* nocapture %p1, i32* nocapture %p2) nounwind {
entry:
- store i32 65540, i32* %p1, align 4, !tbaa !0
- %0 = load i32* %p2, align 4, !tbaa !0
+ store i32 65540, i32* %p1, align 4
+ %0 = load i32* %p2, align 4
ret i32 %0
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll b/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
index e4ad45bf526e..adb5c7e4b259 100644
--- a/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
+++ b/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
@@ -129,7 +129,7 @@ define arm_aapcs_vfpcc void @foo(float, i1 zeroext, i1 zeroext) nounwind uwtable
%45 = fmul <4 x float> undef, undef
%46 = fmul <4 x float> %45, %43
%47 = fmul <4 x float> undef, %44
- %48 = load <4 x float>* undef, align 8, !tbaa !1
+ %48 = load <4 x float>* undef, align 8
%49 = bitcast <4 x float> %48 to <2 x i64>
%50 = shufflevector <2 x i64> %49, <2 x i64> undef, <1 x i32> <i32 1>
%51 = bitcast <1 x i64> %50 to <2 x float>
@@ -145,10 +145,10 @@ define arm_aapcs_vfpcc void @foo(float, i1 zeroext, i1 zeroext) nounwind uwtable
%61 = fmul <4 x float> %59, %60
%62 = fmul <4 x float> %61, <float 6.000000e+01, float 6.000000e+01, float 6.000000e+01, float 6.000000e+01>
%63 = fadd <4 x float> %47, %62
- store <4 x float> %46, <4 x float>* undef, align 8, !tbaa !1
+ store <4 x float> %46, <4 x float>* undef, align 8
call arm_aapcs_vfpcc void @bar(%0* undef, float 0.000000e+00) nounwind
call arm_aapcs_vfpcc void @bar(%0* undef, float 0.000000e+00) nounwind
- store <4 x float> %63, <4 x float>* undef, align 8, !tbaa !1
+ store <4 x float> %63, <4 x float>* undef, align 8
unreachable
; <label>:64 ; preds = %41, %40
@@ -170,5 +170,3 @@ define arm_aapcs_vfpcc void @foo(float, i1 zeroext, i1 zeroext) nounwind uwtable
declare arm_aapcs_vfpcc void @bar(%0*, float)
!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/2013-01-21-PR14992.ll b/test/CodeGen/ARM/2013-01-21-PR14992.ll
index 38b9e0e8f086..05abdeda0f19 100644
--- a/test/CodeGen/ARM/2013-01-21-PR14992.ll
+++ b/test/CodeGen/ARM/2013-01-21-PR14992.ll
@@ -6,11 +6,11 @@
;CHECK: foo:
define i32 @foo(i32* %a) nounwind optsize {
entry:
- %0 = load i32* %a, align 4, !tbaa !0
+ %0 = load i32* %a, align 4
%arrayidx1 = getelementptr inbounds i32* %a, i32 1
- %1 = load i32* %arrayidx1, align 4, !tbaa !0
+ %1 = load i32* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds i32* %a, i32 2
- %2 = load i32* %arrayidx2, align 4, !tbaa !0
+ %2 = load i32* %arrayidx2, align 4
%add.ptr = getelementptr inbounds i32* %a, i32 3
;Make sure we do not have a duplicated register at the front of the reg list
;EXPECTED: ldm [[BASE:r[0-9]+]]!, {[[REG:r[0-9]+]], {{r[0-9]+}},
@@ -22,7 +22,3 @@ entry:
}
declare void @bar(i32*) optsize
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
new file mode 100644
index 000000000000..4a5ca9db0e50
--- /dev/null
+++ b/test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll
@@ -0,0 +1,73 @@
+;PR15293: ARM codegen ICE - expected larger existing stack allocation
+;RUN: llc -mtriple=arm-linux-gnueabihf < %s | FileCheck %s
+
+;CHECK: foo:
+;CHECK: sub sp, sp, #8
+;CHECK: push {r11, lr}
+;CHECK: str r0, [sp, #12]
+;CHECK: add r0, sp, #12
+;CHECK: bl fooUseParam
+;CHECK: pop {r11, lr}
+;CHECK: add sp, sp, #8
+;CHECK: mov pc, lr
+
+;CHECK: foo2:
+;CHECK: sub sp, sp, #16
+;CHECK: push {r11, lr}
+;CHECK: str r0, [sp, #12]
+;CHECK: add r0, sp, #12
+;CHECK: str r2, [sp, #16]
+;CHECK: bl fooUseParam
+;CHECK: add r0, sp, #16
+;CHECK: bl fooUseParam
+;CHECK: pop {r11, lr}
+;CHECK: add sp, sp, #16
+;CHECK: mov pc, lr
+
+;CHECK: doFoo:
+;CHECK: push {r11, lr}
+;CHECK: ldr r0,
+;CHECK: ldr r0, [r0]
+;CHECK: bl foo
+;CHECK: pop {r11, lr}
+;CHECK: mov pc, lr
+
+
+;CHECK: doFoo2:
+;CHECK: push {r11, lr}
+;CHECK: ldr r0,
+;CHECK: mov r1, #0
+;CHECK: ldr r0, [r0]
+;CHECK: mov r2, r0
+;CHECK: bl foo2
+;CHECK: pop {r11, lr}
+;CHECK: mov pc, lr
+
+
+%artz = type { i32 }
+@static_val = constant %artz { i32 777 }
+
+declare void @fooUseParam(%artz* )
+
+define void @foo(%artz* byval %s) {
+ call void @fooUseParam(%artz* %s)
+ ret void
+}
+
+define void @foo2(%artz* byval %s, i32 %p, %artz* byval %s2) {
+ call void @fooUseParam(%artz* %s)
+ call void @fooUseParam(%artz* %s2)
+ ret void
+}
+
+
+define void @doFoo() {
+ call void @foo(%artz* byval @static_val)
+ ret void
+}
+
+define void @doFoo2() {
+ call void @foo2(%artz* byval @static_val, i32 0, %artz* byval @static_val)
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
new file mode 100644
index 000000000000..38d515f9227f
--- /dev/null
+++ b/test/CodeGen/ARM/2013-04-16-AAPCS-C4-vs-VFP.ll
@@ -0,0 +1,95 @@
+;Check 5.5 Parameter Passing --> Stage C --> C.4 statement, when NSAA is not
+;equal to SP.
+;
+; Our purpose: make NSAA != SP, and only after that start to use GPRs.
+;
+;Co-Processor register candidates may be passed either in VFP registers or on
+;the stack, so once all VFP registers are allocated, the stack is used. We can
+;use the stack without any GPR allocation in that case by passing 9 f64
+;params, for example.
+;The first eight params go to d0-d7; the ninth one goes to the stack.
+;Now, as the 10th parameter, we pass an i32, and it must go to R0.
+;
+;5.5 Parameter Passing, Stage C:
+;
+;C.2.cp If the argument is a CPRC then any co-processor registers in that class
+;that are unallocated are marked as unavailable. The NSAA is adjusted upwards
+;until it is correctly aligned for the argument and the argument is copied to
+;the memory at the adjusted NSAA. The NSAA is further incremented by the size
+;of the argument. The argument has now been allocated.
+;...
+;C.4 If the size in words of the argument is not more than r4 minus NCRN, the
+;argument is copied into core registers, starting at the NCRN. The NCRN is
+;incremented by the number of registers used. Successive registers hold the
+;parts of the argument they would hold if its value were loaded into those
+;registers from memory using an LDM instruction. The argument has now been
+;allocated.
+;
+;What is actually checked here:
+;Here we check that i32 param goes to r0.
+;
+;Current test-case was produced with command:
+;arm-linux-gnueabihf-clang -mcpu=cortex-a9 params-to-GPR.c -S -O1 -emit-llvm
+;
+;// params-to-GPR.c:
+;
+;void fooUseI32(unsigned);
+;
+;void foo(long double p0,
+; long double p1,
+; long double p2,
+; long double p3,
+; long double p4,
+; long double p5,
+; long double p6,
+; long double p7,
+; long double p8,
+; unsigned p9) {
+; fooUseI32(p9);
+;}
+;
+;void doFoo() {
+; foo( 1,2,3,4,5,6,7,8,9, 43 );
+;}
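+;
+;Expected allocation, per the annotations on the IR below: p0-p7 occupy
+;d0-d7, p8 goes on the stack (so NSAA moves above SP), and p9 still goes to
+;R0 under rule C.4, because no core register has been allocated yet.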
+
+;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
+;
+;CHECK: foo:
+;CHECK-NOT: mov r0
+;CHECK-NOT: ldr r0
+;CHECK: bl fooUseI32
+;CHECK: doFoo:
+;CHECK: movs r0, #43
+;CHECK: bl foo
+
+define void @foo(double %p0, ; --> D0
+ double %p1, ; --> D1
+ double %p2, ; --> D2
+ double %p3, ; --> D3
+ double %p4, ; --> D4
+ double %p5, ; --> D5
+ double %p6, ; --> D6
+ double %p7, ; --> D7
+ double %p8, ; --> Stack
+ i32 %p9) #0 { ; --> R0, not Stack+8
+entry:
+ tail call void @fooUseI32(i32 %p9)
+ ret void
+}
+
+declare void @fooUseI32(i32)
+
+define void @doFoo() {
+entry:
+ tail call void @foo(double 23.0, ; --> D0
+ double 23.1, ; --> D1
+ double 23.2, ; --> D2
+ double 23.3, ; --> D3
+ double 23.4, ; --> D4
+ double 23.5, ; --> D5
+ double 23.6, ; --> D6
+ double 23.7, ; --> D7
+ double 23.8, ; --> Stack
+ i32 43) ; --> R0, not Stack+8
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-04-16-AAPCS-C5-vs-VFP.ll b/test/CodeGen/ARM/2013-04-16-AAPCS-C5-vs-VFP.ll
new file mode 100644
index 000000000000..446403d79cac
--- /dev/null
+++ b/test/CodeGen/ARM/2013-04-16-AAPCS-C5-vs-VFP.ll
@@ -0,0 +1,61 @@
+;Check 5.5 Parameter Passing --> Stage C --> C.5 statement, when NSAA is not
+;equal to SP.
+;
+; Our purpose: make NSAA != SP, and only then start to use GPRs; then pass a
+; byval parameter and check that it goes to the stack only.
+;
+;Co-processor register candidates may be placed either in VFP registers or
+;on the stack, so once all VFP registers are allocated the stack is used.
+;In that case we can use the stack without any GPR allocation, for example
+;by passing 9 f64 params: the first eight go to d0-d7, and the ninth goes
+;to the stack.
+;Now, as the 10th parameter, we pass an i32, and it must go to R0.
+;
+;For more information, please read 5.5 Parameter Passing, Stage C,
+;rules C.2.cp, C.4 and C.5.
+;
+;
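+;An illustrative C equivalent (an assumption; no C source is given for this
+;test) would be foo2(double d0, ..., double d8, unsigned u, struct_t s).
+;Rule C.5 may split an argument between registers and the stack only while
+;NSAA == SP; here NSAA != SP, so the 4-word struct goes to the stack in its
+;entirety.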
+;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
+
+%struct_t = type { i32, i32, i32, i32 }
+@static_val = constant %struct_t { i32 777, i32 888, i32 999, i32 1000 }
+declare void @fooUseStruct(%struct_t*)
+
+define void @foo2(double %p0, ; --> D0
+ double %p1, ; --> D1
+ double %p2, ; --> D2
+ double %p3, ; --> D3
+ double %p4, ; --> D4
+ double %p5, ; --> D5
+ double %p6, ; --> D6
+ double %p7, ; --> D7
+ double %p8, ; --> Stack
+ i32 %p9, ; --> R0
+ %struct_t* byval %p10) ; --> Stack+8
+{
+entry:
+;CHECK: push.w {r11, lr}
+;CHECK-NOT: stm
+;CHECK: add r0, sp, #16
+;CHECK: bl fooUseStruct
+ call void @fooUseStruct(%struct_t* %p10)
+
+ ret void
+}
+
+define void @doFoo2() {
+entry:
+;CHECK-NOT: ldm
+ tail call void @foo2(double 23.0, ; --> D0
+ double 23.1, ; --> D1
+ double 23.2, ; --> D2
+ double 23.3, ; --> D3
+ double 23.4, ; --> D4
+ double 23.5, ; --> D5
+ double 23.6, ; --> D6
+ double 23.7, ; --> D7
+ double 23.8, ; --> Stack
+ i32 43, ; --> R0, not Stack+8
+ %struct_t* byval @static_val) ; --> Stack+8, not R1
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-04-05-overridden-loads-PR14824.ll b/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll
index 2561686c1f83..459992818749 100644
--- a/test/CodeGen/ARM/2013-04-05-overridden-loads-PR14824.ll
+++ b/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll
@@ -1,18 +1,17 @@
; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi -mcpu=cortex-a9 -mattr=+neon,+neonfp | FileCheck %s
-; The test is presented by Jiangning Liu.
-;CHECK-NOT: vldmia
+; PR14824. This test was provided by Jiangning Liu. If the ld/st optimization
+; algorithm, or the machine code the ld/st optimizer operates on, is changed,
+; this test case may fail; if so, remove this test.
define void @sample_test(<8 x i64> * %secondSource, <8 x i64> * %source, <8 x i64> * %dest) nounwind {
+; CHECK: sample_test
+; CHECK-NOT: vldmia
+; CHECK: add
entry:
+
+; Load %source
%s0 = load <8 x i64> * %source, align 64
- %s1 = load <8 x i64> * %secondSource, align 64
- %s2 = bitcast <8 x i64> %s0 to i512
- %data.i.i.48.extract.shift = lshr i512 %s2, 384
- %data.i.i.48.extract.trunc = trunc i512 %data.i.i.48.extract.shift to i64
%arrayidx64 = getelementptr inbounds <8 x i64> * %source, i32 6
%s120 = load <8 x i64> * %arrayidx64, align 64
- %arrayidx67 = getelementptr inbounds <8 x i64> * %secondSource, i32 6
- %s121 = load <8 x i64> * %arrayidx67, align 64
%s122 = bitcast <8 x i64> %s120 to i512
%data.i.i677.48.extract.shift = lshr i512 %s122, 384
%data.i.i677.48.extract.trunc = trunc i512 %data.i.i677.48.extract.shift to i64
@@ -32,6 +31,11 @@ entry:
%s128 = insertelement <8 x i64> %s127, i64 %data.i.i677.32.extract.trunc, i32 5
%s129 = insertelement <8 x i64> %s128, i64 %data.i.i677.16.extract.trunc, i32 6
%s130 = insertelement <8 x i64> %s129, i64 %data.i.i677.56.extract.trunc, i32 7
+
+; Load %secondSource
+ %s1 = load <8 x i64> * %secondSource, align 64
+ %arrayidx67 = getelementptr inbounds <8 x i64> * %secondSource, i32 6
+ %s121 = load <8 x i64> * %arrayidx67, align 64
%s131 = bitcast <8 x i64> %s121 to i512
%data.i1.i676.48.extract.shift = lshr i512 %s131, 384
%data.i1.i676.48.extract.trunc = trunc i512 %data.i1.i676.48.extract.shift to i64
@@ -51,34 +55,16 @@ entry:
%s137 = insertelement <8 x i64> %s136, i64 %data.i1.i676.32.extract.trunc, i32 5
%s138 = insertelement <8 x i64> %s137, i64 %data.i1.i676.16.extract.trunc, i32 6
%s139 = insertelement <8 x i64> %s138, i64 %data.i1.i676.56.extract.trunc, i32 7
+
+; Operations on %source and %secondSource
%vecinit28.i.i699 = shufflevector <8 x i64> %s139, <8 x i64> %s130, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 undef, i32 undef, i32 undef>
%vecinit35.i.i700 = shufflevector <8 x i64> %vecinit28.i.i699, <8 x i64> %s139, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 undef, i32 undef>
%vecinit42.i.i701 = shufflevector <8 x i64> %vecinit35.i.i700, <8 x i64> %s139, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 undef>
%vecinit49.i.i702 = shufflevector <8 x i64> %vecinit42.i.i701, <8 x i64> %s130, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 15>
%arrayidx72 = getelementptr inbounds <8 x i64> * %dest, i32 6
store <8 x i64> %vecinit49.i.i702, <8 x i64> * %arrayidx72, align 64
- %arrayidx75 = getelementptr inbounds <8 x i64> * %source, i32 7
- %s140 = load <8 x i64> * %arrayidx75, align 64
%arrayidx78 = getelementptr inbounds <8 x i64> * %secondSource, i32 7
%s141 = load <8 x i64> * %arrayidx78, align 64
- %s142 = bitcast <8 x i64> %s140 to i512
- %data.i.i650.32.extract.shift = lshr i512 %s142, 256
- %data.i.i650.32.extract.trunc = trunc i512 %data.i.i650.32.extract.shift to i64
- %s143 = insertelement <8 x i64> undef, i64 %data.i.i650.32.extract.trunc, i32 0
- %s144 = insertelement <8 x i64> %s143, i64 %data.i.i650.32.extract.trunc, i32 1
- %data.i.i650.16.extract.shift = lshr i512 %s142, 128
- %data.i.i650.16.extract.trunc = trunc i512 %data.i.i650.16.extract.shift to i64
- %s145 = insertelement <8 x i64> %s144, i64 %data.i.i650.16.extract.trunc, i32 2
- %data.i.i650.8.extract.shift = lshr i512 %s142, 64
- %data.i.i650.8.extract.trunc = trunc i512 %data.i.i650.8.extract.shift to i64
- %s146 = insertelement <8 x i64> %s145, i64 %data.i.i650.8.extract.trunc, i32 3
- %s147 = insertelement <8 x i64> %s146, i64 %data.i.i650.8.extract.trunc, i32 4
- %data.i.i650.48.extract.shift = lshr i512 %s142, 384
- %data.i.i650.48.extract.trunc = trunc i512 %data.i.i650.48.extract.shift to i64
- %s148 = insertelement <8 x i64> %s147, i64 %data.i.i650.48.extract.trunc, i32 5
- %s149 = insertelement <8 x i64> %s148, i64 %data.i.i650.16.extract.trunc, i32 6
- %data.i.i650.0.extract.trunc = trunc i512 %s142 to i64
- %s150 = insertelement <8 x i64> %s149, i64 %data.i.i650.0.extract.trunc, i32 7
%s151 = bitcast <8 x i64> %s141 to i512
%data.i1.i649.32.extract.shift = lshr i512 %s151, 256
%data.i1.i649.32.extract.trunc = trunc i512 %data.i1.i649.32.extract.shift to i64
@@ -90,21 +76,7 @@ entry:
%data.i1.i649.8.extract.shift = lshr i512 %s151, 64
%data.i1.i649.8.extract.trunc = trunc i512 %data.i1.i649.8.extract.shift to i64
%s155 = insertelement <8 x i64> %s154, i64 %data.i1.i649.8.extract.trunc, i32 3
- %s156 = insertelement <8 x i64> %s155, i64 %data.i1.i649.8.extract.trunc, i32 4
- %data.i1.i649.48.extract.shift = lshr i512 %s151, 384
- %data.i1.i649.48.extract.trunc = trunc i512 %data.i1.i649.48.extract.shift to i64
- %s157 = insertelement <8 x i64> %s156, i64 %data.i1.i649.48.extract.trunc, i32 5
- %s158 = insertelement <8 x i64> %s157, i64 %data.i1.i649.16.extract.trunc, i32 6
- %data.i1.i649.0.extract.trunc = trunc i512 %s151 to i64
- %s159 = insertelement <8 x i64> %s158, i64 %data.i1.i649.0.extract.trunc, i32 7
- %vecinit7.i.i669 = shufflevector <8 x i64> %s159, <8 x i64> %s150, <8 x i32> <i32 0, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %vecinit14.i.i670 = shufflevector <8 x i64> %vecinit7.i.i669, <8 x i64> %s150, <8 x i32> <i32 0, i32 1, i32 10, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %vecinit21.i.i671 = shufflevector <8 x i64> %vecinit14.i.i670, <8 x i64> %s150, <8 x i32> <i32 0, i32 1, i32 2, i32 11, i32 undef, i32 undef, i32 undef, i32 undef>
- %vecinit28.i.i672 = shufflevector <8 x i64> %vecinit21.i.i671, <8 x i64> %s150, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 undef, i32 undef, i32 undef>
- %vecinit35.i.i673 = shufflevector <8 x i64> %vecinit28.i.i672, <8 x i64> %s159, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 undef, i32 undef>
- %vecinit42.i.i674 = shufflevector <8 x i64> %vecinit35.i.i673, <8 x i64> %s159, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 undef>
- %vecinit49.i.i675 = shufflevector <8 x i64> %vecinit42.i.i674, <8 x i64> %s159, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 15>
%arrayidx83 = getelementptr inbounds <8 x i64> * %dest, i32 7
- store <8 x i64> %vecinit49.i.i675, <8 x i64> * %arrayidx83, align 64
+ store <8 x i64> %s155, <8 x i64> * %arrayidx83, align 64
ret void
}
diff --git a/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll b/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll
new file mode 100644
index 000000000000..de5fd31e2f2d
--- /dev/null
+++ b/test/CodeGen/ARM/2013-04-21-AAPCS-VA-C.1.cp.ll
@@ -0,0 +1,28 @@
+;Check 5.5 Parameter Passing --> Stage C --> C.1.cp statement for VA functions.
+;Note: There are no VFP CPRCs in a variadic procedure.
+;Check that after %C is passed on the stack, the Next Core Register Number
+;is set to R4.
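+;(The double needs an 8-byte-aligned register pair; with r0-r2 already taken
+;it cannot start at r3, so it goes on the stack, the NCRN jumps to R4, and
+;the trailing i32 follows it onto the stack.)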
+
+;This test is a simplified IR version of
+;test-suite/SingleSource/UnitTests/2002-05-02-ManyArguments.c
+
+;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
+
+@.str = private unnamed_addr constant [13 x i8] c"%d %d %f %i\0A\00", align 1
+
+;CHECK: printfn:
+define void @printfn(i32 %a, i16 signext %b, double %C, i8 signext %E) {
+entry:
+ %conv = sext i16 %b to i32
+ %conv1 = sext i8 %E to i32
+ %call = tail call i32 (i8*, ...)* @printf(
+ i8* getelementptr inbounds ([13 x i8]* @.str, i32 0, i32 0), ; --> R0
+ i32 %a, ; --> R1
+ i32 %conv, ; --> R2
+ double %C, ; --> SP, NCRN := R4
+;CHECK: str r2, [sp, #8]
+ i32 %conv1) ; --> SP+8
+ ret void
+}
+
+declare i32 @printf(i8* nocapture, ...)
+
diff --git a/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll
new file mode 100644
index 000000000000..6db71fed958e
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll
@@ -0,0 +1,48 @@
+;Check AAPCS 5.5 Parameter Passing, rules C.4 and C.5.
+;Check the case where NSAA != 0, NCRN < R4, and NCRN+ParamSize < R4.
+;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
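+;With NSAA already past SP (because of the ninth double), the two-word byval
+;%p1 still fits in r1-r2 under rule C.4; the callee then stores r1/r2 back
+;into its reserved local area before taking the struct's address.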
+
+%st_t = type { i32, i32 }
+@static_val = constant %st_t { i32 777, i32 888}
+
+declare void @fooUseStruct(%st_t*)
+
+define void @foo(double %vfp0, ; --> D0, NSAA=SP
+ double %vfp1, ; --> D1, NSAA=SP
+ double %vfp2, ; --> D2, NSAA=SP
+ double %vfp3, ; --> D3, NSAA=SP
+ double %vfp4, ; --> D4, NSAA=SP
+ double %vfp5, ; --> D5, NSAA=SP
+ double %vfp6, ; --> D6, NSAA=SP
+ double %vfp7, ; --> D7, NSAA=SP
+ double %vfp8, ; --> SP, NSAA=SP+8 (!)
+ i32 %p0, ; --> R0, NSAA=SP+8
+ %st_t* byval %p1, ; --> R1, R2, NSAA=SP+8
+ i32 %p2, ; --> R3, NSAA=SP+8
+                 i32 %p3) #0 { ; --> SP+8, NSAA=SP+12
+entry:
+ ;CHECK: sub sp, #8
+ ;CHECK: push.w {r11, lr}
+ ;CHECK: add r0, sp, #16
+ ;CHECK: str r2, [sp, #20]
+ ;CHECK: str r1, [sp, #16]
+ ;CHECK: bl fooUseStruct
+ call void @fooUseStruct(%st_t* %p1)
+ ret void
+}
+
+define void @doFoo() {
+entry:
+ call void @foo(double 23.0,
+ double 23.1,
+ double 23.2,
+ double 23.3,
+ double 23.4,
+ double 23.5,
+ double 23.6,
+ double 23.7,
+ double 23.8,
+ i32 0, %st_t* byval @static_val, i32 1, i32 2)
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll
new file mode 100644
index 000000000000..212bbc2ee9c8
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll
@@ -0,0 +1,45 @@
+;Check AAPCS 5.5 Parameter Passing, rules C.4 and C.5.
+;Check the case where NSAA != 0, NCRN < R4, and NCRN+ParamSize > R4.
+;RUN: llc -mtriple=thumbv7-linux-gnueabihf -float-abi=hard < %s | FileCheck %s
+
+%st_t = type { i32, i32, i32, i32 }
+@static_val = constant %st_t { i32 777, i32 888, i32 787, i32 878}
+
+define void @foo(double %vfp0, ; --> D0, NSAA=SP
+ double %vfp1, ; --> D1, NSAA=SP
+ double %vfp2, ; --> D2, NSAA=SP
+ double %vfp3, ; --> D3, NSAA=SP
+ double %vfp4, ; --> D4, NSAA=SP
+ double %vfp5, ; --> D5, NSAA=SP
+ double %vfp6, ; --> D6, NSAA=SP
+ double %vfp7, ; --> D7, NSAA=SP
+ double %vfp8, ; --> SP, NSAA=SP+8 (!)
+ i32 %p0, ; --> R0, NSAA=SP+8
+ %st_t* byval %p1, ; --> SP+8, 4 words NSAA=SP+24
+                 i32 %p2) #0 { ; --> SP+24, NSAA=SP+28
+
+entry:
+ ;CHECK: push.w {r11, lr}
+ ;CHECK: ldr r0, [sp, #32]
+ ;CHECK: bl fooUseI32
+ call void @fooUseI32(i32 %p2)
+ ret void
+}
+
+declare void @fooUseI32(i32)
+
+define void @doFoo() {
+entry:
+ call void @foo(double 23.0,
+ double 23.1,
+ double 23.2,
+ double 23.3,
+ double 23.4,
+ double 23.5,
+ double 23.6,
+ double 23.7,
+ double 23.8,
+ i32 0, %st_t* byval @static_val, i32 1)
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
new file mode 100644
index 000000000000..abc6e0d11144
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-05-IfConvertBug.ll
@@ -0,0 +1,71 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s
+; rdar://13782395
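+; Each function here takes the address of one of its basic blocks; the
+; if-converter must not remove a block whose address is taken, or its label
+; degrades to "Address of block that was removed by CodeGen".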
+
+define i32 @t1(i32 %a, i32 %b, i8** %retaddr) {
+; CHECK: t1:
+; CHECK: Block address taken
+; CHECK-NOT: Address of block that was removed by CodeGen
+ store i8* blockaddress(@t1, %cond_true), i8** %retaddr
+ %tmp2 = icmp eq i32 %a, 0
+ br i1 %tmp2, label %cond_false, label %cond_true
+
+cond_true:
+ %tmp5 = add i32 %b, 1
+ ret i32 %tmp5
+
+cond_false:
+ %tmp7 = add i32 %b, -1
+ ret i32 %tmp7
+}
+
+define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d, i8** %retaddr) {
+; CHECK: t2:
+; CHECK: Block address taken
+; CHECK: %cond_true
+; CHECK: add
+; CHECK: bx lr
+ store i8* blockaddress(@t2, %cond_true), i8** %retaddr
+ %tmp2 = icmp sgt i32 %c, 10
+ %tmp5 = icmp slt i32 %d, 4
+ %tmp8 = and i1 %tmp5, %tmp2
+ %tmp13 = add i32 %b, %a
+ br i1 %tmp8, label %cond_true, label %UnifiedReturnBlock
+
+cond_true:
+ %tmp15 = add i32 %tmp13, %c
+ %tmp1821 = sub i32 %tmp15, %d
+ ret i32 %tmp1821
+
+UnifiedReturnBlock:
+ ret i32 %tmp13
+}
+
+define hidden fastcc void @t3(i8** %retaddr) {
+; CHECK: t3:
+; CHECK: Block address taken
+; CHECK-NOT: Address of block that was removed by CodeGen
+bb:
+ store i8* blockaddress(@t3, %KBBlockZero_return_1), i8** %retaddr
+ br i1 undef, label %bb77, label %bb7.i
+
+bb7.i: ; preds = %bb
+ br label %bb2.i
+
+KBBlockZero_return_1: ; preds = %KBBlockZero.exit
+ unreachable
+
+KBBlockZero_return_0: ; preds = %KBBlockZero.exit
+ unreachable
+
+bb77: ; preds = %bb
+ ret void
+
+bb2.i: ; preds = %bb6.i350, %bb7.i
+ br i1 undef, label %bb6.i350, label %KBBlockZero.exit
+
+bb6.i350: ; preds = %bb2.i
+ br label %bb2.i
+
+KBBlockZero.exit: ; preds = %bb2.i
+ indirectbr i8* undef, [label %KBBlockZero_return_1, label %KBBlockZero_return_0]
+}
diff --git a/test/CodeGen/ARM/avoid-cpsr-rmw.ll b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
index c5d00a0f8a4c..c14f5302d311 100644
--- a/test/CodeGen/ARM/avoid-cpsr-rmw.ll
+++ b/test/CodeGen/ARM/avoid-cpsr-rmw.ll
@@ -91,7 +91,7 @@ entry:
; CHECK: t4
; CHECK: vmrs APSR_nzcv, fpscr
; CHECK: if.then
-; CHECK-NOT movs
+; CHECK-NOT: movs
%0 = load double* %q, align 4
%cmp = fcmp olt double %0, 1.000000e+01
%incdec.ptr1 = getelementptr inbounds i32* %p, i32 1
diff --git a/test/CodeGen/ARM/commute-movcc.ll b/test/CodeGen/ARM/commute-movcc.ll
index 769ba55eb9eb..fbc25b45b6ff 100644
--- a/test/CodeGen/ARM/commute-movcc.ll
+++ b/test/CodeGen/ARM/commute-movcc.ll
@@ -32,7 +32,7 @@ for.body: ; preds = %entry, %if.end8
%BestCost.011 = phi i32 [ -1, %entry ], [ %BestCost.1, %if.end8 ]
%BestIdx.010 = phi i32 [ 0, %entry ], [ %BestIdx.1, %if.end8 ]
%arrayidx = getelementptr inbounds i32* %a, i32 %i.012
- %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %0 = load i32* %arrayidx, align 4
%mul = mul i32 %0, %0
%sub = add nsw i32 %i.012, -5
%cmp2 = icmp eq i32 %sub, %Pref
@@ -53,7 +53,7 @@ if.else: ; preds = %for.body
if.end8: ; preds = %if.else, %if.then
%BestIdx.1 = phi i32 [ %i.0.BestIdx.0, %if.then ], [ %BestIdx.0.i.0, %if.else ]
%BestCost.1 = phi i32 [ %mul.BestCost.0, %if.then ], [ %BestCost.0.mul, %if.else ]
- store i32 %mul, i32* %arrayidx, align 4, !tbaa !0
+ store i32 %mul, i32* %arrayidx, align 4
%inc = add i32 %i.012, 1
%cmp = icmp eq i32 %inc, 11
br i1 %cmp, label %for.end, label %for.body
@@ -61,7 +61,3 @@ if.end8: ; preds = %if.else, %if.then
for.end: ; preds = %if.end8
ret i32 %BestIdx.1
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/dagcombine-concatvector.ll b/test/CodeGen/ARM/dagcombine-concatvector.ll
new file mode 100644
index 000000000000..e9e0fe3239a7
--- /dev/null
+++ b/test/CodeGen/ARM/dagcombine-concatvector.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -mtriple=thumbv7s-apple-ios3.0.0 | FileCheck %s
+
+; PR15525
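+; Only elements 0 and 1 of the [4 x i64] argument are used; they arrive in
+; r1-r2 and in r3 plus one stack word, and should be moved straight into
+; d-registers with vmov and stored with a single vst1.8.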
+; CHECK: test1:
+; CHECK: ldr.w [[REG:r[0-9]+]], [sp]
+; CHECK-NEXT: vmov {{d[0-9]+}}, r1, r2
+; CHECK-NEXT: vmov {{d[0-9]+}}, r3, [[REG]]
+; CHECK-NEXT: vst1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0]
+; CHECK-NEXT: bx lr
+define void @test1(i8* %arg, [4 x i64] %vec.coerce) {
+bb:
+ %tmp = extractvalue [4 x i64] %vec.coerce, 0
+ %tmp2 = bitcast i64 %tmp to <8 x i8>
+ %tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %tmp4 = extractvalue [4 x i64] %vec.coerce, 1
+ %tmp5 = bitcast i64 %tmp4 to <8 x i8>
+ %tmp6 = shufflevector <8 x i8> %tmp5, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %tmp7 = shufflevector <16 x i8> %tmp6, <16 x i8> %tmp3, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ tail call void @llvm.arm.neon.vst1.v16i8(i8* %arg, <16 x i8> %tmp7, i32 2)
+ ret void
+}
+
+declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32)
diff --git a/test/CodeGen/ARM/debug-info-arg.ll b/test/CodeGen/ARM/debug-info-arg.ll
index 33c8e9daae69..c162260dcd0c 100644
--- a/test/CodeGen/ARM/debug-info-arg.ll
+++ b/test/CodeGen/ARM/debug-info-arg.ll
@@ -31,7 +31,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, null, null, metadata !30, null, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !32, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, null, null, metadata !30, null, null, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !2, metadata !"foo", metadata !"foo", metadata !"", i32 11, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, void (%struct.tag_s*, %struct.tag_s*, i64, i64, %struct.tag_s*, %struct.tag_s*)* @foo, null, null, metadata !31, i32 11} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !32} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !32, metadata !2, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/ARM/debug-info-branch-folding.ll b/test/CodeGen/ARM/debug-info-branch-folding.ll
index 95e6cf2554a0..38945ac2ea7b 100644
--- a/test/CodeGen/ARM/debug-info-branch-folding.ll
+++ b/test/CodeGen/ARM/debug-info-branch-folding.ll
@@ -40,7 +40,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, i32 0, metadata !1, metadata !"test0001", metadata !"test0001", metadata !"", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786454, metadata !54, metadata !2, metadata !"v4f32", i32 14, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_typedef ]
diff --git a/test/CodeGen/ARM/debug-info-d16-reg.ll b/test/CodeGen/ARM/debug-info-d16-reg.ll
index e3e4d068932e..e4040fa02caa 100644
--- a/test/CodeGen/ARM/debug-info-d16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-d16-reg.ll
@@ -60,7 +60,7 @@ declare i32 @puts(i8* nocapture) nounwind
!0 = metadata !{i32 786478, metadata !1, metadata !"printer", metadata !"printer", metadata !"printer", metadata !1, i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (i8*, double, i8)* @printer, null, null, metadata !43, i32 12} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !46} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"(LLVM build 00)", i1 true, metadata !"", i32 0, null, null, metadata !42, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"(LLVM build 00)", i1 true, metadata !"", i32 0, null, null, metadata !42, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5, metadata !6, metadata !7, metadata !8}
!5 = metadata !{i32 786468, metadata !1, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/ARM/debug-info-qreg.ll b/test/CodeGen/ARM/debug-info-qreg.ll
index 038c2296cdbe..1de6ffaeec7d 100644
--- a/test/CodeGen/ARM/debug-info-qreg.ll
+++ b/test/CodeGen/ARM/debug-info-qreg.ll
@@ -39,7 +39,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"test0001", metadata !"test0001", metadata !"", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, <4 x float> (float)* @test0001, null, null, metadata !51, i32 3} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !54} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !54, i32 12, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !50, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !54, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786454, metadata !54, metadata !2, metadata !"v4f32", i32 14, i64 0, i64 0, i64 0, i32 0, metadata !6} ; [ DW_TAG_typedef ]
diff --git a/test/CodeGen/ARM/debug-info-s16-reg.ll b/test/CodeGen/ARM/debug-info-s16-reg.ll
index f3af0b93c69c..186894232eaf 100644
--- a/test/CodeGen/ARM/debug-info-s16-reg.ll
+++ b/test/CodeGen/ARM/debug-info-s16-reg.ll
@@ -65,7 +65,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"inlineprinter", metadata !"inlineprinter", metadata !"", metadata !1, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i8*, float, i8)* @inlineprinter, null, null, metadata !48, i32 5} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !51} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !47, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 3.0 (trunk 129915)", i1 true, metadata !"", i32 0, null, null, metadata !47, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, metadata !2, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/ARM/debug-info-sreg2.ll b/test/CodeGen/ARM/debug-info-sreg2.ll
index ae02a245b432..ba83f797e2ce 100644
--- a/test/CodeGen/ARM/debug-info-sreg2.ll
+++ b/test/CodeGen/ARM/debug-info-sreg2.ll
@@ -41,7 +41,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, i32 4, metadata !2, metadata !"clang version 3.0 (trunk 130845)", i1 true, metadata !"", i32 0, null, null, metadata !16, null, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, i32 4, metadata !2, metadata !"clang version 3.0 (trunk 130845)", i1 true, metadata !"", i32 0, null, null, metadata !16, null, null, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"_Z3foov", metadata !2, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, void ()* @_Z3foov, null, null, metadata !17, i32 5} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !18} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/ARM/ehabi-filters.ll b/test/CodeGen/ARM/ehabi-filters.ll
index c42839d9fe3d..4c92a2975d39 100644
--- a/test/CodeGen/ARM/ehabi-filters.ll
+++ b/test/CodeGen/ARM/ehabi-filters.ll
@@ -19,7 +19,7 @@ define i32 @main() {
entry:
%exception.i = tail call i8* @__cxa_allocate_exception(i32 4) nounwind
%0 = bitcast i8* %exception.i to i32*
- store i32 42, i32* %0, align 4, !tbaa !0
+ store i32 42, i32* %0, align 4
invoke void @__cxa_throw(i8* %exception.i, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
to label %unreachable.i unwind label %lpad.i
@@ -71,7 +71,3 @@ declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
declare i8* @__cxa_begin_catch(i8*)
declare void @__cxa_end_catch()
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll b/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll
new file mode 100644
index 000000000000..11f3e6db0fe5
--- /dev/null
+++ b/test/CodeGen/ARM/ehabi-mc-compact-pr0.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -r - \
+; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=obj -o - %s \
+; RUN: | llvm-objdump -r - \
+; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
+
+define void @_Z4testv() {
+entry:
+ tail call void @_Z15throw_exceptionv()
+ ret void
+}
+
+declare void @_Z15throw_exceptionv()
+
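+; With bit 31 set, the .ARM.exidx entry below is an inline compact-model
+; entry (personality routine 0), so no .ARM.extab section is needed; the
+; R_ARM_NONE relocation serves only to keep __aeabi_unwind_cpp_pr0 linked in.
+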
+; CHECK-NOT: section .ARM.extab
+; CHECK: section .text
+; CHECK-NOT: section .ARM.extab
+; CHECK: section .ARM.exidx
+; CHECK-NEXT: 0000 00000000 80849b80
+; CHECK-NOT: section .ARM.extab
+
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+; CHECK-FP-ELIM: section .text
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+; CHECK-FP-ELIM: section .ARM.exidx
+; CHECK-FP-ELIM-NEXT: 0000 00000000 b0808480
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+
+; CHECK-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
+; CHECK-RELOC-NEXT: 0 R_ARM_PREL31 .text
+; CHECK-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr0
diff --git a/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll b/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll
new file mode 100644
index 000000000000..79dba084c044
--- /dev/null
+++ b/test/CodeGen/ARM/ehabi-mc-compact-pr1.ll
@@ -0,0 +1,62 @@
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -r - \
+; RUN: | FileCheck %s --check-prefix=CHECK-RELOC
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -filetype=obj -o - %s \
+; RUN: | llvm-objdump -r - \
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM-RELOC
+
+define i32 @_Z3addiiiiiiii(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
+entry:
+ %add = add nsw i32 %b, %a
+ %add1 = add nsw i32 %add, %c
+ %add2 = add nsw i32 %add1, %d
+ tail call void @_Z15throw_exceptioni(i32 %add2)
+ %add3 = add nsw i32 %f, %e
+ %add4 = add nsw i32 %add3, %g
+ %add5 = add nsw i32 %add4, %h
+ tail call void @_Z15throw_exceptioni(i32 %add5)
+ %add6 = add nsw i32 %add5, %add2
+ ret i32 %add6
+}
+
+declare void @_Z15throw_exceptioni(i32)
+
+; CHECK-NOT: section .ARM.extab
+; CHECK: section .text
+; CHECK: section .ARM.extab
+; CHECK-NEXT: 0000 419b0181 b0b08384
+; CHECK: section .ARM.exidx
+; CHECK-NEXT: 0000 00000000 00000000
+; CHECK-NOT: section .ARM.extab
+
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+; CHECK-FP-ELIM: section .text
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+; CHECK-FP-ELIM: section .ARM.exidx
+; CHECK-FP-ELIM-NEXT: 0000 00000000 b0838480
+; CHECK-FP-ELIM-NOT: section .ARM.extab
+
+; CHECK-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
+; CHECK-RELOC-NEXT: 0 R_ARM_PREL31 .text
+; CHECK-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr1
+
+; CHECK-FP-ELIM-RELOC: RELOCATION RECORDS FOR [.ARM.exidx]
+; CHECK-FP-ELIM-RELOC-NEXT: 0 R_ARM_PREL31 .text
+; CHECK-FP-ELIM-RELOC-NEXT: 0 R_ARM_NONE __aeabi_unwind_cpp_pr0
diff --git a/test/CodeGen/ARM/ehabi-mc-section-group.ll b/test/CodeGen/ARM/ehabi-mc-section-group.ll
index 5e4b5096c494..616aa1ba46e7 100644
--- a/test/CodeGen/ARM/ehabi-mc-section-group.ll
+++ b/test/CodeGen/ARM/ehabi-mc-section-group.ll
@@ -8,7 +8,7 @@
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=obj -o - %s \
-; RUN: | elf-dump --dump-section-data \
+; RUN: | llvm-readobj -s -sd \
; RUN: | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32-S64"
@@ -68,12 +68,21 @@ declare void @__cxa_end_catch()
declare void @_ZSt9terminatev()
-; CHECK: # Section 1
-; CHECK-NEXT: (('sh_name', 0x0000002f) # '.group'
-; CHECK: ('_section_data', '01000000 0a000000 0c000000 0e000000')
-; CHECK: # Section 10
-; CHECK-NEXT: (('sh_name', 0x000000e1) # '.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_'
-; CHECK: # Section 12
-; CHECK-NEXT: (('sh_name', 0x000000d7) # '.ARM.extab.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_'
-; CHECK: # Section 14
-; CHECK-NEXT: (('sh_name', 0x00000065) # '.ARM.exidx.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_'
+; CHECK: Section {
+; CHECK: Index: 1
+; CHECK-NEXT: Name: .group (47)
+; CHECK: SectionData (
+; CHECK-NEXT: 0000: 01000000 09000000 0B000000 0D000000
+; CHECK-NEXT: )
+
+; CHECK: Section {
+; CHECK: Index: 9
+; CHECK-NEXT: Name: .text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (214)
+
+; CHECK: Section {
+; CHECK: Index: 11
+; CHECK-NEXT: Name: .ARM.extab.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (204)
+
+; CHECK: Section {
+; CHECK: Index: 13
+; CHECK-NEXT: Name: .ARM.exidx.text._Z4testIidEvT_S0_S0_S0_S0_T0_S1_S1_S1_S1_ (90)
diff --git a/test/CodeGen/ARM/ehabi-mc-section.ll b/test/CodeGen/ARM/ehabi-mc-section.ll
index fc51b240ff3d..4e6e46829148 100644
--- a/test/CodeGen/ARM/ehabi-mc-section.ll
+++ b/test/CodeGen/ARM/ehabi-mc-section.ll
@@ -1,8 +1,14 @@
-; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=obj -o - %s \
; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
define void @_Z4testiiiiiddddd(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5, double %v1, double %v2, double %v3, double %v4, double %v5) section ".test_section" {
entry:
@@ -54,6 +60,12 @@ declare void @_ZSt9terminatev()
; CHECK: section .test_section
; CHECK: section .ARM.extab.test_section
-; CHECK-NEXT: 0000 00000000 b0b0b000
+; CHECK-NEXT: 0000 00000000 c9409b01 b0818484
; CHECK: section .ARM.exidx.test_section
; CHECK-NEXT: 0000 00000000 00000000
+
+; CHECK-FP-ELIM: section .test_section
+; CHECK-FP-ELIM: section .ARM.extab.test_section
+; CHECK-FP-ELIM-NEXT: 0000 00000000 84c90501 b0b0b0a8
+; CHECK-FP-ELIM: section .ARM.exidx.test_section
+; CHECK-FP-ELIM-NEXT: 0000 00000000 00000000
diff --git a/test/CodeGen/ARM/ehabi-mc-sh_link.ll b/test/CodeGen/ARM/ehabi-mc-sh_link.ll
index f90e5f384c1e..ac0a0fc9309a 100644
--- a/test/CodeGen/ARM/ehabi-mc-sh_link.ll
+++ b/test/CodeGen/ARM/ehabi-mc-sh_link.ll
@@ -7,7 +7,7 @@
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=obj -o - %s \
-; RUN: | elf-dump --dump-section-data \
+; RUN: | llvm-readobj -s \
; RUN: | FileCheck %s
define void @test1() nounwind {
@@ -20,28 +20,39 @@ entry:
ret void
}
-; CHECK: # Section 1
-; CHECK-NEXT: (('sh_name', 0x00000010) # '.text'
-
-; CHECK: (('sh_name', 0x00000005) # '.ARM.exidx'
-; CHECK-NEXT: ('sh_type', 0x70000001)
-; CHECK-NEXT: ('sh_flags', 0x00000082)
-; CHECK-NEXT: ('sh_addr', 0x00000000)
-; CHECK-NEXT: ('sh_offset', 0x0000005c)
-; CHECK-NEXT: ('sh_size', 0x00000008)
-; CHECK-NEXT: ('sh_link', 0x00000001)
-; CHECK-NEXT: ('sh_info', 0x00000000)
-; CHECK-NEXT: ('sh_addralign', 0x00000004)
-
-; CHECK: # Section 7
-; CHECK-NEXT: (('sh_name', 0x00000039) # '.test_section'
-
-; CHECK: (('sh_name', 0x0000002f) # '.ARM.exidx.test_section'
-; CHECK-NEXT: ('sh_type', 0x70000001)
-; CHECK-NEXT: ('sh_flags', 0x00000082)
-; CHECK-NEXT: ('sh_addr', 0x00000000)
-; CHECK-NEXT: ('sh_offset', 0x00000068)
-; CHECK-NEXT: ('sh_size', 0x00000008)
-; CHECK-NEXT: ('sh_link', 0x00000007)
-; CHECK-NEXT: ('sh_info', 0x00000000)
-; CHECK-NEXT: ('sh_addralign', 0x00000004)
+; CHECK: Sections [
+; CHECK: Section {
+; CHECK: Index: 1
+; CHECK-NEXT: Name: .text (16)
+
+; CHECK: Section {
+; CHECK: Name: .ARM.exidx (5)
+; CHECK-NEXT: Type: SHT_ARM_EXIDX
+; CHECK-NEXT: Flags [ (0x82)
+; CHECK-NEXT: SHF_ALLOC
+; CHECK-NEXT: SHF_LINK_ORDER
+; CHECK-NEXT: ]
+; CHECK-NEXT: Address: 0x0
+; CHECK-NEXT: Offset: 0x5C
+; CHECK-NEXT: Size: 8
+; CHECK-NEXT: Link: 1
+; CHECK-NEXT: Info: 0
+; CHECK-NEXT: AddressAlignment: 4
+
+; CHECK: Section {
+; CHECK: Index: 7
+; CHECK-NEXT: Name: .test_section (57)
+
+; CHECK: Section {
+; CHECK: Name: .ARM.exidx.test_section (47)
+; CHECK-NEXT: Type: SHT_ARM_EXIDX
+; CHECK-NEXT: Flags [ (0x82)
+; CHECK-NEXT: SHF_ALLOC
+; CHECK-NEXT: SHF_LINK_ORDER
+; CHECK-NEXT: ]
+; CHECK-NEXT: Address: 0x0
+; CHECK-NEXT: Offset: 0x68
+; CHECK-NEXT: Size: 8
+; CHECK-NEXT: Link: 7
+; CHECK-NEXT: Info: 0
+; CHECK-NEXT: AddressAlignment: 4
diff --git a/test/CodeGen/ARM/ehabi-mc.ll b/test/CodeGen/ARM/ehabi-mc.ll
index 0dc2ef7838f0..83b8425af7c4 100644
--- a/test/CodeGen/ARM/ehabi-mc.ll
+++ b/test/CodeGen/ARM/ehabi-mc.ll
@@ -1,8 +1,14 @@
-; RUN: llc -mtriple arm-unknown-linux-gnueabi \
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
+; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
+; RUN: -disable-fp-elim -filetype=obj -o - %s \
+; RUN: | llvm-objdump -s - \
+; RUN: | FileCheck %s --check-prefix=CHECK
+
+; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
; RUN: -arm-enable-ehabi -arm-enable-ehabi-descriptors \
; RUN: -filetype=obj -o - %s \
; RUN: | llvm-objdump -s - \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
define void @_Z4testiiiiiddddd(i32 %u1, i32 %u2, i32 %u3, i32 %u4, i32 %u5, double %v1, double %v2, double %v3, double %v4, double %v5) {
entry:
@@ -54,6 +60,12 @@ declare void @_ZSt9terminatev()
; CHECK: section .text
; CHECK: section .ARM.extab
-; CHECK-NEXT: 0000 00000000 b0b0b000
+; CHECK-NEXT: 0000 00000000 c9409b01 b0818484
; CHECK: section .ARM.exidx
; CHECK-NEXT: 0000 00000000 00000000
+
+; CHECK-FP-ELIM: section .text
+; CHECK-FP-ELIM: section .ARM.extab
+; CHECK-FP-ELIM-NEXT: 0000 00000000 84c90501 b0b0b0a8
+; CHECK-FP-ELIM: section .ARM.exidx
+; CHECK-FP-ELIM-NEXT: 0000 00000000 00000000
diff --git a/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll b/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll
new file mode 100644
index 000000000000..00027119f9e0
--- /dev/null
+++ b/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll
@@ -0,0 +1,30 @@
+; REQUIRES: asserts
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -debug -o /dev/null < %s 2>&1 | FileCheck %s
+
+; This test makes sure spills of 64-bit pairs in Thumb mode actually
+; generate Thumb instructions. Previously we were inserting an ARM
+; STMIA which happened to have the same encoding.
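+; Each inline-asm ldrexd below yields an i64 pinned to a consecutive GPR
+; pair; keeping seven such pairs live forces at least one spill, and that
+; spill must use the Thumb2 t2STRDi8/t2LDRDi8 forms rather than ARM ones.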
+
+define void @foo(i64* %addr) {
+ %val1 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val2 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val3 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val4 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val5 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val7 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+
+ ; Make sure we are actually creating the Thumb versions of the spill
+ ; instructions.
+; CHECK: t2STRDi8
+; CHECK: t2LDRDi8
+
+ store volatile i64 %val1, i64* %addr
+ store volatile i64 %val2, i64* %addr
+ store volatile i64 %val3, i64* %addr
+ store volatile i64 %val4, i64* %addr
+ store volatile i64 %val5, i64* %addr
+ store volatile i64 %val6, i64* %addr
+ store volatile i64 %val7, i64* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/gpr-paired-spill.ll b/test/CodeGen/ARM/gpr-paired-spill.ll
new file mode 100644
index 000000000000..ef3e5a54a2db
--- /dev/null
+++ b/test/CodeGen/ARM/gpr-paired-spill.ll
@@ -0,0 +1,44 @@
+; RUN: llc -mtriple=armv7-none-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-WITH-LDRD
+; RUN: llc -mtriple=armv4-none-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-WITHOUT-LDRD
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-WITH-LDRD
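+
+; armv7 and thumbv7 can spill a 64-bit pair with a single strd/ldrd, while
+; armv4 has neither instruction, hence the two check prefixes.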
+
+define void @foo(i64* %addr) {
+ %val1 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val2 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val3 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val4 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val5 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val7 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+
+  ; The key point is that enough 64-bit paired GPR values are live that
+ ; one of them has to be spilled. This used to cause an abort because
+ ; an LDMIA was created with both a FrameIndex and an offset, which
+ ; is not allowed.
+
+; CHECK-WITH-LDRD: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
+; CHECK-WITH-LDRD: strd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
+
+; CHECK-WITH-LDRD: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp, #8]
+; CHECK-WITH-LDRD: ldrd {{r[0-9]+}}, {{r[0-9]+}}, [sp]
+
+ ; We also want to ensure the register scavenger is working (i.e. an
+ ; offset from sp can be generated), so we need two spills.
+; CHECK-WITHOUT-LDRD: add [[ADDRREG:[a-z0-9]+]], sp, #{{[0-9]+}}
+; CHECK-WITHOUT-LDRD: stm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD: stm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
+
+  ; In principle LLVM may have to recalculate the offset; at the moment,
+  ; though, it reuses the original one.
+; CHECK-WITHOUT-LDRD: ldm [[ADDRREG]], {r{{[0-9]+}}, r{{[0-9]+}}}
+; CHECK-WITHOUT-LDRD: ldm sp, {r{{[0-9]+}}, r{{[0-9]+}}}
+
+ store volatile i64 %val1, i64* %addr
+ store volatile i64 %val2, i64* %addr
+ store volatile i64 %val3, i64* %addr
+ store volatile i64 %val4, i64* %addr
+ store volatile i64 %val5, i64* %addr
+ store volatile i64 %val6, i64* %addr
+ store volatile i64 %val7, i64* %addr
+ ret void
+}
diff --git a/test/CodeGen/ARM/lsr-unfolded-offset.ll b/test/CodeGen/ARM/lsr-unfolded-offset.ll
index 5b4cf9d81606..9b0f3e54e88a 100644
--- a/test/CodeGen/ARM/lsr-unfolded-offset.ll
+++ b/test/CodeGen/ARM/lsr-unfolded-offset.ll
@@ -26,8 +26,8 @@ outer.loop: ; preds = %for.inc69, %entry
%0 = phi i32 [ %inc71, %for.inc69 ], [ 0, %entry ]
%offset = getelementptr %struct.partition_entry* %part, i32 %0, i32 2
%len = getelementptr %struct.partition_entry* %part, i32 %0, i32 3
- %tmp5 = load i64* %offset, align 4, !tbaa !0
- %tmp15 = load i64* %len, align 4, !tbaa !0
+ %tmp5 = load i64* %offset, align 4
+ %tmp15 = load i64* %len, align 4
%add = add nsw i64 %tmp15, %tmp5
br label %inner.loop
@@ -40,8 +40,8 @@ inner.loop: ; preds = %for.inc, %outer.loo
if.end: ; preds = %inner.loop
%len39 = getelementptr %struct.partition_entry* %part, i32 %1, i32 3
%offset28 = getelementptr %struct.partition_entry* %part, i32 %1, i32 2
- %tmp29 = load i64* %offset28, align 4, !tbaa !0
- %tmp40 = load i64* %len39, align 4, !tbaa !0
+ %tmp29 = load i64* %offset28, align 4
+ %tmp40 = load i64* %len39, align 4
%add41 = add nsw i64 %tmp40, %tmp29
%cmp44 = icmp sge i64 %tmp29, %tmp5
%cmp47 = icmp slt i64 %tmp29, %add
@@ -74,7 +74,3 @@ for.end72: ; preds = %for.inc69, %entry
%overlap.0.lcssa = phi i32 [ 0, %entry ], [ %overlap.4, %for.inc69 ]
ret i32 %overlap.0.lcssa
}
-
-!0 = metadata !{metadata !"long long", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
new file mode 100644
index 000000000000..4b15326008a4
--- /dev/null
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -0,0 +1,30 @@
+; REQUIRES: asserts
+; RUN: llc < %s -march=thumb -mcpu=swift -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+;
+; Loop counter copies should be eliminated.
+; There is also a MUL here, but we don't care where it is scheduled.
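+; In the final schedule below, the remaining COPY is expected to come after
+; the t2LDRs/t2ADDrr/t2CMPrr chain rather than being interleaved with it.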
+; CHECK: postinc
+; CHECK: *** Final schedule for BB#2 ***
+; CHECK: t2LDRs
+; CHECK: t2ADDrr
+; CHECK: t2CMPrr
+; CHECK: COPY
+define i32 @postinc(i32 %a, i32* nocapture %d, i32 %s) nounwind {
+entry:
+ %cmp4 = icmp eq i32 %a, 0
+ br i1 %cmp4, label %for.end, label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %s.05 = phi i32 [ %mul, %for.body ], [ 0, %entry ]
+ %indvars.iv.next = add i32 %indvars.iv, %s
+ %arrayidx = getelementptr inbounds i32* %d, i32 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %mul = mul nsw i32 %0, %s.05
+ %exitcond = icmp eq i32 %indvars.iv.next, %a
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %s.0.lcssa = phi i32 [ 0, %entry ], [ %mul, %for.body ]
+ ret i32 %s.0.lcssa
+}
diff --git a/test/CodeGen/ARM/neon_vabs.ll b/test/CodeGen/ARM/neon_vabs.ll
new file mode 100644
index 000000000000..bf2770b15b01
--- /dev/null
+++ b/test/CodeGen/ARM/neon_vabs.ll
@@ -0,0 +1,91 @@
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+
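+; Each function below spells integer absolute value as a compare-and-select
+; idiom (a select of %a and 0-%a under an sgt/sge/slt/sle compare); every
+; variant should be matched to a single vabs of the appropriate width.
+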
+define <4 x i32> @test1(<4 x i32> %a) nounwind {
+; CHECK: test1:
+; CHECK: vabs.s32 q
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
+ ret <4 x i32> %abs
+}
+
+define <4 x i32> @test2(<4 x i32> %a) nounwind {
+; CHECK: test2:
+; CHECK: vabs.s32 q
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sge <4 x i32> %a, zeroinitializer
+ %abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
+ ret <4 x i32> %abs
+}
+
+define <8 x i16> @test3(<8 x i16> %a) nounwind {
+; CHECK: test3:
+; CHECK: vabs.s16 q
+ %tmp1neg = sub <8 x i16> zeroinitializer, %a
+ %b = icmp sgt <8 x i16> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i16> %a, <8 x i16> %tmp1neg
+ ret <8 x i16> %abs
+}
+
+define <16 x i8> @test4(<16 x i8> %a) nounwind {
+; CHECK: test4:
+; CHECK: vabs.s8 q
+ %tmp1neg = sub <16 x i8> zeroinitializer, %a
+ %b = icmp slt <16 x i8> %a, zeroinitializer
+ %abs = select <16 x i1> %b, <16 x i8> %tmp1neg, <16 x i8> %a
+ ret <16 x i8> %abs
+}
+
+define <4 x i32> @test5(<4 x i32> %a) nounwind {
+; CHECK: test5:
+; CHECK: vabs.s32 q
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sle <4 x i32> %a, zeroinitializer
+ %abs = select <4 x i1> %b, <4 x i32> %tmp1neg, <4 x i32> %a
+ ret <4 x i32> %abs
+}
+
+define <2 x i32> @test6(<2 x i32> %a) nounwind {
+; CHECK: test6:
+; CHECK: vabs.s32 d
+ %tmp1neg = sub <2 x i32> zeroinitializer, %a
+ %b = icmp sgt <2 x i32> %a, <i32 -1, i32 -1>
+ %abs = select <2 x i1> %b, <2 x i32> %a, <2 x i32> %tmp1neg
+ ret <2 x i32> %abs
+}
+
+define <2 x i32> @test7(<2 x i32> %a) nounwind {
+; CHECK: test7:
+; CHECK: vabs.s32 d
+ %tmp1neg = sub <2 x i32> zeroinitializer, %a
+ %b = icmp sge <2 x i32> %a, zeroinitializer
+ %abs = select <2 x i1> %b, <2 x i32> %a, <2 x i32> %tmp1neg
+ ret <2 x i32> %abs
+}
+
+define <4 x i16> @test8(<4 x i16> %a) nounwind {
+; CHECK: test8:
+; CHECK: vabs.s16 d
+ %tmp1neg = sub <4 x i16> zeroinitializer, %a
+ %b = icmp sgt <4 x i16> %a, zeroinitializer
+ %abs = select <4 x i1> %b, <4 x i16> %a, <4 x i16> %tmp1neg
+ ret <4 x i16> %abs
+}
+
+define <8 x i8> @test9(<8 x i8> %a) nounwind {
+; CHECK: test9:
+; CHECK: vabs.s8 d
+ %tmp1neg = sub <8 x i8> zeroinitializer, %a
+ %b = icmp slt <8 x i8> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i8> %tmp1neg, <8 x i8> %a
+ ret <8 x i8> %abs
+}
+
+define <2 x i32> @test10(<2 x i32> %a) nounwind {
+; CHECK: test10:
+; CHECK: vabs.s32 d
+ %tmp1neg = sub <2 x i32> zeroinitializer, %a
+ %b = icmp sle <2 x i32> %a, zeroinitializer
+ %abs = select <2 x i1> %b, <2 x i32> %tmp1neg, <2 x i32> %a
+ ret <2 x i32> %abs
+}
diff --git a/test/CodeGen/ARM/nop_concat_vectors.ll b/test/CodeGen/ARM/nop_concat_vectors.ll
new file mode 100644
index 000000000000..c81090095a99
--- /dev/null
+++ b/test/CodeGen/ARM/nop_concat_vectors.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+
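+;The shuffle pair below extracts the high half of %A and re-inserts it into
+;the same lanes, a concat that changes nothing; the combiner should fold it
+;away instead of emitting element-wise vld1.32/vst1.32 shuffling.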
+;CHECK: _foo
+;CHECK-NOT: vld1.32
+;CHECK-NOT: vst1.32
+;CHECK: bx
+define void @foo(<16 x i8>* %J) {
+ %A = load <16 x i8>* %J
+ %T1 = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %T2 = shufflevector <8 x i8> %T1, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <16 x i8> %T2, <16 x i8>* %J
+ ret void
+}
diff --git a/test/CodeGen/ARM/private.ll b/test/CodeGen/ARM/private.ll
index f93ffe7b339a..94578d82fddc 100644
--- a/test/CodeGen/ARM/private.ll
+++ b/test/CodeGen/ARM/private.ll
@@ -1,10 +1,11 @@
; Test to make sure that the 'private' is used correctly.
;
-; RUN: llc < %s -mtriple=arm-linux-gnueabi > %t
-; RUN: grep .Lfoo: %t
-; RUN: egrep bl.*\.Lfoo %t
-; RUN: grep .Lbaz: %t
-; RUN: grep long.*\.Lbaz %t
+; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
+; CHECK: .Lfoo:
+; CHECK: bar:
+; CHECK: bl .Lfoo
+; CHECK: .long .Lbaz
+; CHECK: .Lbaz:
define private void @foo() {
ret void
diff --git a/test/CodeGen/ARM/returned-ext.ll b/test/CodeGen/ARM/returned-ext.ll
new file mode 100644
index 000000000000..670b12f249d4
--- /dev/null
+++ b/test/CodeGen/ARM/returned-ext.ll
@@ -0,0 +1,178 @@
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
+
+declare i16 @identity16(i16 returned %x)
+declare i32 @identity32(i32 returned %x)
+declare zeroext i16 @retzext16(i16 returned %x)
+declare i16 @paramzext16(i16 zeroext returned %x)
+declare zeroext i16 @bothzext16(i16 zeroext returned %x)
+
+; The zeroext param attribute below is meant to have no effect
+define i16 @test_identity(i16 zeroext %x) {
+entry:
+; CHECKELF: test_identity:
+; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
+; CHECKELF: bl identity16
+; CHECKELF: uxth r0, r0
+; CHECKELF: bl identity32
+; CHECKELF: mov r0, [[SAVEX]]
+; CHECKT2D: test_identity:
+; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
+; CHECKT2D: blx _identity16
+; CHECKT2D: uxth r0, r0
+; CHECKT2D: blx _identity32
+; CHECKT2D: mov r0, [[SAVEX]]
+ %call = tail call i16 @identity16(i16 %x)
+ %b = zext i16 %call to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ ret i16 %x
+}
+
+; FIXME: This ought not to require register saving but currently does because
+; %x is not considered equal to %call (see SelectionDAGBuilder.cpp)
+define i16 @test_matched_ret(i16 %x) {
+entry:
+; CHECKELF: test_matched_ret:
+
+; This shouldn't be required
+; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
+
+; CHECKELF: bl retzext16
+; CHECKELF-NOT: uxth r0, {{r[0-9]+}}
+; CHECKELF: bl identity32
+
+; This shouldn't be required
+; CHECKELF: mov r0, [[SAVEX]]
+
+; CHECKT2D: test_matched_ret:
+
+; This shouldn't be required
+; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
+
+; CHECKT2D: blx _retzext16
+; CHECKT2D-NOT: uxth r0, {{r[0-9]+}}
+; CHECKT2D: blx _identity32
+
+; This shouldn't be required
+; CHECKT2D: mov r0, [[SAVEX]]
+
+ %call = tail call i16 @retzext16(i16 %x)
+ %b = zext i16 %call to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ ret i16 %x
+}
+
+define i16 @test_mismatched_ret(i16 %x) {
+entry:
+; CHECKELF: test_mismatched_ret:
+; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
+; CHECKELF: bl retzext16
+; CHECKELF: sxth r0, {{r[0-9]+}}
+; CHECKELF: bl identity32
+; CHECKELF: mov r0, [[SAVEX]]
+; CHECKT2D: test_mismatched_ret:
+; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
+; CHECKT2D: blx _retzext16
+; CHECKT2D: sxth r0, {{r[0-9]+}}
+; CHECKT2D: blx _identity32
+; CHECKT2D: mov r0, [[SAVEX]]
+ %call = tail call i16 @retzext16(i16 %x)
+ %b = sext i16 %call to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ ret i16 %x
+}
+
+define i16 @test_matched_paramext(i16 %x) {
+entry:
+; CHECKELF: test_matched_paramext:
+; CHECKELF: uxth r0, r0
+; CHECKELF: bl paramzext16
+; CHECKELF: uxth r0, r0
+; CHECKELF: bl identity32
+; CHECKELF: b paramzext16
+; CHECKT2D: test_matched_paramext:
+; CHECKT2D: uxth r0, r0
+; CHECKT2D: blx _paramzext16
+; CHECKT2D: uxth r0, r0
+; CHECKT2D: blx _identity32
+; CHECKT2D: b.w _paramzext16
+ %call = tail call i16 @paramzext16(i16 %x)
+ %b = zext i16 %call to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ %call3 = tail call i16 @paramzext16(i16 %call)
+ ret i16 %call3
+}
+
+; FIXME: This theoretically ought to optimize to the exact same output as the
+; version above, but currently doesn't (see SelectionDAGBuilder.cpp)
+define i16 @test_matched_paramext2(i16 %x) {
+entry:
+
+; Since there doesn't seem to be an unambiguous optimal selection and
+; scheduling of the uxth and mov instructions below in lieu of the 'returned'
+; optimization, don't bother checking them: just verify that the calls are
+; made in the correct order, as a basic sanity check.
+
+; CHECKELF: test_matched_paramext2:
+; CHECKELF: bl paramzext16
+; CHECKELF: bl identity32
+; CHECKELF: b paramzext16
+; CHECKT2D: test_matched_paramext2:
+; CHECKT2D: blx _paramzext16
+; CHECKT2D: blx _identity32
+; CHECKT2D: b.w _paramzext16
+ %call = tail call i16 @paramzext16(i16 %x)
+
+; It should make no difference that %x is used below rather than %call, but it does
+ %b = zext i16 %x to i32
+
+ %call2 = tail call i32 @identity32(i32 %b)
+ %call3 = tail call i16 @paramzext16(i16 %call)
+ ret i16 %call3
+}
+
+define i16 @test_matched_bothext(i16 %x) {
+entry:
+; CHECKELF: test_matched_bothext:
+; CHECKELF: uxth r0, r0
+; CHECKELF: bl bothzext16
+; CHECKELF-NOT: uxth r0, r0
+
+; FIXME: Tail call should be OK here
+; CHECKELF: bl identity32
+
+; CHECKT2D: test_matched_bothext:
+; CHECKT2D: uxth r0, r0
+; CHECKT2D: blx _bothzext16
+; CHECKT2D-NOT: uxth r0, r0
+
+; FIXME: Tail call should be OK here
+; CHECKT2D: blx _identity32
+
+ %call = tail call i16 @bothzext16(i16 %x)
+ %b = zext i16 %x to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ ret i16 %call
+}
+
+define i16 @test_mismatched_bothext(i16 %x) {
+entry:
+; CHECKELF: test_mismatched_bothext:
+; CHECKELF: mov [[SAVEX:r[0-9]+]], r0
+; CHECKELF: uxth r0, {{r[0-9]+}}
+; CHECKELF: bl bothzext16
+; CHECKELF: sxth r0, [[SAVEX]]
+; CHECKELF: bl identity32
+; CHECKELF: mov r0, [[SAVEX]]
+; CHECKT2D: test_mismatched_bothext:
+; CHECKT2D: mov [[SAVEX:r[0-9]+]], r0
+; CHECKT2D: uxth r0, {{r[0-9]+}}
+; CHECKT2D: blx _bothzext16
+; CHECKT2D: sxth r0, [[SAVEX]]
+; CHECKT2D: blx _identity32
+; CHECKT2D: mov r0, [[SAVEX]]
+ %call = tail call i16 @bothzext16(i16 %x)
+ %b = sext i16 %x to i32
+ %call2 = tail call i32 @identity32(i32 %b)
+ ret i16 %x
+}
diff --git a/test/CodeGen/ARM/tail-dup.ll b/test/CodeGen/ARM/tail-dup.ll
index e015bf098ff8..eb4d0bab929e 100644
--- a/test/CodeGen/ARM/tail-dup.ll
+++ b/test/CodeGen/ARM/tail-dup.ll
@@ -11,19 +11,19 @@
define i32 @fn(i32* nocapture %opcodes) nounwind readonly ssp {
entry:
- %0 = load i32* %opcodes, align 4, !tbaa !0
+ %0 = load i32* %opcodes, align 4
%arrayidx = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %0
br label %indirectgoto
INCREMENT: ; preds = %indirectgoto
%inc = add nsw i32 %result.0, 1
- %1 = load i32* %opcodes.addr.0, align 4, !tbaa !0
+ %1 = load i32* %opcodes.addr.0, align 4
%arrayidx2 = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %1
br label %indirectgoto
DECREMENT: ; preds = %indirectgoto
%dec = add nsw i32 %result.0, -1
- %2 = load i32* %opcodes.addr.0, align 4, !tbaa !0
+ %2 = load i32* %opcodes.addr.0, align 4
%arrayidx4 = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %2
br label %indirectgoto
@@ -38,7 +38,3 @@ indirectgoto: ; preds = %DECREMENT, %INCREME
RETURN: ; preds = %indirectgoto
ret i32 %result.0
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/this-return.ll b/test/CodeGen/ARM/this-return.ll
new file mode 100644
index 000000000000..f06e4a4f8ddc
--- /dev/null
+++ b/test/CodeGen/ARM/this-return.ll
@@ -0,0 +1,105 @@
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -arm-tail-calls | FileCheck %s -check-prefix=CHECKELF
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-tail-calls | FileCheck %s -check-prefix=CHECKT2D
+
+%struct.A = type { i8 }
+%struct.B = type { i32 }
+%struct.C = type { %struct.B }
+%struct.D = type { %struct.B }
+%struct.E = type { %struct.B, %struct.B }
+
+declare %struct.A* @A_ctor_base(%struct.A* returned)
+declare %struct.B* @B_ctor_base(%struct.B* returned, i32)
+declare %struct.B* @B_ctor_complete(%struct.B* returned, i32)
+
+declare %struct.A* @A_ctor_base_nothisret(%struct.A*)
+declare %struct.B* @B_ctor_base_nothisret(%struct.B*, i32)
+declare %struct.B* @B_ctor_complete_nothisret(%struct.B*, i32)
+
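+; The *_nothisret variants lack the 'returned' attribute on their first
+; argument, so callers must save and restore 'this' themselves instead of
+; relying on the callee leaving it in r0.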
+define %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x) {
+entry:
+; CHECKELF: C_ctor_base:
+; CHECKELF-NOT: mov {{r[0-9]+}}, r0
+; CHECKELF: bl A_ctor_base
+; CHECKELF-NOT: mov r0, {{r[0-9]+}}
+; CHECKELF: b B_ctor_base
+; CHECKT2D: C_ctor_base:
+; CHECKT2D-NOT: mov {{r[0-9]+}}, r0
+; CHECKT2D: blx _A_ctor_base
+; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
+; CHECKT2D: b.w _B_ctor_base
+ %0 = bitcast %struct.C* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x) {
+entry:
+; CHECKELF: C_ctor_base_nothisret:
+; CHECKELF: mov [[SAVETHIS:r[0-9]+]], r0
+; CHECKELF: bl A_ctor_base_nothisret
+; CHECKELF: mov r0, [[SAVETHIS]]
+; CHECKELF-NOT: b B_ctor_base_nothisret
+; CHECKT2D: C_ctor_base_nothisret:
+; CHECKT2D: mov [[SAVETHIS:r[0-9]+]], r0
+; CHECKT2D: blx _A_ctor_base_nothisret
+; CHECKT2D: mov r0, [[SAVETHIS]]
+; CHECKT2D-NOT: b.w _B_ctor_base_nothisret
+ %0 = bitcast %struct.C* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor_base_nothisret(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor_base_nothisret(%struct.B* %1, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
+entry:
+; CHECKELF: C_ctor_complete:
+; CHECKELF: b C_ctor_base
+; CHECKT2D: C_ctor_complete:
+; CHECKT2D: b.w _C_ctor_base
+ %call = tail call %struct.C* @C_ctor_base(%struct.C* %this, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
+entry:
+; CHECKELF: C_ctor_complete_nothisret:
+; CHECKELF-NOT: b C_ctor_base_nothisret
+; CHECKT2D: C_ctor_complete_nothisret:
+; CHECKT2D-NOT: b.w _C_ctor_base_nothisret
+ %call = tail call %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x)
+ ret %struct.C* %this
+}
+
+define %struct.D* @D_ctor_base(%struct.D* %this, i32 %x) {
+entry:
+; CHECKELF: D_ctor_base:
+; CHECKELF-NOT: mov {{r[0-9]+}}, r0
+; CHECKELF: bl B_ctor_complete
+; CHECKELF-NOT: mov r0, {{r[0-9]+}}
+; CHECKELF: b B_ctor_complete
+; CHECKT2D: D_ctor_base:
+; CHECKT2D-NOT: mov {{r[0-9]+}}, r0
+; CHECKT2D: blx _B_ctor_complete
+; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
+; CHECKT2D: b.w _B_ctor_complete
+ %b = getelementptr inbounds %struct.D* %this, i32 0, i32 0
+ %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ ret %struct.D* %this
+}
+
+define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
+entry:
+; CHECKELF: E_ctor_base:
+; CHECKELF-NOT: b B_ctor_complete
+; CHECKT2D: E_ctor_base:
+; CHECKT2D-NOT: b.w _B_ctor_complete
+ %b = getelementptr inbounds %struct.E* %this, i32 0, i32 0
+ %call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+ %b2 = getelementptr inbounds %struct.E* %this, i32 0, i32 1
+ %call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
+ ret %struct.E* %this
+}
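+
+; Illustrative sketch only, not a checked case: because B_ctor_complete
+; carries 'returned' on its first argument, a trailing sequence such as
+;   %r = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
+;   ret %struct.B* %b
+; can be emitted as a bare tail-call branch, since r0 already holds %b when
+; the callee returns.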
diff --git a/test/CodeGen/ARM/v1-constant-fold.ll b/test/CodeGen/ARM/v1-constant-fold.ll
new file mode 100644
index 000000000000..b86d5db29c4b
--- /dev/null
+++ b/test/CodeGen/ARM/v1-constant-fold.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+v7,+vfp3,-neon | FileCheck %s
+
+; PR15611. Check that we don't crash when constant folding v1i32 types.
+
+; CHECK: foo:
+define void @foo(i32 %arg) {
+bb:
+ %tmp = insertelement <4 x i32> undef, i32 %arg, i32 0
+ %tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 1
+ %tmp2 = insertelement <4 x i32> %tmp1, i32 0, i32 2
+ %tmp3 = insertelement <4 x i32> %tmp2, i32 0, i32 3
+ %tmp4 = add <4 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK: bl bar
+ tail call void @bar(<4 x i32> %tmp4)
+ ret void
+}
+
+declare void @bar(<4 x i32>)
diff --git a/test/CodeGen/ARM/vcvt-cost.ll b/test/CodeGen/ARM/vcvt-cost.ll
new file mode 100644
index 000000000000..0d45c40b8814
--- /dev/null
+++ b/test/CodeGen/ARM/vcvt-cost.ll
@@ -0,0 +1,153 @@
+; We currently estimate the cost of sext/zext/trunc v8(v16)i32 <-> v8(v16)i8
+; instructions as expensive. If lowering is improved, the cost model needs to
+; be updated.
+; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
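+; The COST lines below match the cost-model analysis printer, whose output
+; has the form "Cost Model: Found an estimated cost of <N> for instruction:".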
+%T0_5 = type <8 x i8>
+%T1_5 = type <8 x i32>
+; CHECK: func_cvt5:
+define void @func_cvt5(%T0_5* %loadaddr, %T1_5* %storeaddr) {
+; CHECK: vmovl.s8
+; CHECK: vmovl.s16
+; CHECK: vmovl.s16
+ %v0 = load %T0_5* %loadaddr
+; COST: func_cvt5
+; COST: cost of 3 {{.*}} sext
+ %r = sext %T0_5 %v0 to %T1_5
+ store %T1_5 %r, %T1_5* %storeaddr
+ ret void
+}
+;; We currently estimate the cost of this instruction as expensive. If
+;; lowering is improved, the cost needs to change.
+%TA0_5 = type <8 x i8>
+%TA1_5 = type <8 x i32>
+; CHECK: func_cvt1:
+define void @func_cvt1(%TA0_5* %loadaddr, %TA1_5* %storeaddr) {
+; CHECK: vmovl.u8
+; CHECK: vmovl.u16
+; CHECK: vmovl.u16
+ %v0 = load %TA0_5* %loadaddr
+; COST: func_cvt1
+; COST: cost of 3 {{.*}} zext
+ %r = zext %TA0_5 %v0 to %TA1_5
+ store %TA1_5 %r, %TA1_5* %storeaddr
+ ret void
+}
+
+%T0_51 = type <8 x i32>
+%T1_51 = type <8 x i8>
+; CHECK: func_cvt51:
+define void @func_cvt51(%T0_51* %loadaddr, %T1_51* %storeaddr) {
+; CHECK: vmovn.i32
+; CHECK: vmovn.i32
+; CHECK: vmovn.i16
+ %v0 = load %T0_51* %loadaddr
+; COST: func_cvt51
+; COST: cost of 3 {{.*}} trunc
+ %r = trunc %T0_51 %v0 to %T1_51
+ store %T1_51 %r, %T1_51* %storeaddr
+ ret void
+}
+
+%TT0_5 = type <16 x i8>
+%TT1_5 = type <16 x i32>
+; CHECK: func_cvt52:
+define void @func_cvt52(%TT0_5* %loadaddr, %TT1_5* %storeaddr) {
+; CHECK: vmovl.s16
+; CHECK: vmovl.s16
+; CHECK: vmovl.s16
+; CHECK: vmovl.s16
+ %v0 = load %TT0_5* %loadaddr
+; COST: func_cvt52
+; COST: cost of 6 {{.*}} sext
+ %r = sext %TT0_5 %v0 to %TT1_5
+ store %TT1_5 %r, %TT1_5* %storeaddr
+ ret void
+}
+;; We currently estimate the cost of this instruction as expensive. If
+;; lowering is improved, the cost needs to change.
+%TTA0_5 = type <16 x i8>
+%TTA1_5 = type <16 x i32>
+; CHECK: func_cvt12:
+define void @func_cvt12(%TTA0_5* %loadaddr, %TTA1_5* %storeaddr) {
+; CHECK: vmovl.u16
+; CHECK: vmovl.u16
+; CHECK: vmovl.u16
+; CHECK: vmovl.u16
+ %v0 = load %TTA0_5* %loadaddr
+; COST: func_cvt12
+; COST: cost of 6 {{.*}} zext
+ %r = zext %TTA0_5 %v0 to %TTA1_5
+ store %TTA1_5 %r, %TTA1_5* %storeaddr
+ ret void
+}
+
+%TT0_51 = type <16 x i32>
+%TT1_51 = type <16 x i8>
+; CHECK: func_cvt512:
+define void @func_cvt512(%TT0_51* %loadaddr, %TT1_51* %storeaddr) {
+; CHECK: vmovn.i32
+; CHECK: vmovn.i32
+; CHECK: vmovn.i32
+; CHECK: vmovn.i32
+; CHECK: vmovn.i16
+; CHECK: vmovn.i16
+ %v0 = load %TT0_51* %loadaddr
+; COST: func_cvt512
+; COST: cost of 6 {{.*}} trunc
+ %r = trunc %TT0_51 %v0 to %TT1_51
+ store %TT1_51 %r, %TT1_51* %storeaddr
+ ret void
+}
+
+; CHECK: sext_v4i16_v4i64:
+define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
+; CHECK: vmovl.s32
+; CHECK: vmovl.s32
+ %v0 = load <4 x i16>* %loadaddr
+; COST: sext_v4i16_v4i64
+; COST: cost of 3 {{.*}} sext
+ %r = sext <4 x i16> %v0 to <4 x i64>
+ store <4 x i64> %r, <4 x i64>* %storeaddr
+ ret void
+}
+
+; CHECK: zext_v4i16_v4i64:
+define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
+; CHECK: vmovl.u32
+; CHECK: vmovl.u32
+ %v0 = load <4 x i16>* %loadaddr
+; COST: zext_v4i16_v4i64
+; COST: cost of 3 {{.*}} zext
+ %r = zext <4 x i16> %v0 to <4 x i64>
+ store <4 x i64> %r, <4 x i64>* %storeaddr
+ ret void
+}
+
+; CHECK: sext_v8i16_v8i64:
+define void @sext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
+; CHECK: vmovl.s32
+; CHECK: vmovl.s32
+; CHECK: vmovl.s32
+; CHECK: vmovl.s32
+ %v0 = load <8 x i16>* %loadaddr
+; COST: sext_v8i16_v8i64
+; COST: cost of 6 {{.*}} sext
+ %r = sext <8 x i16> %v0 to <8 x i64>
+ store <8 x i64> %r, <8 x i64>* %storeaddr
+ ret void
+}
+
+; CHECK: zext_v8i16_v8i64:
+define void @zext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
+; CHECK: vmovl.u32
+; CHECK: vmovl.u32
+; CHECK: vmovl.u32
+; CHECK: vmovl.u32
+ %v0 = load <8 x i16>* %loadaddr
+; COST: zext_v8i16_v8i64
+; COST: cost of 6 {{.*}} zext
+ %r = zext <8 x i16> %v0 to <8 x i64>
+ store <8 x i64> %r, <8 x i64>* %storeaddr
+ ret void
+}
+
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index e67b4788a37d..c078f493094b 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -156,175 +156,3 @@ define <4 x i16> @vcvt_f32tof16(<4 x float>* %A) nounwind {
declare <4 x float> @llvm.arm.neon.vcvthf2fp(<4 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vcvtfp2hf(<4 x float>) nounwind readnone
-
-; We currently estimate the cost of sext/zext/trunc v8(v16)i32 <-> v8(v16)i8
-; instructions as expensive. If lowering is improved the cost model needs to
-; change.
-; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
-%T0_5 = type <8 x i8>
-%T1_5 = type <8 x i32>
-; CHECK: func_cvt5:
-define void @func_cvt5(%T0_5* %loadaddr, %T1_5* %storeaddr) {
-; CHECK: vmovl.s8
-; CHECK: vmovl.s16
-; CHECK: vmovl.s16
- %v0 = load %T0_5* %loadaddr
-; COST: func_cvt5
-; COST: cost of 3 {{.*}} sext
- %r = sext %T0_5 %v0 to %T1_5
- store %T1_5 %r, %T1_5* %storeaddr
- ret void
-}
-;; We currently estimate the cost of this instruction as expensive. If lowering
-;; is improved the cost needs to change.
-%TA0_5 = type <8 x i8>
-%TA1_5 = type <8 x i32>
-; CHECK: func_cvt1:
-define void @func_cvt1(%TA0_5* %loadaddr, %TA1_5* %storeaddr) {
-; CHECK: vmovl.u8
-; CHECK: vmovl.u16
-; CHECK: vmovl.u16
- %v0 = load %TA0_5* %loadaddr
-; COST: func_cvt1
-; COST: cost of 3 {{.*}} zext
- %r = zext %TA0_5 %v0 to %TA1_5
- store %TA1_5 %r, %TA1_5* %storeaddr
- ret void
-}
-;; We currently estimate the cost of this instruction as expensive. If lowering
-;; is improved the cost needs to change.
-%T0_51 = type <8 x i32>
-%T1_51 = type <8 x i8>
-; CHECK: func_cvt51:
-define void @func_cvt51(%T0_51* %loadaddr, %T1_51* %storeaddr) {
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
- %v0 = load %T0_51* %loadaddr
-; COST: func_cvt51
-; COST: cost of 19 {{.*}} trunc
- %r = trunc %T0_51 %v0 to %T1_51
- store %T1_51 %r, %T1_51* %storeaddr
- ret void
-}
-;; We currently estimate the cost of this instruction as expensive. If lowering
-;; is improved the cost needs to change.
-%TT0_5 = type <16 x i8>
-%TT1_5 = type <16 x i32>
-; CHECK: func_cvt52:
-define void @func_cvt52(%TT0_5* %loadaddr, %TT1_5* %storeaddr) {
-; CHECK: vmovl.s16
-; CHECK: vmovl.s16
-; CHECK: vmovl.s16
-; CHECK: vmovl.s16
- %v0 = load %TT0_5* %loadaddr
-; COST: func_cvt52
-; COST: cost of 6 {{.*}} sext
- %r = sext %TT0_5 %v0 to %TT1_5
- store %TT1_5 %r, %TT1_5* %storeaddr
- ret void
-}
-;; We currently estimate the cost of this instruction as expensive. If lowering
-;; is improved the cost needs to change.
-%TTA0_5 = type <16 x i8>
-%TTA1_5 = type <16 x i32>
-; CHECK: func_cvt12:
-define void @func_cvt12(%TTA0_5* %loadaddr, %TTA1_5* %storeaddr) {
-; CHECK: vmovl.u16
-; CHECK: vmovl.u16
-; CHECK: vmovl.u16
-; CHECK: vmovl.u16
- %v0 = load %TTA0_5* %loadaddr
-; COST: func_cvt12
-; COST: cost of 6 {{.*}} zext
- %r = zext %TTA0_5 %v0 to %TTA1_5
- store %TTA1_5 %r, %TTA1_5* %storeaddr
- ret void
-}
-;; We currently estimate the cost of this instruction as expensive. If lowering
-;; is improved the cost needs to change.
-%TT0_51 = type <16 x i32>
-%TT1_51 = type <16 x i8>
-; CHECK: func_cvt512:
-define void @func_cvt512(%TT0_51* %loadaddr, %TT1_51* %storeaddr) {
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
- %v0 = load %TT0_51* %loadaddr
-; COST: func_cvt512
-; COST: cost of 38 {{.*}} trunc
- %r = trunc %TT0_51 %v0 to %TT1_51
- store %TT1_51 %r, %TT1_51* %storeaddr
- ret void
-}
-
-; CHECK: sext_v4i16_v4i64:
-define void @sext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
-; CHECK: vmovl.s32
-; CHECK: vmovl.s32
- %v0 = load <4 x i16>* %loadaddr
-; COST: sext_v4i16_v4i64
-; COST: cost of 3 {{.*}} sext
- %r = sext <4 x i16> %v0 to <4 x i64>
- store <4 x i64> %r, <4 x i64>* %storeaddr
- ret void
-}
-
-; CHECK: zext_v4i16_v4i64:
-define void @zext_v4i16_v4i64(<4 x i16>* %loadaddr, <4 x i64>* %storeaddr) {
-; CHECK: vmovl.u32
-; CHECK: vmovl.u32
- %v0 = load <4 x i16>* %loadaddr
-; COST: zext_v4i16_v4i64
-; COST: cost of 3 {{.*}} zext
- %r = zext <4 x i16> %v0 to <4 x i64>
- store <4 x i64> %r, <4 x i64>* %storeaddr
- ret void
-}
-
-; CHECK: sext_v8i16_v8i64:
-define void @sext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
-; CHECK: vmovl.s32
-; CHECK: vmovl.s32
-; CHECK: vmovl.s32
-; CHECK: vmovl.s32
- %v0 = load <8 x i16>* %loadaddr
-; COST: sext_v8i16_v8i64
-; COST: cost of 6 {{.*}} sext
- %r = sext <8 x i16> %v0 to <8 x i64>
- store <8 x i64> %r, <8 x i64>* %storeaddr
- ret void
-}
-
-; CHECK: zext_v8i16_v8i64:
-define void @zext_v8i16_v8i64(<8 x i16>* %loadaddr, <8 x i64>* %storeaddr) {
-; CHECK: vmovl.u32
-; CHECK: vmovl.u32
-; CHECK: vmovl.u32
-; CHECK: vmovl.u32
- %v0 = load <8 x i16>* %loadaddr
-; COST: zext_v8i16_v8i64
-; COST: cost of 6 {{.*}} zext
- %r = zext <8 x i16> %v0 to <8 x i64>
- store <8 x i64> %r, <8 x i64>* %storeaddr
- ret void
-}
-
diff --git a/test/CodeGen/ARM/vcvt_combine.ll b/test/CodeGen/ARM/vcvt_combine.ll
index 3009e50c532b..07ba230757be 100644
--- a/test/CodeGen/ARM/vcvt_combine.ll
+++ b/test/CodeGen/ARM/vcvt_combine.ll
@@ -7,7 +7,7 @@
; CHECK-NOT: vmul
define void @t0() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
%mul.i = fmul <2 x float> %vecinit2.i, <float 8.000000e+00, float 8.000000e+00>
@@ -23,7 +23,7 @@ declare void @foo_int32x2_t(<2 x i32>)
; CHECK-NOT: vmul
define void @t1() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
%mul.i = fmul <2 x float> %vecinit2.i, <float 8.000000e+00, float 8.000000e+00>
@@ -39,7 +39,7 @@ declare void @foo_uint32x2_t(<2 x i32>)
; CHECK: vmul
define void @t2() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
%mul.i = fmul <2 x float> %vecinit2.i, <float 0x401B333340000000, float 0x401B333340000000>
@@ -53,7 +53,7 @@ entry:
; CHECK: vmul
define void @t3() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
%mul.i = fmul <2 x float> %vecinit2.i, <float 0x4200000000000000, float 0x4200000000000000>
@@ -67,7 +67,7 @@ entry:
; CHECK-NOT: vmul
define void @t4() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <2 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <2 x float> %vecinit.i, float %tmp, i32 1
%mul.i = fmul <2 x float> %vecinit2.i, <float 0x41F0000000000000, float 0x41F0000000000000>
@@ -81,7 +81,7 @@ entry:
; CHECK-NOT: vmul
define void @t5() nounwind {
entry:
- %tmp = load float* @in, align 4, !tbaa !0
+ %tmp = load float* @in, align 4
%vecinit.i = insertelement <4 x float> undef, float %tmp, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %tmp, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %tmp, i32 2
@@ -93,7 +93,3 @@ entry:
}
declare void @foo_int32x4_t(<4 x i32>)
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/ARM/vdiv_combine.ll b/test/CodeGen/ARM/vdiv_combine.ll
index 7fddbed1ed51..e6f1338b8539 100644
--- a/test/CodeGen/ARM/vdiv_combine.ll
+++ b/test/CodeGen/ARM/vdiv_combine.ll
@@ -11,7 +11,7 @@ declare void @foo_int32x4_t(<4 x i32>)
; CHECK-NOT: {{vdiv|vmul}}
define void @t1() nounwind {
entry:
- %tmp = load i32* @iin, align 4, !tbaa !3
+ %tmp = load i32* @iin, align 4
%vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
%vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -27,7 +27,7 @@ declare void @foo_float32x2_t(<2 x float>)
; CHECK-NOT: {{vdiv|vmul}}
define void @t2() nounwind {
entry:
- %tmp = load i32* @uin, align 4, !tbaa !3
+ %tmp = load i32* @uin, align 4
%vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
%vcvt.i = uitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -41,7 +41,7 @@ entry:
; CHECK: {{vdiv|vmul}}
define void @t3() nounwind {
entry:
- %tmp = load i32* @iin, align 4, !tbaa !3
+ %tmp = load i32* @iin, align 4
%vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
%vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -55,7 +55,7 @@ entry:
; CHECK: {{vdiv|vmul}}
define void @t4() nounwind {
entry:
- %tmp = load i32* @iin, align 4, !tbaa !3
+ %tmp = load i32* @iin, align 4
%vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
%vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -69,7 +69,7 @@ entry:
; CHECK-NOT: {{vdiv|vmul}}
define void @t5() nounwind {
entry:
- %tmp = load i32* @iin, align 4, !tbaa !3
+ %tmp = load i32* @iin, align 4
%vecinit.i = insertelement <2 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <2 x i32> %vecinit.i, i32 %tmp, i32 1
%vcvt.i = sitofp <2 x i32> %vecinit2.i to <2 x float>
@@ -83,7 +83,7 @@ entry:
; CHECK-NOT: {{vdiv|vmul}}
define void @t6() nounwind {
entry:
- %tmp = load i32* @iin, align 4, !tbaa !3
+ %tmp = load i32* @iin, align 4
%vecinit.i = insertelement <4 x i32> undef, i32 %tmp, i32 0
%vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %tmp, i32 1
%vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %tmp, i32 2
@@ -95,8 +95,3 @@ entry:
}
declare void @foo_float32x4_t(<4 x float>)
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index 74628f0c5ce6..eb5ad8f0c3d0 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -599,3 +599,27 @@ for.end179: ; preds = %for.cond.loopexit,
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone
+
+; vmull lowering would create a zext(v4i8 load()) instead of a zextload(v4i8),
+; creating an illegal type during legalization and causing an assert.
+; PR15970
+define void @no_illegal_types_vmull_sext(<4 x i32> %a) {
+entry:
+ %wide.load283.i = load <4 x i8>* undef, align 1
+ %0 = sext <4 x i8> %wide.load283.i to <4 x i32>
+ %1 = sub nsw <4 x i32> %0, %a
+ %2 = mul nsw <4 x i32> %1, %1
+ %predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
+ store <4 x i32> %predphi290.v.i, <4 x i32>* undef, align 4
+ ret void
+}
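+
+; Same as above, but with zext instead of sext.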
+define void @no_illegal_types_vmull_zext(<4 x i32> %a) {
+entry:
+ %wide.load283.i = load <4 x i8>* undef, align 1
+ %0 = zext <4 x i8> %wide.load283.i to <4 x i32>
+ %1 = sub nsw <4 x i32> %0, %a
+ %2 = mul nsw <4 x i32> %1, %1
+ %predphi290.v.i = select <4 x i1> undef, <4 x i32> undef, <4 x i32> %2
+ store <4 x i32> %predphi290.v.i, <4 x i32>* undef, align 4
+ ret void
+}
diff --git a/test/CodeGen/Generic/annotate.ll b/test/CodeGen/Generic/annotate.ll
new file mode 100644
index 000000000000..c617eb09258c
--- /dev/null
+++ b/test/CodeGen/Generic/annotate.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s
+
+; PR15253
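+; The annotation intrinsic has no codegen effect beyond returning its first
+; operand, so llc should compile this without error.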
+
+@.str = private unnamed_addr constant [4 x i8] c"sth\00", section "llvm.metadata"
+@.str1 = private unnamed_addr constant [4 x i8] c"t.c\00", section "llvm.metadata"
+
+
+define i32 @foo(i32 %a) {
+entry:
+ %0 = call i32 @llvm.annotation.i32(i32 %a, i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8]* @.str1, i32 0, i32 0), i32 2)
+ ret i32 %0
+}
+
+declare i32 @llvm.annotation.i32(i32, i8*, i8*, i32) #1
diff --git a/test/CodeGen/Generic/crash.ll b/test/CodeGen/Generic/crash.ll
index d889389b7c53..d3fc20467aa8 100644
--- a/test/CodeGen/Generic/crash.ll
+++ b/test/CodeGen/Generic/crash.ll
@@ -51,7 +51,7 @@ for.body.i: ; preds = %for.body.i, %entry
func_74.exit.for.cond29.thread_crit_edge: ; preds = %for.body.i
%f13576.pre = getelementptr inbounds %struct.S0* undef, i64 0, i32 1
- store i8 0, i8* %f13576.pre, align 4, !tbaa !0
+ store i8 0, i8* %f13576.pre, align 4
br label %lbl_468
lbl_468: ; preds = %lbl_468, %func_74.exit.for.cond29.thread_crit_edge
@@ -63,6 +63,3 @@ lbl_468: ; preds = %lbl_468, %func_74.e
for.end74: ; preds = %lbl_468
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Generic/ptr-annotate.ll b/test/CodeGen/Generic/ptr-annotate.ll
new file mode 100644
index 000000000000..ac5bd5533e9e
--- /dev/null
+++ b/test/CodeGen/Generic/ptr-annotate.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s
+
+; PR15253
+
+%struct.mystruct = type { i32 }
+
+@.str = private unnamed_addr constant [4 x i8] c"sth\00", section "llvm.metadata"
+@.str1 = private unnamed_addr constant [4 x i8] c"t.c\00", section "llvm.metadata"
+
+define void @foo() {
+entry:
+ %m = alloca i8, align 4
+ %0 = call i8* @llvm.ptr.annotation.p0i8(i8* %m, i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8]* @.str1, i32 0, i32 0), i32 2)
+ store i8 1, i8* %0, align 4
+ ret void
+}
+
+declare i8* @llvm.ptr.annotation.p0i8(i8*, i8*, i8*, i32) #1
diff --git a/test/CodeGen/Hexagon/absimm.ll b/test/CodeGen/Hexagon/absimm.ll
new file mode 100644
index 000000000000..b8f5edc26470
--- /dev/null
+++ b/test/CodeGen/Hexagon/absimm.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; Check that we generate absolute-addressing-mode instructions
+; with an immediate value.
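+; 786432 is 0xc0000; the ## prefix below marks the constant-extended
+; absolute address.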
+
+define i32 @f1(i32 %i) nounwind {
+; CHECK: memw(##786432){{ *}}={{ *}}r{{[0-9]+}}
+entry:
+ store volatile i32 %i, i32* inttoptr (i32 786432 to i32*), align 262144
+ ret i32 %i
+}
+
+define i32* @f2(i32* nocapture %i) nounwind {
+entry:
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(##786432)
+ %0 = load volatile i32* inttoptr (i32 786432 to i32*), align 262144
+ %1 = inttoptr i32 %0 to i32*
+ ret i32* %1
+}
diff --git a/test/CodeGen/Hexagon/always-ext.ll b/test/CodeGen/Hexagon/always-ext.ll
new file mode 100644
index 000000000000..9c8d708ba877
--- /dev/null
+++ b/test/CodeGen/Hexagon/always-ext.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+
+; Check that we don't generate an invalid packet with too many instructions
+; due to a store that has a must-extend operand.
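+; A ##-extended operand needs a constant-extender word, which occupies one of
+; the packet's instruction slots.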
+
+; CHECK: CuSuiteAdd.exit.us
+; CHECK: {
+; CHECK-NOT: call abort
+; CHECK: memw(##0)
+; CHECK: memw(r{{[0-9]+}}<<#2+##4)
+; CHECK: }
+
+%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* }
+%struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112 = type { i32, [1024 x %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*], i32 }
+
+@__func__.CuSuiteAdd = external unnamed_addr constant [11 x i8], align 8
+@.str24 = external unnamed_addr constant [140 x i8], align 8
+
+declare void @_Assert()
+
+define void @CuSuiteAddSuite() nounwind {
+entry:
+ br i1 undef, label %for.body.us, label %for.end
+
+for.body.us: ; preds = %entry
+ %0 = load %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** null, align 4
+ %1 = load i32* undef, align 4
+ %cmp.i.us = icmp slt i32 %1, 1024
+ br i1 %cmp.i.us, label %CuSuiteAdd.exit.us, label %cond.false6.i.us
+
+cond.false6.i.us: ; preds = %for.body.us
+ tail call void @_Assert() nounwind
+ unreachable
+
+CuSuiteAdd.exit.us: ; preds = %for.body.us
+ %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1
+ store %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111* %0, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** %arrayidx.i.us, align 4
+ call void @llvm.trap()
+ unreachable
+
+for.end: ; preds = %entry
+ ret void
+}
+
+declare void @llvm.trap() noreturn nounwind
diff --git a/test/CodeGen/Hexagon/cmp_pred2.ll b/test/CodeGen/Hexagon/cmp_pred2.ll
new file mode 100644
index 000000000000..a20b9f09b6e0
--- /dev/null
+++ b/test/CodeGen/Hexagon/cmp_pred2.ll
@@ -0,0 +1,87 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+; Make sure that the assembler-mapped compare instructions are generated correctly.
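+; cmp.ge/cmp.lt/cmp.ltu are assembler pseudo-forms; codegen should emit the
+; underlying cmp.gt/cmp.gtu instructions instead.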
+
+@c = common global i32 0, align 4
+
+define i32 @test1(i32 %a, i32 %b) nounwind {
+; CHECK-NOT: cmp.ge
+; CHECK: cmp.gt
+entry:
+ %cmp = icmp slt i32 %a, 100
+ br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
+
+entry.if.end_crit_edge:
+ %.pre = load i32* @c, align 4
+ br label %if.end
+
+if.then:
+ %sub = add nsw i32 %a, -10
+ store i32 %sub, i32* @c, align 4
+ br label %if.end
+
+if.end:
+ %0 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %sub, %if.then ]
+ ret i32 %0
+}
+
+define i32 @test2(i32 %a, i32 %b) nounwind {
+; CHECK-NOT: cmp.lt
+; CHECK: cmp.gt
+entry:
+ %cmp = icmp sge i32 %a, %b
+ br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
+
+entry.if.end_crit_edge:
+ %.pre = load i32* @c, align 4
+ br label %if.end
+
+if.then:
+ %sub = add nsw i32 %a, -10
+ store i32 %sub, i32* @c, align 4
+ br label %if.end
+
+if.end:
+ %0 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %sub, %if.then ]
+ ret i32 %0
+}
+
+define i32 @test4(i32 %a, i32 %b) nounwind {
+; CHECK-NOT: cmp.ltu
+; CHECK: cmp.gtu
+entry:
+ %cmp = icmp uge i32 %a, %b
+ br i1 %cmp, label %entry.if.end_crit_edge, label %if.then
+
+entry.if.end_crit_edge:
+ %.pre = load i32* @c, align 4
+ br label %if.end
+
+if.then:
+ %sub = add i32 %a, -10
+ store i32 %sub, i32* @c, align 4
+ br label %if.end
+
+if.end:
+ %0 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %sub, %if.then ]
+ ret i32 %0
+}
+
+define i32 @test5(i32 %a, i32 %b) nounwind {
+; CHECK: cmp.gtu
+entry:
+ %cmp = icmp uge i32 %a, 29999
+ br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
+
+entry.if.end_crit_edge:
+ %.pre = load i32* @c, align 4
+ br label %if.end
+
+if.then:
+ %sub = add i32 %a, -10
+ store i32 %sub, i32* @c, align 4
+ br label %if.end
+
+if.end:
+ %0 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %sub, %if.then ]
+ ret i32 %0
+}
diff --git a/test/CodeGen/Hexagon/cmpb_pred.ll b/test/CodeGen/Hexagon/cmpb_pred.ll
index 1e6144701fee..0960da1fa060 100644
--- a/test/CodeGen/Hexagon/cmpb_pred.ll
+++ b/test/CodeGen/Hexagon/cmpb_pred.ll
@@ -16,7 +16,7 @@ entry:
define i32 @Func_3b(i32) nounwind readonly {
entry:
; CHECK-NOT: mux
- %1 = load i8* @Enum_global, align 1, !tbaa !0
+ %1 = load i8* @Enum_global, align 1
%2 = trunc i32 %0 to i8
%cmp = icmp ne i8 %1, %2
%selv = zext i1 %cmp to i32
@@ -35,7 +35,7 @@ entry:
define i32 @Func_3d(i32) nounwind readonly {
entry:
; CHECK-NOT: mux
- %1 = load i8* @Enum_global, align 1, !tbaa !0
+ %1 = load i8* @Enum_global, align 1
%2 = trunc i32 %0 to i8
%cmp = icmp eq i8 %1, %2
%selv = zext i1 %cmp to i32
@@ -45,7 +45,7 @@ entry:
define i32 @Func_3e(i32) nounwind readonly {
entry:
; CHECK-NOT: mux
- %1 = load i8* @Enum_global, align 1, !tbaa !0
+ %1 = load i8* @Enum_global, align 1
%2 = trunc i32 %0 to i8
%cmp = icmp eq i8 %1, %2
%selv = zext i1 %cmp to i32
@@ -87,6 +87,3 @@ entry:
%selv = zext i1 %cmp to i32
ret i32 %selv
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/combine_ir.ll b/test/CodeGen/Hexagon/combine_ir.ll
index 921ce9928e6d..8b99ef715546 100644
--- a/test/CodeGen/Hexagon/combine_ir.ll
+++ b/test/CodeGen/Hexagon/combine_ir.ll
@@ -6,12 +6,7 @@ define void @word(i32* nocapture %a) nounwind {
entry:
%0 = load i32* %a, align 4, !tbaa !0
%1 = zext i32 %0 to i64
- %add.ptr = getelementptr inbounds i32* %a, i32 1
- %2 = load i32* %add.ptr, align 4, !tbaa !0
- %3 = zext i32 %2 to i64
- %4 = shl nuw i64 %3, 32
- %ins = or i64 %4, %1
- tail call void @bar(i64 %ins) nounwind
+ tail call void @bar(i64 %1) nounwind
ret void
}
diff --git a/test/CodeGen/Hexagon/hwloop-const.ll b/test/CodeGen/Hexagon/hwloop-const.ll
index a621c58c63ed..8204ddea3490 100644
--- a/test/CodeGen/Hexagon/hwloop-const.ll
+++ b/test/CodeGen/Hexagon/hwloop-const.ll
@@ -15,9 +15,9 @@ entry:
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds [25000 x i32]* @b, i32 0, i32 %i.02
- store i32 %i.02, i32* %arrayidx, align 4, !tbaa !0
+ store i32 %i.02, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds [25000 x i32]* @a, i32 0, i32 %i.02
- store i32 %i.02, i32* %arrayidx1, align 4, !tbaa !0
+ store i32 %i.02, i32* %arrayidx1, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 25000
br i1 %exitcond, label %for.end, label %for.body
@@ -25,7 +25,3 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret i32 0
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/hwloop-dbg.ll b/test/CodeGen/Hexagon/hwloop-dbg.ll
index c2e8153b7dff..17fe7b982d8f 100644
--- a/test/CodeGen/Hexagon/hwloop-dbg.ll
+++ b/test/CodeGen/Hexagon/hwloop-dbg.ll
@@ -19,8 +19,8 @@ for.body: ; preds = %for.body, %entry
%b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i32* %b.addr.01, i32 1, !dbg !21
tail call void @llvm.dbg.value(metadata !{i32* %incdec.ptr}, i64 0, metadata !14), !dbg !21
- %0 = load i32* %b.addr.01, align 4, !dbg !21, !tbaa !23
- store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21, !tbaa !23
+ %0 = load i32* %b.addr.01, align 4, !dbg !21
+ store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21
%inc = add nsw i32 %i.02, 1, !dbg !26
tail call void @llvm.dbg.value(metadata !{i32 %inc}, i64 0, metadata !15), !dbg !26
%exitcond = icmp eq i32 %inc, 10, !dbg !19
@@ -57,8 +57,5 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!20 = metadata !{i32 786443, metadata !16, i32 3, i32 3, metadata !6, i32 1} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
!21 = metadata !{i32 4, i32 5, metadata !22, null}
!22 = metadata !{i32 786443, metadata !20, i32 3, i32 28, metadata !6, i32 2} ; [ DW_TAG_lexical_block ] [/usr2/kparzysz/s.hex/t/hwloop-dbg.c]
-!23 = metadata !{metadata !"int", metadata !24}
-!24 = metadata !{metadata !"omnipotent char", metadata !25}
-!25 = metadata !{metadata !"Simple C/C++ TBAA"}
!26 = metadata !{i32 3, i32 23, metadata !20, null}
!27 = metadata !{i32 6, i32 1, metadata !16, null}
diff --git a/test/CodeGen/Hexagon/memops2.ll b/test/CodeGen/Hexagon/memops2.ll
index b1b25445c029..d6d1a50bcefa 100644
--- a/test/CodeGen/Hexagon/memops2.ll
+++ b/test/CodeGen/Hexagon/memops2.ll
@@ -6,11 +6,11 @@ define void @f(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
%add.ptr = getelementptr inbounds i16* %p, i32 10
- %0 = load i16* %add.ptr, align 2, !tbaa !0
+ %0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%sub = add nsw i32 %conv2, 65535
%conv1 = trunc i32 %sub to i16
- store i16 %conv1, i16* %add.ptr, align 2, !tbaa !0
+ store i16 %conv1, i16* %add.ptr, align 2
ret void
}
@@ -19,14 +19,10 @@ entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
%add.ptr1 = getelementptr inbounds i16* %p, i32 %add.ptr.sum
- %0 = load i16* %add.ptr1, align 2, !tbaa !0
+ %0 = load i16* %add.ptr1, align 2
%conv3 = zext i16 %0 to i32
%sub = add nsw i32 %conv3, 65535
%conv2 = trunc i32 %sub to i16
- store i16 %conv2, i16* %add.ptr1, align 2, !tbaa !0
+ store i16 %conv2, i16* %add.ptr1, align 2
ret void
}
-
-!0 = metadata !{metadata !"short", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/memops3.ll b/test/CodeGen/Hexagon/memops3.ll
index 5b8bd6c87bfb..d9e4e8f53709 100644
--- a/test/CodeGen/Hexagon/memops3.ll
+++ b/test/CodeGen/Hexagon/memops3.ll
@@ -6,11 +6,11 @@ define void @f(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
%add.ptr = getelementptr inbounds i8* %p, i32 10
- %0 = load i8* %add.ptr, align 1, !tbaa !0
+ %0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
%conv1 = trunc i32 %sub to i8
- store i8 %conv1, i8* %add.ptr, align 1, !tbaa !0
+ store i8 %conv1, i8* %add.ptr, align 1
ret void
}
@@ -19,13 +19,10 @@ entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
%add.ptr1 = getelementptr inbounds i8* %p, i32 %add.ptr.sum
- %0 = load i8* %add.ptr1, align 1, !tbaa !0
+ %0 = load i8* %add.ptr1, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
%conv2 = trunc i32 %sub to i8
- store i8 %conv2, i8* %add.ptr1, align 1, !tbaa !0
+ store i8 %conv2, i8* %add.ptr1, align 1
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/remove_lsr.ll b/test/CodeGen/Hexagon/remove_lsr.ll
index 79b5f4ae7c43..3128dbb8b21b 100644
--- a/test/CodeGen/Hexagon/remove_lsr.ll
+++ b/test/CodeGen/Hexagon/remove_lsr.ll
@@ -46,17 +46,17 @@ for.body: ; preds = %for.body, %entry
%1 = trunc i64 %val.021 to i32
%2 = trunc i64 %0 to i32
%3 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv3, i32 %1, i32 %2)
- store i32 %3, i32* %lsr.iv3335, align 4, !tbaa !0
+ store i32 %3, i32* %lsr.iv3335, align 4
%conv8 = sext i8 %predicate_1.023 to i32
%4 = lshr i64 %val.021, 32
%5 = trunc i64 %4 to i32
%6 = lshr i64 %0, 32
%7 = trunc i64 %6 to i32
%8 = tail call i32 @llvm.hexagon.C2.mux(i32 %conv8, i32 %5, i32 %7)
- store i32 %8, i32* %lsr.iv2931, align 4, !tbaa !0
+ store i32 %8, i32* %lsr.iv2931, align 4
%srcval = load i64* %lsr.iv27, align 8
- %9 = load i8* %lsr.iv40, align 1, !tbaa !1
- %10 = load i8* %lsr.iv37, align 1, !tbaa !1
+ %9 = load i8* %lsr.iv40, align 1
+ %10 = load i8* %lsr.iv37, align 1
%lftr.wideiv = trunc i32 %lsr.iv42 to i8
%exitcond = icmp eq i8 %lftr.wideiv, 32
%scevgep26 = getelementptr %union.vect64* %lsr.iv, i32 1
@@ -74,7 +74,3 @@ for.end: ; preds = %for.body
declare i64 @llvm.hexagon.A2.vsubhs(i64, i64) nounwind readnone
declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) nounwind readnone
-
-!0 = metadata !{metadata !"long", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Hexagon/union-1.ll b/test/CodeGen/Hexagon/union-1.ll
new file mode 100644
index 000000000000..7c6da744ec51
--- /dev/null
+++ b/test/CodeGen/Hexagon/union-1.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; CHECK: word
+; CHECK-NOT: combine(#0
+; CHECK: jump bar
+
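+; Building the i64 argument from two adjacent i32 loads should not require a
+; zero-extending combine(#0, ...), and the final call should become a
+; tail-call jump.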
+define void @word(i32* nocapture %a) nounwind {
+entry:
+ %0 = load i32* %a, align 4, !tbaa !0
+ %1 = zext i32 %0 to i64
+ %add.ptr = getelementptr inbounds i32* %a, i32 1
+ %2 = load i32* %add.ptr, align 4, !tbaa !0
+ %3 = zext i32 %2 to i64
+ %4 = shl nuw i64 %3, 32
+ %ins = or i64 %4, %1
+ tail call void @bar(i64 %ins) nounwind
+ ret void
+}
+
+declare void @bar(i64)
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Mips/alloca.ll b/test/CodeGen/Mips/alloca.ll
index d79ea9193d28..fc7ef862a328 100644
--- a/test/CodeGen/Mips/alloca.ll
+++ b/test/CodeGen/Mips/alloca.ll
@@ -59,23 +59,23 @@ if.end: ; preds = %if.else, %if.then
; CHECK: lw $25, %call16(printf)
%.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
- %tmp7 = load i32* %0, align 4, !tbaa !0
+ %tmp7 = load i32* %0, align 4
%arrayidx9 = getelementptr inbounds i8* %tmp1, i32 4
%3 = bitcast i8* %arrayidx9 to i32*
- %tmp10 = load i32* %3, align 4, !tbaa !0
+ %tmp10 = load i32* %3, align 4
%arrayidx12 = getelementptr inbounds i8* %tmp1, i32 8
%4 = bitcast i8* %arrayidx12 to i32*
- %tmp13 = load i32* %4, align 4, !tbaa !0
- %tmp16 = load i32* %.pre-phi, align 4, !tbaa !0
+ %tmp13 = load i32* %4, align 4
+ %tmp16 = load i32* %.pre-phi, align 4
%arrayidx18 = getelementptr inbounds i8* %tmp1, i32 16
%5 = bitcast i8* %arrayidx18 to i32*
- %tmp19 = load i32* %5, align 4, !tbaa !0
+ %tmp19 = load i32* %5, align 4
%arrayidx21 = getelementptr inbounds i8* %tmp1, i32 20
%6 = bitcast i8* %arrayidx21 to i32*
- %tmp22 = load i32* %6, align 4, !tbaa !0
+ %tmp22 = load i32* %6, align 4
%arrayidx24 = getelementptr inbounds i8* %tmp1, i32 24
%7 = bitcast i8* %arrayidx24 to i32*
- %tmp25 = load i32* %7, align 4, !tbaa !0
+ %tmp25 = load i32* %7, align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
ret i32 0
}
@@ -83,7 +83,3 @@ if.end: ; preds = %if.else, %if.then
declare void @foo3(i32*)
declare i32 @printf(i8* nocapture, ...) nounwind
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Mips/divrem.ll b/test/CodeGen/Mips/divrem.ll
index 398d1b78bd43..c470d1ce2ce5 100644
--- a/test/CodeGen/Mips/divrem.ll
+++ b/test/CodeGen/Mips/divrem.ll
@@ -32,7 +32,7 @@ entry:
define i32 @sdivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
%rem = srem i32 %a0, %a1
- store i32 %rem, i32* %r, align 4, !tbaa !0
+ store i32 %rem, i32* %r, align 4
%div = sdiv i32 %a0, %a1
ret i32 %div
}
@@ -41,11 +41,7 @@ entry:
define i32 @udivrem1(i32 %a0, i32 %a1, i32* nocapture %r) nounwind {
entry:
%rem = urem i32 %a0, %a1
- store i32 %rem, i32* %r, align 4, !tbaa !0
+ store i32 %rem, i32* %r, align 4
%div = udiv i32 %a0, %a1
ret i32 %div
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll b/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll
new file mode 100644
index 000000000000..9f2f0661f997
--- /dev/null
+++ b/test/CodeGen/Mips/dsp-patterns-cmp-vselect.ll
@@ -0,0 +1,641 @@
+; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s
+
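+; Vector selects lower to a DSP compare (cmp[u].<cond>.{ph,qb}) that sets
+; condition bits read by the following pick.{ph,qb}; gt/ge and ne are formed
+; from le/lt and eq by swapping the pick operands.
+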
+; CHECK: select_v2q15_eq_:
+; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.ph ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v2q15_eq_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp eq <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2q15_lt_:
+; CHECK: cmp.lt.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v2q15_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp slt <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2q15_le_:
+; CHECK: cmp.le.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v2q15_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp sle <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2q15_ne_:
+; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.ph ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v2q15_ne_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp ne <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2q15_gt_:
+; CHECK: cmp.le.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v2q15_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp sgt <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2q15_ge_:
+; CHECK: cmp.lt.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v2q15_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp sge <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_eq_:
+; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.qb ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v4ui8_eq_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp eq <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_lt_:
+; CHECK: cmpu.lt.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v4ui8_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp ult <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_le_:
+; CHECK: cmpu.le.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, $6, $7
+
+define { i32 } @select_v4ui8_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp ule <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_ne_:
+; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.qb ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v4ui8_ne_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp ne <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_gt_:
+; CHECK: cmpu.le.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v4ui8_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp ugt <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4ui8_ge_:
+; CHECK: cmpu.lt.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, $7, $6
+
+define { i32 } @select_v4ui8_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp uge <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
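+; The DSP ASE provides only signed halfword (cmp.*.ph) and unsigned byte
+; (cmpu.*.qb) compares, so the unsigned v2i16 and signed v4i8 selects below
+; must not be matched to cmp/pick.
+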
+; CHECK: select_v2ui16_lt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v2ui16_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp ult <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2ui16_le_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v2ui16_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp ule <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2ui16_gt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v2ui16_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp ugt <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v2ui16_ge_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v2ui16_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %2 = bitcast i32 %a2.coerce to <2 x i16>
+ %3 = bitcast i32 %a3.coerce to <2 x i16>
+ %cmp = icmp uge <2 x i16> %0, %1
+ %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4i8_lt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v4i8_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp slt <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4i8_le_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v4i8_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp sle <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4i8_gt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v4i8_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp sgt <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: select_v4i8_ge_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @select_v4i8_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %2 = bitcast i32 %a2.coerce to <4 x i8>
+ %3 = bitcast i32 %a3.coerce to <4 x i8>
+ %cmp = icmp sge <4 x i8> %0, %1
+ %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
+ %4 = bitcast <4 x i8> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_eq_:
+; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_eq_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp eq <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_lt_:
+; CHECK: cmp.lt.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_lt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp slt <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_le_:
+; CHECK: cmp.le.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_le_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp sle <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_ne_:
+; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_ne_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp ne <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_gt_:
+; CHECK: cmp.le.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_gt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp sgt <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2q15_ge_:
+; CHECK: cmp.lt.ph $4, $5
+; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v2q15_ge_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp sge <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_eq_:
+; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_eq_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp eq <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_lt_:
+; CHECK: cmpu.lt.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_lt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp ult <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_le_:
+; CHECK: cmpu.le.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_le_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp ule <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_ne_:
+; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_ne_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp ne <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_gt_:
+; CHECK: cmpu.le.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_gt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp ugt <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4ui8_ge_:
+; CHECK: cmpu.lt.qb $4, $5
+; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
+
+define { i32 } @compare_v4ui8_ge_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp uge <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
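+; The DSP ASE has no unsigned halfword compare (cmp.*.ph is signed and
+; cmpu.*.qb is byte-only), so unsigned v2i16 compares must be expanded.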
+; CHECK: compare_v2ui16_lt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v2ui16_lt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp ult <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2ui16_le_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v2ui16_le_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp ule <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2ui16_gt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v2ui16_gt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp ugt <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v2ui16_ge_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v2ui16_ge_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %1 = bitcast i32 %a1.coerce to <2 x i16>
+ %cmp = icmp uge <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp to <2 x i16>
+ %2 = bitcast <2 x i16> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
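+; Likewise, byte compares are unsigned only (cmpu.*.qb), so signed v4i8
+; compares must be expanded.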
+; CHECK: compare_v4i8_lt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v4i8_lt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp slt <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4i8_le_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v4i8_le_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp sle <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4i8_gt_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v4i8_gt_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp sgt <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; CHECK: compare_v4i8_ge_:
+; CHECK-NOT: cmp
+; CHECK-NOT: pick
+
+define { i32 } @compare_v4i8_ge_(i32 %a0.coerce, i32 %a1.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %1 = bitcast i32 %a1.coerce to <4 x i8>
+ %cmp = icmp sge <4 x i8> %0, %1
+ %sext = sext <4 x i1> %cmp to <4 x i8>
+ %2 = bitcast <4 x i8> %sext to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll
index 0752f69c3e9e..eeb7140ca2cb 100644
--- a/test/CodeGen/Mips/dsp-patterns.ll
+++ b/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,7 +1,8 @@
-; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s
+; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
+; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
-; CHECK: test_lbux:
-; CHECK: lbux ${{[0-9]+}}
+; R1: test_lbux:
+; R1: lbux ${{[0-9]+}}
define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
entry:
@@ -10,8 +11,8 @@ entry:
ret i8 %0
}
-; CHECK: test_lhx:
-; CHECK: lhx ${{[0-9]+}}
+; R1: test_lhx:
+; R1: lhx ${{[0-9]+}}
define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
entry:
@@ -20,8 +21,8 @@ entry:
ret i16 %0
}
-; CHECK: test_lwx:
-; CHECK: lwx ${{[0-9]+}}
+; R1: test_lwx:
+; R1: lwx ${{[0-9]+}}
define i32 @test_lwx(i32* nocapture %b, i32 %i) {
entry:
@@ -29,3 +30,232 @@ entry:
%0 = load i32* %add.ptr, align 4
ret i32 %0
}
+
+; R1: test_add_v2q15_:
+; R1: addq.ph ${{[0-9]+}}
+
+define { i32 } @test_add_v2q15_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %1 = bitcast i32 %b.coerce to <2 x i16>
+ %add = add <2 x i16> %0, %1
+ %2 = bitcast <2 x i16> %add to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: test_sub_v2q15_:
+; R1: subq.ph ${{[0-9]+}}
+
+define { i32 } @test_sub_v2q15_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %1 = bitcast i32 %b.coerce to <2 x i16>
+ %sub = sub <2 x i16> %0, %1
+ %2 = bitcast <2 x i16> %sub to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R2: test_mul_v2q15_:
+; R2: mul.ph ${{[0-9]+}}
+
+; mul.ph is a DSP R2 instruction; check that the multiply node is expanded
+; for R1.
+; R1: test_mul_v2q15_:
+; R1: mul ${{[0-9]+}}
+; R1: mul ${{[0-9]+}}
+
+define { i32 } @test_mul_v2q15_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %1 = bitcast i32 %b.coerce to <2 x i16>
+ %mul = mul <2 x i16> %0, %1
+ %2 = bitcast <2 x i16> %mul to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: test_add_v4i8_:
+; R1: addu.qb ${{[0-9]+}}
+
+define { i32 } @test_add_v4i8_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <4 x i8>
+ %1 = bitcast i32 %b.coerce to <4 x i8>
+ %add = add <4 x i8> %0, %1
+ %2 = bitcast <4 x i8> %add to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: test_sub_v4i8_:
+; R1: subu.qb ${{[0-9]+}}
+
+define { i32 } @test_sub_v4i8_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <4 x i8>
+ %1 = bitcast i32 %b.coerce to <4 x i8>
+ %sub = sub <4 x i8> %0, %1
+ %2 = bitcast <4 x i8> %sub to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; The DSP ASE has no v4i8 multiply instruction, so check that the multiply
+; node is expanded even on R2.
+; R2: test_mul_v4i8_:
+; R2: mul ${{[0-9]+}}
+; R2: mul ${{[0-9]+}}
+; R2: mul ${{[0-9]+}}
+; R2: mul ${{[0-9]+}}
+
+define { i32 } @test_mul_v4i8_(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <4 x i8>
+ %1 = bitcast i32 %b.coerce to <4 x i8>
+ %mul = mul <4 x i8> %0, %1
+ %2 = bitcast <4 x i8> %mul to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
+ ret { i32 } %.fca.0.insert
+}
+
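+; A 64-bit add is lowered to addsc (add the low words and set the carry
+; bit in DSPControl) followed by addwc (add the high words plus the carry).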
+; R1: test_addsc:
+; R1: addsc ${{[0-9]+}}
+; R1: addwc ${{[0-9]+}}
+
+define i64 @test_addsc(i64 %a, i64 %b) {
+entry:
+ %add = add nsw i64 %b, %a
+ ret i64 %add
+}
+
+; R1: shift1_v2i16_shl_:
+; R1: shll.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
+
+define { i32 } @shift1_v2i16_shl_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %shl = shl <2 x i16> %0, <i16 15, i16 15>
+ %1 = bitcast <2 x i16> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: shift1_v2i16_sra_:
+; R1: shra.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
+
+define { i32 } @shift1_v2i16_sra_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %shr = ashr <2 x i16> %0, <i16 15, i16 15>
+ %1 = bitcast <2 x i16> %shr to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
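+; shrl.ph is a DSP R2 instruction, so R1 must expand the logical halfword
+; shift.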
+; R1: shift1_v2ui16_srl_:
+; R1-NOT: shrl.ph
+; R2: shift1_v2ui16_srl_:
+; R2: shrl.ph ${{[0-9]+}}, ${{[0-9]+}}, 15
+
+define { i32 } @shift1_v2ui16_srl_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <2 x i16>
+ %shr = lshr <2 x i16> %0, <i16 15, i16 15>
+ %1 = bitcast <2 x i16> %shr to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: shift1_v4i8_shl_:
+; R1: shll.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
+
+define { i32 } @shift1_v4i8_shl_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %shl = shl <4 x i8> %0, <i8 7, i8 7, i8 7, i8 7>
+ %1 = bitcast <4 x i8> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
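+; shra.qb is likewise DSP R2 only, so R1 expands the arithmetic byte shift.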
+; R1: shift1_v4i8_sra_:
+; R1-NOT: shra.qb
+; R2: shift1_v4i8_sra_:
+; R2: shra.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
+
+define { i32 } @shift1_v4i8_sra_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %shr = ashr <4 x i8> %0, <i8 7, i8 7, i8 7, i8 7>
+ %1 = bitcast <4 x i8> %shr to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; R1: shift1_v4ui8_srl_:
+; R1: shrl.qb ${{[0-9]+}}, ${{[0-9]+}}, 7
+
+define { i32 } @shift1_v4ui8_srl_(i32 %a0.coerce) {
+entry:
+ %0 = bitcast i32 %a0.coerce to <4 x i8>
+ %shr = lshr <4 x i8> %0, <i8 7, i8 7, i8 7, i8 7>
+ %1 = bitcast <4 x i8> %shr to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; Check that the shift node is expanded if the shift amount is not a splat
+; of a 16-bit element.
+;
+; R1: test_vector_splat_imm_v2q15:
+; R1-NOT: shll.ph
+
+define { i32 } @test_vector_splat_imm_v2q15(i32 %a.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %shl = shl <2 x i16> %0, <i16 0, i16 2>
+ %1 = bitcast <2 x i16> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; Check that the shift node is expanded if the shift amount is not a splat
+; of an 8-bit element.
+;
+; R1: test_vector_splat_imm_v4i8:
+; R1-NOT: shll.qb
+
+define { i32 } @test_vector_splat_imm_v4i8(i32 %a.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <4 x i8>
+ %shl = shl <4 x i8> %0, <i8 0, i8 2, i8 0, i8 2>
+ %1 = bitcast <4 x i8> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; Check that the shift node is expanded if the shift amount doesn't fit in
+; the 4-bit sa field.
+;
+; R1: test_shift_amount_v2q15:
+; R1-NOT: shll.ph
+
+define { i32 } @test_shift_amount_v2q15(i32 %a.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %shl = shl <2 x i16> %0, <i16 16, i16 16>
+ %1 = bitcast <2 x i16> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
+
+; Check that the shift node is expanded if the shift amount doesn't fit in
+; the 3-bit sa field.
+;
+; R1: test_shift_amount_v4i8:
+; R1-NOT: shll.qb
+
+define { i32 } @test_shift_amount_v4i8(i32 %a.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <4 x i8>
+ %shl = shl <4 x i8> %0, <i8 8, i8 8, i8 8, i8 8>
+ %1 = bitcast <4 x i8> %shl to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %1, 0
+ ret { i32 } %.fca.0.insert
+}
diff --git a/test/CodeGen/Mips/dsp-r1.ll b/test/CodeGen/Mips/dsp-r1.ll
index c9dc8cfd0be0..acdd17d1afd4 100644
--- a/test/CodeGen/Mips/dsp-r1.ll
+++ b/test/CodeGen/Mips/dsp-r1.ll
@@ -772,6 +772,7 @@ entry:
%0 = bitcast i32 %a0.coerce to <4 x i8>
%1 = bitcast i32 %a1.coerce to <4 x i8>
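+; wrdsp with mask 16 writes the ccond field of DSPControl, which the pick
+; instruction below reads.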
+ tail call void @llvm.mips.wrdsp(i32 %i0, i32 16)
%2 = tail call <4 x i8> @llvm.mips.pick.qb(<4 x i8> %0, <4 x i8> %1)
%3 = bitcast <4 x i8> %2 to i32
%.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0
@@ -786,6 +787,7 @@ entry:
%0 = bitcast i32 %a0.coerce to <2 x i16>
%1 = bitcast i32 %a1.coerce to <2 x i16>
+ tail call void @llvm.mips.wrdsp(i32 %i0, i32 16)
%2 = tail call <2 x i16> @llvm.mips.pick.ph(<2 x i16> %0, <2 x i16> %1)
%3 = bitcast <2 x i16> %2 to i32
%.fca.0.insert = insertvalue { i32 } undef, i32 %3, 0
@@ -808,14 +810,6 @@ entry:
declare <2 x i16> @llvm.mips.packrl.ph(<2 x i16>, <2 x i16>) nounwind readnone
-define i32 @test__builtin_mips_rddsp1(i32 %i0) nounwind readonly {
-entry:
-; CHECK: rddsp ${{[0-9]+}}
-
- %0 = tail call i32 @llvm.mips.rddsp(i32 31)
- ret i32 %0
-}
-
define { i32 } @test__builtin_mips_shll_qb1(i32 %i0, i32 %a0.coerce) nounwind {
entry:
; CHECK: shll.qb
@@ -1232,6 +1226,7 @@ declare i32 @llvm.mips.lwx(i8*, i32) nounwind readonly
define i32 @test__builtin_mips_wrdsp1(i32 %i0, i32 %a0) nounwind {
entry:
; CHECK: wrdsp ${{[0-9]+}}
+; CHECK: rddsp ${{[0-9]+}}
tail call void @llvm.mips.wrdsp(i32 %a0, i32 31)
%0 = tail call i32 @llvm.mips.rddsp(i32 31)
diff --git a/test/CodeGen/Mips/eh.ll b/test/CodeGen/Mips/eh.ll
index d14150a68a56..fc9e2ef21a8b 100644
--- a/test/CodeGen/Mips/eh.ll
+++ b/test/CodeGen/Mips/eh.ll
@@ -18,7 +18,7 @@ entry:
%exception = tail call i8* @__cxa_allocate_exception(i32 8) nounwind
%0 = bitcast i8* %exception to double*
- store double 3.200000e+00, double* %0, align 8, !tbaa !0
+ store double 3.200000e+00, double* %0, align 8
invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTId to i8*), i8* null) noreturn
to label %unreachable unwind label %lpad
@@ -39,7 +39,7 @@ catch: ; preds = %lpad
%4 = bitcast i8* %3 to double*
%exn.scalar = load double* %4, align 8
%add = fadd double %exn.scalar, %i2
- store double %add, double* @g1, align 8, !tbaa !0
+ store double %add, double* @g1, align 8
tail call void @__cxa_end_catch() nounwind
ret void
@@ -61,7 +61,3 @@ declare void @__cxa_throw(i8*, i8*, i8*)
declare i8* @__cxa_begin_catch(i8*)
declare void @__cxa_end_catch()
-
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Mips/fpneeded.ll b/test/CodeGen/Mips/fpneeded.ll
new file mode 100644
index 000000000000..623883a0d5c0
--- /dev/null
+++ b/test/CodeGen/Mips/fpneeded.ll
@@ -0,0 +1,149 @@
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-os16 | FileCheck %s -check-prefix=32
+
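+; Under -mips-os16, everything is compiled as mips16 except functions that
+; use floating point, which are compiled as nomips16 (plain mips32); vv at
+; the bottom uses no FP and stays mips16.
+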
+@x = global float 1.000000e+00, align 4
+@y = global float 2.000000e+00, align 4
+@zz = common global float 0.000000e+00, align 4
+@z = common global float 0.000000e+00, align 4
+
+define float @fv() #0 {
+entry:
+ ret float 1.000000e+00
+}
+
+; 32: .set nomips16 # @fv
+; 32: .ent fv
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end fv
+
+define double @dv() #0 {
+entry:
+ ret double 2.000000e+00
+}
+
+; 32: .set nomips16 # @dv
+; 32: .ent dv
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end dv
+
+define void @vf(float %x) #0 {
+entry:
+ %x.addr = alloca float, align 4
+ store float %x, float* %x.addr, align 4
+ ret void
+}
+
+; 32: .set nomips16 # @vf
+; 32: .ent vf
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end vf
+
+define void @vd(double %x) #0 {
+entry:
+ %x.addr = alloca double, align 8
+ store double %x, double* %x.addr, align 8
+ ret void
+}
+
+; 32: .set nomips16 # @vd
+; 32: .ent vd
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end vd
+
+define void @foo1() #0 {
+entry:
+ store float 1.000000e+00, float* @zz, align 4
+ %0 = load float* @y, align 4
+ %1 = load float* @x, align 4
+ %add = fadd float %0, %1
+ store float %add, float* @z, align 4
+ ret void
+}
+
+; 32: .set nomips16 # @foo1
+; 32: .ent foo1
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end foo1
+
+define void @foo2() #0 {
+entry:
+ %0 = load float* @x, align 4
+ call void @vf(float %0)
+ ret void
+}
+
+
+; 32: .set nomips16 # @foo2
+; 32: .ent foo2
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end foo2
+
+define void @foo3() #0 {
+entry:
+ %call = call float @fv()
+ store float %call, float* @x, align 4
+ ret void
+}
+
+; 32: .set nomips16 # @foo3
+; 32: .ent foo3
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end foo3
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+define void @vv() #0 {
+entry:
+ ret void
+}
+
+; 32: .set mips16 # @vv
+; 32: .ent vv
+
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end vv
+
diff --git a/test/CodeGen/Mips/fpnotneeded.ll b/test/CodeGen/Mips/fpnotneeded.ll
new file mode 100644
index 000000000000..dc2ec10817f3
--- /dev/null
+++ b/test/CodeGen/Mips/fpnotneeded.ll
@@ -0,0 +1,77 @@
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-os16 | FileCheck %s -check-prefix=32
+
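+; Under -mips-os16, these FP-free functions are compiled as mips16; fv at
+; the bottom uses floating point and is compiled as nomips16.
+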
+@i = global i32 1, align 4
+@f = global float 1.000000e+00, align 4
+
+define void @vv() #0 {
+entry:
+ ret void
+}
+
+; 32: .set mips16 # @vv
+; 32: .ent vv
+
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end vv
+
+define i32 @iv() #0 {
+entry:
+ %0 = load i32* @i, align 4
+ ret i32 %0
+}
+
+; 32: .set mips16 # @iv
+; 32: .ent iv
+
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end iv
+
+define void @vif(i32 %i, float %f) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ %f.addr = alloca float, align 4
+ store i32 %i, i32* %i.addr, align 4
+ store float %f, float* %f.addr, align 4
+ ret void
+}
+
+; 32: .set mips16 # @vif
+; 32: .ent vif
+
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end vif
+
+define void @foo() #0 {
+entry:
+ store float 2.000000e+00, float* @f, align 4
+ ret void
+}
+
+; 32: .set mips16 # @foo
+; 32: .ent foo
+
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+
+define float @fv() #0 {
+entry:
+ ret float 1.000000e+00
+}
+
+; 32: .set nomips16 # @fv
+; 32: .ent fv
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end fv
diff --git a/test/CodeGen/Mips/inlineasmmemop.ll b/test/CodeGen/Mips/inlineasmmemop.ll
index 1c7c4437b892..a08a0243b8b9 100644
--- a/test/CodeGen/Mips/inlineasmmemop.ll
+++ b/test/CodeGen/Mips/inlineasmmemop.ll
@@ -1,5 +1,6 @@
; RUN: llc -march=mipsel < %s | FileCheck %s
+; Simple memory access.
@g1 = external global i32
define i32 @f1(i32 %x) nounwind {
@@ -21,3 +22,42 @@ entry:
ret i32 %0
}
+; "D": Second word of double word. This works for any memory element
+; double or single.
+; CHECK: #APP
+; CHECK-NEXT: lw ${{[0-9]+}},4(${{[0-9]+}});
+; CHECK-NEXT: #NO_APP
+
+; No "D": First word of double word. This works for any memory element
+; double or single.
+; CHECK: #APP
+; CHECK-NEXT: lw ${{[0-9]+}},0(${{[0-9]+}});
+; CHECK-NEXT: #NO_APP
+
+;int b[8] = {0,1,2,3,4,5,6,7};
+;int main()
+;{
+; int i;
+;
+; // The first word. Notice: no 'D'.
+; { asm (
+; "lw %0,%1;\n"
+; : "=r" (i) : "m" (*(b+4)));}
+;
+; // The second word
+; { asm (
+; "lw %0,%D1;\n"
+; : "=r" (i) "m" (*(b+4)));}
+;}
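+;
+; In the IR below, ${1:D} applies the 'D' modifier to memory operand 1;
+; ${1} is the unmodified operand.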
+
+@b = common global [20 x i32] zeroinitializer, align 4
+
+define void @main() {
+entry:
+ tail call void asm sideeffect " lw $0,${1:D};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
+ tail call void asm sideeffect " lw $0,${1};", "r,*m,~{$11}"(i32 undef, i32* getelementptr inbounds ([20 x i32]* @b, i32 0, i32 3))
+ ret void
+}
+
+attributes #0 = { nounwind }
+
diff --git a/test/CodeGen/Mips/mips16_32_1.ll b/test/CodeGen/Mips/mips16_32_1.ll
new file mode 100644
index 000000000000..6f4826ea9600
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_1.ll
@@ -0,0 +1,14 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=pic -O3 < %s -mips-mixed-16-32 | FileCheck %s
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=pic -O3 < %s -mips-mixed-16-32 | FileCheck %s
+
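+; The "mips16" attribute (attributes #0) forces mips16 codegen for foo
+; under both -mcpu=mips16 and -mcpu=mips32.
+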
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; CHECK: .set mips16 # @foo
+; CHECK: .ent foo
+; CHECK: save {{.+}}
+; CHECK: restore {{.+}}
+; CHECK: .end foo
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_10.ll b/test/CodeGen/Mips/mips16_32_10.ll
new file mode 100644
index 000000000000..330dbfec63b9
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_10.ll
@@ -0,0 +1,59 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+
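+; With -mcpu=mips16 the default ISA is mips16; the "nomips16" attribute on
+; foo (#0) and main (#2) overrides it per function, while nofoo (#1) keeps
+; the default.
+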
+define void @foo() #0 {
+entry:
+ ret void
+}
+; 16: .set nomips16 # @foo
+; 16: .ent foo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end foo
+
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @nofoo
+; 16: .ent nofoo
+
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end nofoo
+
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set nomips16 # @main
+; 16: .ent main
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_3.ll b/test/CodeGen/Mips/mips16_32_3.ll
new file mode 100644
index 000000000000..8874a8872534
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_3.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @foo
+; 16: .ent foo
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end foo
+; 32: .set mips16 # @foo
+; 32: .ent foo
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set nomips16 # @nofoo
+; 16: .ent nofoo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end nofoo
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set mips16 # @main
+; 16: .ent main
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end main
+; 32: .set nomips16 # @main
+; 32: .ent main
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: addiu $2, $zero, 0
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_4.ll b/test/CodeGen/Mips/mips16_32_4.ll
new file mode 100644
index 000000000000..cdaed6c71be0
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_4.ll
@@ -0,0 +1,65 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @foo
+; 16: .ent foo
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end foo
+; 32: .set mips16 # @foo
+; 32: .ent foo
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set nomips16 # @nofoo
+; 16: .ent nofoo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end nofoo
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set mips16 # @main
+; 16: .ent main
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end main
+; 32: .set mips16 # @main
+; 32: .ent main
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end main
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_5.ll b/test/CodeGen/Mips/mips16_32_5.ll
new file mode 100644
index 000000000000..45e0bf49ddd2
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_5.ll
@@ -0,0 +1,80 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @foo
+; 16: .ent foo
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end foo
+; 32: .set mips16 # @foo
+; 32: .ent foo
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set nomips16 # @nofoo
+; 16: .ent nofoo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end nofoo
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set nomips16 # @main
+; 16: .ent main
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: addiu $2, $zero, 0
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end main
+
+; 32: .set nomips16 # @main
+; 32: .ent main
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: addiu $2, $zero, 0
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_6.ll b/test/CodeGen/Mips/mips16_32_6.ll
new file mode 100644
index 000000000000..f4b8e7a91adc
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_6.ll
@@ -0,0 +1,86 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @foo
+; 16: .ent foo
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end foo
+; 32: .set nomips16 # @foo
+; 32: .ent foo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set nomips16 # @nofoo
+; 16: .ent nofoo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end nofoo
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set nomips16 # @main
+; 16: .ent main
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: addiu $2, $zero, 0
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end main
+
+; 32: .set nomips16 # @main
+; 32: .ent main
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: addiu $2, $zero, 0
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "nomips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_7.ll b/test/CodeGen/Mips/mips16_32_7.ll
new file mode 100644
index 000000000000..f8726eadc70c
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_7.ll
@@ -0,0 +1,76 @@
+; RUN: llc -march=mipsel -mcpu=mips16 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=16
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 16: .set mips16 # @foo
+; 16: .ent foo
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end foo
+; 32: .set nomips16 # @foo
+; 32: .ent foo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 16: .set nomips16 # @nofoo
+; 16: .ent nofoo
+; 16: .set noreorder
+; 16: .set nomacro
+; 16: .set noat
+; 16: jr $ra
+; 16: nop
+; 16: .set at
+; 16: .set macro
+; 16: .set reorder
+; 16: .end nofoo
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 16: .set mips16 # @main
+; 16: .ent main
+; 16: save {{.+}}
+; 16: restore {{.+}}
+; 16: .end main
+
+; 32: .set mips16 # @main
+; 32: .ent main
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_8.ll b/test/CodeGen/Mips/mips16_32_8.ll
new file mode 100644
index 000000000000..e51f296f9df3
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_8.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+@x = global float 1.000000e+00, align 4
+@y = global float 0x4007333340000000, align 4
+@i = common global i32 0, align 4
+@f = common global float 0.000000e+00, align 4
+@.str = private unnamed_addr constant [8 x i8] c"f = %f\0A\00", align 1
+@.str1 = private unnamed_addr constant [11 x i8] c"hello %i \0A\00", align 1
+@.str2 = private unnamed_addr constant [13 x i8] c"goodbye %i \0A\00", align 1
+
+define void @foo() #0 {
+entry:
+ store i32 10, i32* @i, align 4
+ ret void
+}
+
+; 32: .set mips16 # @foo
+; 32: .ent foo
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+
+define void @nofoo() #1 {
+entry:
+ store i32 20, i32* @i, align 4
+ %0 = load float* @x, align 4
+ %1 = load float* @y, align 4
+ %add = fadd float %0, %1
+ store float %add, float* @f, align 4
+ %2 = load float* @f, align 4
+ %conv = fpext float %2 to double
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), double %conv)
+ ret void
+}
+
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: add.s {{.+}}
+; 32: mfc1 {{.+}}
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+declare i32 @printf(i8*, ...) #2
+
+define i32 @main() #3 {
+entry:
+ call void @foo()
+ %0 = load i32* @i, align 4
+ %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str1, i32 0, i32 0), i32 %0)
+ call void @nofoo()
+ %1 = load i32* @i, align 4
+ %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str2, i32 0, i32 0), i32 %1)
+ ret i32 0
+}
+
+; 32: .set nomips16 # @main
+; 32: .ent main
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "nomips16" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/mips16_32_9.ll b/test/CodeGen/Mips/mips16_32_9.ll
new file mode 100644
index 000000000000..f5ff36849015
--- /dev/null
+++ b/test/CodeGen/Mips/mips16_32_9.ll
@@ -0,0 +1,51 @@
+; RUN: llc -march=mipsel -mcpu=mips32 -relocation-model=static -O3 < %s -mips-mixed-16-32 | FileCheck %s -check-prefix=32
+
+define void @foo() #0 {
+entry:
+ ret void
+}
+
+; 32: .set mips16 # @foo
+; 32: .ent foo
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end foo
+define void @nofoo() #1 {
+entry:
+ ret void
+}
+
+; 32: .set nomips16 # @nofoo
+; 32: .ent nofoo
+; 32: .set noreorder
+; 32: .set nomacro
+; 32: .set noat
+; 32: jr $ra
+; 32: nop
+; 32: .set at
+; 32: .set macro
+; 32: .set reorder
+; 32: .end nofoo
+define i32 @main() #2 {
+entry:
+ ret i32 0
+}
+
+; 32: .set mips16 # @main
+; 32: .ent main
+; 32: save {{.+}}
+; 32: restore {{.+}}
+; 32: .end main
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind "less-precise-fpmad"="false" "mips16" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/select.ll b/test/CodeGen/Mips/select.ll
index 40115befc45d..06e2a86ad176 100644
--- a/test/CodeGen/Mips/select.ll
+++ b/test/CodeGen/Mips/select.ll
@@ -130,8 +130,8 @@ define i32 @sel12(i32 %f0, i32 %f1) nounwind readonly {
entry:
; CHECK: c.eq.d
; CHECK: movt
- %tmp = load double* @d2, align 8, !tbaa !0
- %tmp1 = load double* @d3, align 8, !tbaa !0
+ %tmp = load double* @d2, align 8
+ %tmp1 = load double* @d3, align 8
%cmp = fcmp oeq double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
@@ -141,8 +141,8 @@ define i32 @sel13(i32 %f0, i32 %f1) nounwind readonly {
entry:
; CHECK: c.olt.d
; CHECK: movt
- %tmp = load double* @d2, align 8, !tbaa !0
- %tmp1 = load double* @d3, align 8, !tbaa !0
+ %tmp = load double* @d2, align 8
+ %tmp1 = load double* @d3, align 8
%cmp = fcmp olt double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
@@ -152,13 +152,9 @@ define i32 @sel14(i32 %f0, i32 %f1) nounwind readonly {
entry:
; CHECK: c.ule.d
; CHECK: movf
- %tmp = load double* @d2, align 8, !tbaa !0
- %tmp1 = load double* @d3, align 8, !tbaa !0
+ %tmp = load double* @d2, align 8
+ %tmp1 = load double* @d3, align 8
%cmp = fcmp ogt double %tmp, %tmp1
%cond = select i1 %cmp, i32 %f0, i32 %f1
ret i32 %cond
}
-
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/Mips/spill-copy-acreg.ll b/test/CodeGen/Mips/spill-copy-acreg.ll
new file mode 100644
index 000000000000..6563a5cffd91
--- /dev/null
+++ b/test/CodeGen/Mips/spill-copy-acreg.ll
@@ -0,0 +1,41 @@
+; RUN: llc -march=mipsel -mattr=+dsp < %s
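+
+; No FileCheck here: the test only verifies that llc does not crash when it
+; has to copy and spill DSP accumulator and condition-code registers across
+; the calls to foo1.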
+
+@g1 = common global i64 0, align 8
+@g2 = common global i64 0, align 8
+@g3 = common global i64 0, align 8
+
+define i64 @test_acreg_copy(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
+entry:
+ %0 = load i64* @g1, align 8
+ %1 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a0, i32 %a1)
+ %2 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a2, i32 %a3)
+ store i64 %1, i64* @g1, align 8
+ store i64 %2, i64* @g2, align 8
+ tail call void @foo1()
+ store i64 %2, i64* @g3, align 8
+ ret i64 %1
+}
+
+declare i64 @llvm.mips.maddu(i64, i32, i32)
+
+declare void @foo1()
+
+@g4 = common global <2 x i16> zeroinitializer, align 4
+@g5 = common global <2 x i16> zeroinitializer, align 4
+@g6 = common global <2 x i16> zeroinitializer, align 4
+
+define { i32 } @test_ccond_spill(i32 %a.coerce, i32 %b.coerce) {
+entry:
+ %0 = bitcast i32 %a.coerce to <2 x i16>
+ %1 = bitcast i32 %b.coerce to <2 x i16>
+ %cmp3 = icmp slt <2 x i16> %0, %1
+ %sext = sext <2 x i1> %cmp3 to <2 x i16>
+ store <2 x i16> %sext, <2 x i16>* @g4, align 4
+ tail call void @foo1()
+ %2 = load <2 x i16>* @g5, align 4
+ %3 = load <2 x i16>* @g6, align 4
+ %or = select <2 x i1> %cmp3, <2 x i16> %2, <2 x i16> %3
+ %4 = bitcast <2 x i16> %or to i32
+ %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
+ ret { i32 } %.fca.0.insert
+}
diff --git a/test/CodeGen/Mips/tnaked.ll b/test/CodeGen/Mips/tnaked.ll
new file mode 100644
index 000000000000..f5bdd915b28c
--- /dev/null
+++ b/test/CodeGen/Mips/tnaked.ll
@@ -0,0 +1,29 @@
+; RUN: llc -march=mipsel < %s | FileCheck %s
+
+
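+; The naked attribute (#0) suppresses prologue/epilogue emission, so no
+; .frame/.mask/.fmask directives and no stack adjustment are expected for
+; tnaked, while tnonaked gets the normal frame setup.
+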
+define void @tnaked() #0 {
+entry:
+ ret void
+}
+
+; CHECK: .ent tnaked
+; CHECK: tnaked:
+; CHECK-NOT: .frame {{.*}}
+; CHECK-NOT: .mask {{.*}}
+; CHECK-NOT: .fmask {{.*}}
+; CHECK-NOT: addiu $sp, $sp, -8
+
+define void @tnonaked() #1 {
+entry:
+ ret void
+}
+
+; CHECK: .ent tnonaked
+; CHECK: tnonaked:
+; CHECK: .frame $fp,8,$ra
+; CHECK: .mask 0x40000000,-4
+; CHECK: .fmask 0x00000000,0
+; CHECK: addiu $sp, $sp, -8
+
+attributes #0 = { naked noinline nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Mips/zeroreg.ll b/test/CodeGen/Mips/zeroreg.ll
index 79ed6091f887..e0e93e2e7682 100644
--- a/test/CodeGen/Mips/zeroreg.ll
+++ b/test/CodeGen/Mips/zeroreg.ll
@@ -6,7 +6,7 @@ define i32 @foo0(i32 %s) nounwind readonly {
entry:
; CHECK: movn ${{[0-9]+}}, $zero
%tobool = icmp ne i32 %s, 0
- %0 = load i32* @g1, align 4, !tbaa !0
+ %0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 0, i32 %0
ret i32 %cond
}
@@ -15,11 +15,7 @@ define i32 @foo1(i32 %s) nounwind readonly {
entry:
; CHECK: movz ${{[0-9]+}}, $zero
%tobool = icmp ne i32 %s, 0
- %0 = load i32* @g1, align 4, !tbaa !0
+ %0 = load i32* @g1, align 4
%cond = select i1 %tobool, i32 %0, i32 0
ret i32 %cond
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/NVPTX/generic-to-nvvm.ll b/test/CodeGen/NVPTX/generic-to-nvvm.ll
new file mode 100644
index 000000000000..c9cb2f71f425
--- /dev/null
+++ b/test/CodeGen/NVPTX/generic-to-nvvm.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -drvcuda | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+
+; Ensure global variables in address space 0 are promoted to address space 1
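+; The loads in @foo below still use generic (address space 0) pointers, so
+; each access converts the promoted global back with cvta.global.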
+
+; CHECK: .global .align 4 .u32 myglobal = 42;
+@myglobal = internal global i32 42, align 4
+; CHECK: .global .align 4 .u32 myconst = 42;
+@myconst = internal constant i32 42, align 4
+
+
+define void @foo(i32* %a, i32* %b) {
+; CHECK: cvta.global.u32
+ %ld1 = load i32* @myglobal
+; CHECK: cvta.global.u32
+ %ld2 = load i32* @myconst
+ store i32 %ld1, i32* %a
+ store i32 %ld2, i32* %b
+ ret void
+}
+
+
+!nvvm.annotations = !{!0}
+!0 = metadata !{void (i32*, i32*)* @foo, metadata !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/i1-global.ll b/test/CodeGen/NVPTX/i1-global.ll
new file mode 100644
index 000000000000..0595325977e1
--- /dev/null
+++ b/test/CodeGen/NVPTX/i1-global.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -drvcuda | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+
+
+; CHECK: .visible .global .align 1 .u8 mypred
+@mypred = addrspace(1) global i1 true, align 1
+
+
+define void @foo(i1 %p, i32* %out) {
+ %ld = load i1 addrspace(1)* @mypred
+ %val = zext i1 %ld to i32
+ store i32 %val, i32* %out
+ ret void
+}
+
+
+!nvvm.annotations = !{!0}
+!0 = metadata !{void (i1, i32*)* @foo, metadata !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/i1-param.ll b/test/CodeGen/NVPTX/i1-param.ll
new file mode 100644
index 000000000000..fabd61a25d2f
--- /dev/null
+++ b/test/CodeGen/NVPTX/i1-param.ll
@@ -0,0 +1,18 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -drvcuda | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+
+; Make sure predicate (i1) operands to kernels get expanded out to .u8
+
+; CHECK: .entry foo
+; CHECK: .param .u8 foo_param_0
+; CHECK: .param .u32 foo_param_1
+define void @foo(i1 %p, i32* %out) {
+ %val = zext i1 %p to i32
+ store i32 %val, i32* %out
+ ret void
+}
+
+
+!nvvm.annotations = !{!0}
+!0 = metadata !{void (i1, i32*)* @foo, metadata !"kernel", i32 1}
diff --git a/test/CodeGen/NVPTX/intrinsics.ll b/test/CodeGen/NVPTX/intrinsics.ll
index 8b0357be87cb..1676f20643d2 100644
--- a/test/CodeGen/NVPTX/intrinsics.ll
+++ b/test/CodeGen/NVPTX/intrinsics.ll
@@ -15,5 +15,12 @@ define ptx_device double @test_fabs(double %d) {
ret double %x
}
+define float @test_nvvm_sqrt(float %a) {
+ %val = call float @llvm.nvvm.sqrt.f(float %a)
+ ret float %val
+}
+
+
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
+declare float @llvm.nvvm.sqrt.f(float)
diff --git a/test/CodeGen/NVPTX/refl1.ll b/test/CodeGen/NVPTX/refl1.ll
new file mode 100644
index 000000000000..5a9dac152e41
--- /dev/null
+++ b/test/CodeGen/NVPTX/refl1.ll
@@ -0,0 +1,37 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 -drvcuda | FileCheck %s
+
+; Function Attrs: nounwind
+; CHECK: .entry foo
+define void @foo(float* nocapture %a) #0 {
+ %val = load float* %a
+ %tan = tail call fastcc float @__nv_fast_tanf(float %val)
+ store float %tan, float* %a
+ ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.nvvm.sin.approx.ftz.f(float) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.nvvm.cos.approx.ftz.f(float) #1
+
+; Function Attrs: nounwind readnone
+declare float @llvm.nvvm.div.approx.ftz.f(float, float) #1
+
+; Function Attrs: alwaysinline inlinehint nounwind readnone
+; CHECK: .func (.param .b32 func_retval0) __nv_fast_tanf
+define internal fastcc float @__nv_fast_tanf(float %a) #2 {
+entry:
+ %0 = tail call float @llvm.nvvm.sin.approx.ftz.f(float %a)
+ %1 = tail call float @llvm.nvvm.cos.approx.ftz.f(float %a)
+ %2 = tail call float @llvm.nvvm.div.approx.ftz.f(float %0, float %1)
+ ret float %2
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { alwaysinline inlinehint nounwind readnone }
+
+!nvvm.annotations = !{!0}
+
+!0 = metadata !{void (float*)* @foo, metadata !"kernel", i32 1}
diff --git a/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll b/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
index ea7de9847ea7..40f46fda468d 100644
--- a/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
+++ b/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=ppc64 | grep lwzx
+; RUN: llc < %s -march=ppc64 | FileCheck %s
%struct.__db_region = type { %struct.__mutex_t, [4 x i8], %struct.anon, i32, [1 x i32] }
%struct.__mutex_t = type { i32 }
@@ -11,6 +11,10 @@ entry:
%tmp = load i32* %ttype, align 4 ; <i32> [#uses=1]
%tmp1 = call i32 (...)* @bork( i32 %tmp ) ; <i32> [#uses=0]
ret void
+
+; CHECK: @foo
+; CHECK: lwzx
+; CHECK: blr
}
declare i32 @bork(...)
diff --git a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
index 47d985c5f755..3acd01dcb273 100644
--- a/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
@@ -47,11 +47,11 @@ for.body4.us: ; preds = %for.body4.lr.ph.us,
%sext = shl i64 %sub5.us, 32
%idxprom.us = ashr exact i64 %sext, 32
%arrayidx.us = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us
- %2 = load float* %arrayidx.us, align 4, !tbaa !5
+ %2 = load float* %arrayidx.us, align 4
%arrayidx7.us = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
- %3 = load float* %arrayidx7.us, align 4, !tbaa !5
+ %3 = load float* %arrayidx7.us, align 4
%add8.us = fadd float %3, %2
- store float %add8.us, float* %arrayidx7.us, align 4, !tbaa !5
+ store float %add8.us, float* %arrayidx7.us, align 4
%indvars.iv.next = add i64 %indvars.iv, %1
%4 = trunc i64 %indvars.iv.next to i32
%cmp3.us = icmp slt i32 %4, 32000
@@ -82,11 +82,11 @@ for.body4.us.1: ; preds = %for.body4.us.1, %fo
%sext23 = shl i64 %sub5.us.1, 32
%idxprom.us.1 = ashr exact i64 %sext23, 32
%arrayidx.us.1 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.1
- %5 = load float* %arrayidx.us.1, align 4, !tbaa !5
+ %5 = load float* %arrayidx.us.1, align 4
%arrayidx7.us.1 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.1
- %6 = load float* %arrayidx7.us.1, align 4, !tbaa !5
+ %6 = load float* %arrayidx7.us.1, align 4
%add8.us.1 = fadd float %6, %5
- store float %add8.us.1, float* %arrayidx7.us.1, align 4, !tbaa !5
+ store float %add8.us.1, float* %arrayidx7.us.1, align 4
%indvars.iv.next.1 = add i64 %indvars.iv.1, %1
%7 = trunc i64 %indvars.iv.next.1 to i32
%cmp3.us.1 = icmp slt i32 %7, 32000
@@ -104,11 +104,11 @@ for.body4.us.2: ; preds = %for.body4.us.2, %fo
%sext24 = shl i64 %sub5.us.2, 32
%idxprom.us.2 = ashr exact i64 %sext24, 32
%arrayidx.us.2 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.2
- %8 = load float* %arrayidx.us.2, align 4, !tbaa !5
+ %8 = load float* %arrayidx.us.2, align 4
%arrayidx7.us.2 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.2
- %9 = load float* %arrayidx7.us.2, align 4, !tbaa !5
+ %9 = load float* %arrayidx7.us.2, align 4
%add8.us.2 = fadd float %9, %8
- store float %add8.us.2, float* %arrayidx7.us.2, align 4, !tbaa !5
+ store float %add8.us.2, float* %arrayidx7.us.2, align 4
%indvars.iv.next.2 = add i64 %indvars.iv.2, %1
%10 = trunc i64 %indvars.iv.next.2 to i32
%cmp3.us.2 = icmp slt i32 %10, 32000
@@ -126,11 +126,11 @@ for.body4.us.3: ; preds = %for.body4.us.3, %fo
%sext25 = shl i64 %sub5.us.3, 32
%idxprom.us.3 = ashr exact i64 %sext25, 32
%arrayidx.us.3 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.3
- %11 = load float* %arrayidx.us.3, align 4, !tbaa !5
+ %11 = load float* %arrayidx.us.3, align 4
%arrayidx7.us.3 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.3
- %12 = load float* %arrayidx7.us.3, align 4, !tbaa !5
+ %12 = load float* %arrayidx7.us.3, align 4
%add8.us.3 = fadd float %12, %11
- store float %add8.us.3, float* %arrayidx7.us.3, align 4, !tbaa !5
+ store float %add8.us.3, float* %arrayidx7.us.3, align 4
%indvars.iv.next.3 = add i64 %indvars.iv.3, %1
%13 = trunc i64 %indvars.iv.next.3 to i32
%cmp3.us.3 = icmp slt i32 %13, 32000
@@ -148,11 +148,11 @@ for.body4.us.4: ; preds = %for.body4.us.4, %fo
%sext26 = shl i64 %sub5.us.4, 32
%idxprom.us.4 = ashr exact i64 %sext26, 32
%arrayidx.us.4 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.4
- %14 = load float* %arrayidx.us.4, align 4, !tbaa !5
+ %14 = load float* %arrayidx.us.4, align 4
%arrayidx7.us.4 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.4
- %15 = load float* %arrayidx7.us.4, align 4, !tbaa !5
+ %15 = load float* %arrayidx7.us.4, align 4
%add8.us.4 = fadd float %15, %14
- store float %add8.us.4, float* %arrayidx7.us.4, align 4, !tbaa !5
+ store float %add8.us.4, float* %arrayidx7.us.4, align 4
%indvars.iv.next.4 = add i64 %indvars.iv.4, %1
%16 = trunc i64 %indvars.iv.next.4 to i32
%cmp3.us.4 = icmp slt i32 %16, 32000
@@ -183,9 +183,4 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
declare i32 @puts(i8* nocapture) nounwind
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
!3 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!4 = metadata !{metadata !"int", metadata !1}
-!5 = metadata !{metadata !"float", metadata !1}
diff --git a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
index 52bf6c7e5017..4a1a51237ffd 100644
--- a/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
+++ b/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
@@ -35,7 +35,7 @@ entry:
for.body: ; preds = %for.end17, %entry
%nl.041 = phi i32 [ 0, %entry ], [ %inc22, %for.end17 ]
- %0 = load float* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0, i64 0), align 16, !tbaa !5
+ %0 = load float* getelementptr inbounds ([256 x [256 x float]]* @aa, i64 0, i64 0, i64 0), align 16
br label %for.cond5.preheader
for.cond5.preheader: ; preds = %for.inc15, %for.body
@@ -51,7 +51,7 @@ for.body7: ; preds = %for.body7, %for.con
%xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
%yindex.233 = phi i32 [ %yindex.137, %for.cond5.preheader ], [ %yindex.3.15, %for.body7 ]
%arrayidx9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
- %1 = load float* %arrayidx9, align 16, !tbaa !5
+ %1 = load float* %arrayidx9, align 16
%cmp10 = fcmp ogt float %1, %max.235
%2 = trunc i64 %indvars.iv to i32
%yindex.3 = select i1 %cmp10, i32 %2, i32 %yindex.233
@@ -60,7 +60,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3 = select i1 %cmp10, float %1, float %max.235
%indvars.iv.next45 = or i64 %indvars.iv, 1
%arrayidx9.1 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
- %4 = load float* %arrayidx9.1, align 4, !tbaa !5
+ %4 = load float* %arrayidx9.1, align 4
%cmp10.1 = fcmp ogt float %4, %max.3
%5 = trunc i64 %indvars.iv.next45 to i32
%yindex.3.1 = select i1 %cmp10.1, i32 %5, i32 %yindex.3
@@ -68,7 +68,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.1 = select i1 %cmp10.1, float %4, float %max.3
%indvars.iv.next.146 = or i64 %indvars.iv, 2
%arrayidx9.2 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
- %6 = load float* %arrayidx9.2, align 8, !tbaa !5
+ %6 = load float* %arrayidx9.2, align 8
%cmp10.2 = fcmp ogt float %6, %max.3.1
%7 = trunc i64 %indvars.iv.next.146 to i32
%yindex.3.2 = select i1 %cmp10.2, i32 %7, i32 %yindex.3.1
@@ -76,7 +76,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.2 = select i1 %cmp10.2, float %6, float %max.3.1
%indvars.iv.next.247 = or i64 %indvars.iv, 3
%arrayidx9.3 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
- %8 = load float* %arrayidx9.3, align 4, !tbaa !5
+ %8 = load float* %arrayidx9.3, align 4
%cmp10.3 = fcmp ogt float %8, %max.3.2
%9 = trunc i64 %indvars.iv.next.247 to i32
%yindex.3.3 = select i1 %cmp10.3, i32 %9, i32 %yindex.3.2
@@ -84,7 +84,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.3 = select i1 %cmp10.3, float %8, float %max.3.2
%indvars.iv.next.348 = or i64 %indvars.iv, 4
%arrayidx9.4 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
- %10 = load float* %arrayidx9.4, align 16, !tbaa !5
+ %10 = load float* %arrayidx9.4, align 16
%cmp10.4 = fcmp ogt float %10, %max.3.3
%11 = trunc i64 %indvars.iv.next.348 to i32
%yindex.3.4 = select i1 %cmp10.4, i32 %11, i32 %yindex.3.3
@@ -92,7 +92,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.4 = select i1 %cmp10.4, float %10, float %max.3.3
%indvars.iv.next.449 = or i64 %indvars.iv, 5
%arrayidx9.5 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
- %12 = load float* %arrayidx9.5, align 4, !tbaa !5
+ %12 = load float* %arrayidx9.5, align 4
%cmp10.5 = fcmp ogt float %12, %max.3.4
%13 = trunc i64 %indvars.iv.next.449 to i32
%yindex.3.5 = select i1 %cmp10.5, i32 %13, i32 %yindex.3.4
@@ -100,7 +100,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.5 = select i1 %cmp10.5, float %12, float %max.3.4
%indvars.iv.next.550 = or i64 %indvars.iv, 6
%arrayidx9.6 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
- %14 = load float* %arrayidx9.6, align 8, !tbaa !5
+ %14 = load float* %arrayidx9.6, align 8
%cmp10.6 = fcmp ogt float %14, %max.3.5
%15 = trunc i64 %indvars.iv.next.550 to i32
%yindex.3.6 = select i1 %cmp10.6, i32 %15, i32 %yindex.3.5
@@ -108,7 +108,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.6 = select i1 %cmp10.6, float %14, float %max.3.5
%indvars.iv.next.651 = or i64 %indvars.iv, 7
%arrayidx9.7 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
- %16 = load float* %arrayidx9.7, align 4, !tbaa !5
+ %16 = load float* %arrayidx9.7, align 4
%cmp10.7 = fcmp ogt float %16, %max.3.6
%17 = trunc i64 %indvars.iv.next.651 to i32
%yindex.3.7 = select i1 %cmp10.7, i32 %17, i32 %yindex.3.6
@@ -116,7 +116,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.7 = select i1 %cmp10.7, float %16, float %max.3.6
%indvars.iv.next.752 = or i64 %indvars.iv, 8
%arrayidx9.8 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
- %18 = load float* %arrayidx9.8, align 16, !tbaa !5
+ %18 = load float* %arrayidx9.8, align 16
%cmp10.8 = fcmp ogt float %18, %max.3.7
%19 = trunc i64 %indvars.iv.next.752 to i32
%yindex.3.8 = select i1 %cmp10.8, i32 %19, i32 %yindex.3.7
@@ -124,7 +124,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.8 = select i1 %cmp10.8, float %18, float %max.3.7
%indvars.iv.next.853 = or i64 %indvars.iv, 9
%arrayidx9.9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
- %20 = load float* %arrayidx9.9, align 4, !tbaa !5
+ %20 = load float* %arrayidx9.9, align 4
%cmp10.9 = fcmp ogt float %20, %max.3.8
%21 = trunc i64 %indvars.iv.next.853 to i32
%yindex.3.9 = select i1 %cmp10.9, i32 %21, i32 %yindex.3.8
@@ -132,7 +132,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.9 = select i1 %cmp10.9, float %20, float %max.3.8
%indvars.iv.next.954 = or i64 %indvars.iv, 10
%arrayidx9.10 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
- %22 = load float* %arrayidx9.10, align 8, !tbaa !5
+ %22 = load float* %arrayidx9.10, align 8
%cmp10.10 = fcmp ogt float %22, %max.3.9
%23 = trunc i64 %indvars.iv.next.954 to i32
%yindex.3.10 = select i1 %cmp10.10, i32 %23, i32 %yindex.3.9
@@ -140,7 +140,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.10 = select i1 %cmp10.10, float %22, float %max.3.9
%indvars.iv.next.1055 = or i64 %indvars.iv, 11
%arrayidx9.11 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
- %24 = load float* %arrayidx9.11, align 4, !tbaa !5
+ %24 = load float* %arrayidx9.11, align 4
%cmp10.11 = fcmp ogt float %24, %max.3.10
%25 = trunc i64 %indvars.iv.next.1055 to i32
%yindex.3.11 = select i1 %cmp10.11, i32 %25, i32 %yindex.3.10
@@ -148,7 +148,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.11 = select i1 %cmp10.11, float %24, float %max.3.10
%indvars.iv.next.1156 = or i64 %indvars.iv, 12
%arrayidx9.12 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
- %26 = load float* %arrayidx9.12, align 16, !tbaa !5
+ %26 = load float* %arrayidx9.12, align 16
%cmp10.12 = fcmp ogt float %26, %max.3.11
%27 = trunc i64 %indvars.iv.next.1156 to i32
%yindex.3.12 = select i1 %cmp10.12, i32 %27, i32 %yindex.3.11
@@ -156,7 +156,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.12 = select i1 %cmp10.12, float %26, float %max.3.11
%indvars.iv.next.1257 = or i64 %indvars.iv, 13
%arrayidx9.13 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
- %28 = load float* %arrayidx9.13, align 4, !tbaa !5
+ %28 = load float* %arrayidx9.13, align 4
%cmp10.13 = fcmp ogt float %28, %max.3.12
%29 = trunc i64 %indvars.iv.next.1257 to i32
%yindex.3.13 = select i1 %cmp10.13, i32 %29, i32 %yindex.3.12
@@ -164,7 +164,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.13 = select i1 %cmp10.13, float %28, float %max.3.12
%indvars.iv.next.1358 = or i64 %indvars.iv, 14
%arrayidx9.14 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
- %30 = load float* %arrayidx9.14, align 8, !tbaa !5
+ %30 = load float* %arrayidx9.14, align 8
%cmp10.14 = fcmp ogt float %30, %max.3.13
%31 = trunc i64 %indvars.iv.next.1358 to i32
%yindex.3.14 = select i1 %cmp10.14, i32 %31, i32 %yindex.3.13
@@ -172,7 +172,7 @@ for.body7: ; preds = %for.body7, %for.con
%max.3.14 = select i1 %cmp10.14, float %30, float %max.3.13
%indvars.iv.next.1459 = or i64 %indvars.iv, 15
%arrayidx9.15 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
- %32 = load float* %arrayidx9.15, align 4, !tbaa !5
+ %32 = load float* %arrayidx9.15, align 4
%cmp10.15 = fcmp ogt float %32, %max.3.14
%33 = trunc i64 %indvars.iv.next.1459 to i32
%yindex.3.15 = select i1 %cmp10.15, i32 %33, i32 %yindex.3.14
@@ -208,7 +208,7 @@ for.end23: ; preds = %for.end17
%add29 = fadd float %add, 1.000000e+00
%add31 = fadd float %add29, %conv18
%add32 = fadd float %add31, 1.000000e+00
- store float %add32, float* @temp, align 4, !tbaa !5
+ store float %add32, float* @temp, align 4
tail call void @check(i32 -1)
ret i32 0
}
@@ -217,9 +217,4 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
declare i32 @puts(i8* nocapture) nounwind
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
!3 = metadata !{metadata !"branch_weights", i32 64, i32 4}
-!4 = metadata !{metadata !"int", metadata !1}
-!5 = metadata !{metadata !"float", metadata !1}
diff --git a/test/CodeGen/PowerPC/bdzlr.ll b/test/CodeGen/PowerPC/bdzlr.ll
new file mode 100644
index 000000000000..656a85860df0
--- /dev/null
+++ b/test/CodeGen/PowerPC/bdzlr.ll
@@ -0,0 +1,64 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+%struct.lua_TValue.17.692 = type { %union.Value.16.691, i32 }
+%union.Value.16.691 = type { %union.GCObject.15.690* }
+%union.GCObject.15.690 = type { %struct.lua_State.14.689 }
+%struct.lua_State.14.689 = type { %union.GCObject.15.690*, i8, i8, i8, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.global_State.10.685*, %struct.CallInfo.11.686*, i32*, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.CallInfo.11.686*, %struct.CallInfo.11.686*, i32, i32, i16, i16, i8, i8, i32, i32, void (%struct.lua_State.14.689*, %struct.lua_Debug.12.687*)*, %struct.lua_TValue.17.692, %struct.lua_TValue.17.692, %union.GCObject.15.690*, %union.GCObject.15.690*, %struct.lua_longjmp.13.688*, i64 }
+%struct.global_State.10.685 = type { %struct.stringtable.0.675, i8* (i8*, i8*, i64, i64)*, i8*, i8, i8, i32, %union.GCObject.15.690*, %union.GCObject.15.690**, %union.GCObject.15.690*, %union.GCObject.15.690*, %union.GCObject.15.690*, %union.GCObject.15.690*, %struct.Mbuffer.1.676, i64, i64, i64, i64, i32, i32, i32 (%struct.lua_State.14.689*)*, %struct.lua_TValue.17.692, %struct.lua_State.14.689*, %struct.UpVal.3.678, [9 x %struct.Table.7.682*], [17 x %union.TString.9.684*] }
+%struct.stringtable.0.675 = type { %union.GCObject.15.690**, i32, i32 }
+%struct.Mbuffer.1.676 = type { i8*, i64, i64 }
+%struct.UpVal.3.678 = type { %union.GCObject.15.690*, i8, i8, %struct.lua_TValue.17.692*, %union.anon.2.677 }
+%union.anon.2.677 = type { %struct.lua_TValue.17.692 }
+%struct.Table.7.682 = type { %union.GCObject.15.690*, i8, i8, i8, i8, %struct.Table.7.682*, %struct.lua_TValue.17.692*, %struct.Node.6.681*, %struct.Node.6.681*, %union.GCObject.15.690*, i32 }
+%struct.Node.6.681 = type { %struct.lua_TValue.17.692, %union.TKey.5.680 }
+%union.TKey.5.680 = type { %struct.anon.0.4.679 }
+%struct.anon.0.4.679 = type { %union.Value.16.691, i32, %struct.Node.6.681* }
+%union.TString.9.684 = type { %struct.anon.1.8.683 }
+%struct.anon.1.8.683 = type { %union.GCObject.15.690*, i8, i8, i8, i32, i64 }
+%struct.CallInfo.11.686 = type { %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, %struct.lua_TValue.17.692*, i32*, i32, i32 }
+%struct.lua_Debug.12.687 = type { i32, i8*, i8*, i8*, i8*, i32, i32, i32, i32, [60 x i8], i32 }
+%struct.lua_longjmp.13.688 = type opaque
+
+define void @lua_xmove(i32 signext %n) #0 {
+entry:
+ br i1 undef, label %for.end, label %if.end
+
+if.end: ; preds = %entry
+ br i1 undef, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %if.end
+ br label %for.body
+
+for.body: ; preds = %for.body.for.body_crit_edge, %for.body.lr.ph
+ %0 = phi %struct.lua_TValue.17.692* [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
+ %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body.for.body_crit_edge ]
+ %tt = getelementptr inbounds %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
+ %1 = load i32* %tt, align 4, !tbaa !0
+ store i32 %1, i32* undef, align 4, !tbaa !0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge
+
+for.body.for.body_crit_edge: ; preds = %for.body
+ %.pre = load %struct.lua_TValue.17.692** undef, align 8, !tbaa !3
+ br label %for.body
+
+for.end: ; preds = %for.body, %if.end, %entry
+ ret void
+
+; CHECK: @lua_xmove
+; CHECK: bnelr
+; CHECK: bnelr
+; CHECK: bdzlr
+; CHECK-NOT: blr
+}
+
+attributes #0 = { nounwind }
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"any pointer", metadata !1}
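The new test pins the PPC counter-loop epilogue. A sketch of the branch forms the CHECK lines match, operands omitted (the CHECK-NOT insists the bdzlr is the function's final exit):

; bnelr   ; conditional blr: return if CR0.eq is clear
; bdzlr   ; CTR -= 1; return if the decremented CTR == 0
; blr     ; unconditional return -- must not appear after the bdzlr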
diff --git a/test/CodeGen/PowerPC/crsave.ll b/test/CodeGen/PowerPC/crsave.ll
index 3e98dbd254d9..f1cbc5afa8ac 100644
--- a/test/CodeGen/PowerPC/crsave.ll
+++ b/test/CodeGen/PowerPC/crsave.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -disable-fp-elim -mtriple=powerpc-unknown-linux-gnu < %s | FileCheck %s -check-prefix=PPC32
-; RUN: llc -O0 -disable-fp-elim -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=PPC64
+; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s -check-prefix=PPC64
declare void @foo()
@@ -13,15 +13,19 @@ entry:
ret i32 %1
}
+; PPC32: stw 31, -4(1)
+; PPC32: stwu 1, -32(1)
; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, {{[0-9]+}}(31)
-; PPC32: lwz 12, {{[0-9]+}}(31)
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32: lwz 12, 24(31)
; PPC32-NEXT: mtcrf 32, 12
; PPC64: mfcr 12
-; PPC64-NEXT: stw 12, 8(1)
+; PPC64: stw 12, 8(1)
+; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
+; PPC64: addi 1, 1, [[AMT]]
; PPC64: lwz 12, 8(1)
-; PPC64-NEXT: mtcrf 32, 12
+; PPC64: mtcrf 32, 12
define i32 @test_cr234() nounwind {
entry:
@@ -33,17 +37,21 @@ entry:
ret i32 %1
}
+; PPC32: stw 31, -4(1)
+; PPC32: stwu 1, -32(1)
; PPC32: mfcr 12
-; PPC32-NEXT: stw 12, {{[0-9]+}}(31)
-; PPC32: lwz 12, {{[0-9]+}}(31)
+; PPC32-NEXT: stw 12, 24(31)
+; PPC32: lwz 12, 24(31)
; PPC32-NEXT: mtcrf 32, 12
; PPC32-NEXT: mtcrf 16, 12
; PPC32-NEXT: mtcrf 8, 12
; PPC64: mfcr 12
-; PPC64-NEXT: stw 12, 8(1)
+; PPC64: stw 12, 8(1)
+; PPC64: stdu 1, -[[AMT:[0-9]+]](1)
+; PPC64: addi 1, 1, [[AMT]]
; PPC64: lwz 12, 8(1)
-; PPC64-NEXT: mtcrf 32, 12
-; PPC64-NEXT: mtcrf 16, 12
-; PPC64-NEXT: mtcrf 8, 12
+; PPC64: mtcrf 32, 12
+; PPC64: mtcrf 16, 12
+; PPC64: mtcrf 8, 12
diff --git a/test/CodeGen/PowerPC/ctrloop-s000.ll b/test/CodeGen/PowerPC/ctrloop-s000.ll
index dcea06f29e7c..4d8ef50501f2 100644
--- a/test/CodeGen/PowerPC/ctrloop-s000.ll
+++ b/test/CodeGen/PowerPC/ctrloop-s000.ll
@@ -36,100 +36,100 @@ for.cond1.preheader: ; preds = %for.end, %entry
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
%arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %0 = load double* %arrayidx, align 32, !tbaa !0
+ %0 = load double* %arrayidx, align 32
%add = fadd double %0, 1.000000e+00
%arrayidx5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
- store double %add, double* %arrayidx5, align 32, !tbaa !0
+ store double %add, double* %arrayidx5, align 32
%indvars.iv.next11 = or i64 %indvars.iv, 1
%arrayidx.1 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
- %1 = load double* %arrayidx.1, align 8, !tbaa !0
+ %1 = load double* %arrayidx.1, align 8
%add.1 = fadd double %1, 1.000000e+00
%arrayidx5.1 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
- store double %add.1, double* %arrayidx5.1, align 8, !tbaa !0
+ store double %add.1, double* %arrayidx5.1, align 8
%indvars.iv.next.112 = or i64 %indvars.iv, 2
%arrayidx.2 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
- %2 = load double* %arrayidx.2, align 16, !tbaa !0
+ %2 = load double* %arrayidx.2, align 16
%add.2 = fadd double %2, 1.000000e+00
%arrayidx5.2 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
- store double %add.2, double* %arrayidx5.2, align 16, !tbaa !0
+ store double %add.2, double* %arrayidx5.2, align 16
%indvars.iv.next.213 = or i64 %indvars.iv, 3
%arrayidx.3 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
- %3 = load double* %arrayidx.3, align 8, !tbaa !0
+ %3 = load double* %arrayidx.3, align 8
%add.3 = fadd double %3, 1.000000e+00
%arrayidx5.3 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
- store double %add.3, double* %arrayidx5.3, align 8, !tbaa !0
+ store double %add.3, double* %arrayidx5.3, align 8
%indvars.iv.next.314 = or i64 %indvars.iv, 4
%arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
- %4 = load double* %arrayidx.4, align 32, !tbaa !0
+ %4 = load double* %arrayidx.4, align 32
%add.4 = fadd double %4, 1.000000e+00
%arrayidx5.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
- store double %add.4, double* %arrayidx5.4, align 32, !tbaa !0
+ store double %add.4, double* %arrayidx5.4, align 32
%indvars.iv.next.415 = or i64 %indvars.iv, 5
%arrayidx.5 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
- %5 = load double* %arrayidx.5, align 8, !tbaa !0
+ %5 = load double* %arrayidx.5, align 8
%add.5 = fadd double %5, 1.000000e+00
%arrayidx5.5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
- store double %add.5, double* %arrayidx5.5, align 8, !tbaa !0
+ store double %add.5, double* %arrayidx5.5, align 8
%indvars.iv.next.516 = or i64 %indvars.iv, 6
%arrayidx.6 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
- %6 = load double* %arrayidx.6, align 16, !tbaa !0
+ %6 = load double* %arrayidx.6, align 16
%add.6 = fadd double %6, 1.000000e+00
%arrayidx5.6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
- store double %add.6, double* %arrayidx5.6, align 16, !tbaa !0
+ store double %add.6, double* %arrayidx5.6, align 16
%indvars.iv.next.617 = or i64 %indvars.iv, 7
%arrayidx.7 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
- %7 = load double* %arrayidx.7, align 8, !tbaa !0
+ %7 = load double* %arrayidx.7, align 8
%add.7 = fadd double %7, 1.000000e+00
%arrayidx5.7 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
- store double %add.7, double* %arrayidx5.7, align 8, !tbaa !0
+ store double %add.7, double* %arrayidx5.7, align 8
%indvars.iv.next.718 = or i64 %indvars.iv, 8
%arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
- %8 = load double* %arrayidx.8, align 32, !tbaa !0
+ %8 = load double* %arrayidx.8, align 32
%add.8 = fadd double %8, 1.000000e+00
%arrayidx5.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
- store double %add.8, double* %arrayidx5.8, align 32, !tbaa !0
+ store double %add.8, double* %arrayidx5.8, align 32
%indvars.iv.next.819 = or i64 %indvars.iv, 9
%arrayidx.9 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
- %9 = load double* %arrayidx.9, align 8, !tbaa !0
+ %9 = load double* %arrayidx.9, align 8
%add.9 = fadd double %9, 1.000000e+00
%arrayidx5.9 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
- store double %add.9, double* %arrayidx5.9, align 8, !tbaa !0
+ store double %add.9, double* %arrayidx5.9, align 8
%indvars.iv.next.920 = or i64 %indvars.iv, 10
%arrayidx.10 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
- %10 = load double* %arrayidx.10, align 16, !tbaa !0
+ %10 = load double* %arrayidx.10, align 16
%add.10 = fadd double %10, 1.000000e+00
%arrayidx5.10 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
- store double %add.10, double* %arrayidx5.10, align 16, !tbaa !0
+ store double %add.10, double* %arrayidx5.10, align 16
%indvars.iv.next.1021 = or i64 %indvars.iv, 11
%arrayidx.11 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
- %11 = load double* %arrayidx.11, align 8, !tbaa !0
+ %11 = load double* %arrayidx.11, align 8
%add.11 = fadd double %11, 1.000000e+00
%arrayidx5.11 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
- store double %add.11, double* %arrayidx5.11, align 8, !tbaa !0
+ store double %add.11, double* %arrayidx5.11, align 8
%indvars.iv.next.1122 = or i64 %indvars.iv, 12
%arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
- %12 = load double* %arrayidx.12, align 32, !tbaa !0
+ %12 = load double* %arrayidx.12, align 32
%add.12 = fadd double %12, 1.000000e+00
%arrayidx5.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
- store double %add.12, double* %arrayidx5.12, align 32, !tbaa !0
+ store double %add.12, double* %arrayidx5.12, align 32
%indvars.iv.next.1223 = or i64 %indvars.iv, 13
%arrayidx.13 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
- %13 = load double* %arrayidx.13, align 8, !tbaa !0
+ %13 = load double* %arrayidx.13, align 8
%add.13 = fadd double %13, 1.000000e+00
%arrayidx5.13 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
- store double %add.13, double* %arrayidx5.13, align 8, !tbaa !0
+ store double %add.13, double* %arrayidx5.13, align 8
%indvars.iv.next.1324 = or i64 %indvars.iv, 14
%arrayidx.14 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
- %14 = load double* %arrayidx.14, align 16, !tbaa !0
+ %14 = load double* %arrayidx.14, align 16
%add.14 = fadd double %14, 1.000000e+00
%arrayidx5.14 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
- store double %add.14, double* %arrayidx5.14, align 16, !tbaa !0
+ store double %add.14, double* %arrayidx5.14, align 16
%indvars.iv.next.1425 = or i64 %indvars.iv, 15
%arrayidx.15 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
- %15 = load double* %arrayidx.15, align 8, !tbaa !0
+ %15 = load double* %arrayidx.15, align 8
%add.15 = fadd double %15, 1.000000e+00
%arrayidx5.15 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
- store double %add.15, double* %arrayidx5.15, align 8, !tbaa !0
+ store double %add.15, double* %arrayidx5.15, align 8
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
%exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
@@ -150,7 +150,3 @@ for.end8: ; preds = %for.end
}
declare i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
-
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/ctrloop-sums.ll b/test/CodeGen/PowerPC/ctrloop-sums.ll
index eae8c38eee0e..d9965f280e72 100644
--- a/test/CodeGen/PowerPC/ctrloop-sums.ll
+++ b/test/CodeGen/PowerPC/ctrloop-sums.ll
@@ -24,7 +24,7 @@ for.body3.us: ; preds = %for.body3.us, %for.
%indvars.iv = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next, %for.body3.us ]
%Result.111.us = phi i32 [ %Result.014.us, %for.body3.lr.ph.us ], [ %add.us, %for.body3.us ]
%arrayidx5.us = getelementptr inbounds [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
- %0 = load i32* %arrayidx5.us, align 4, !tbaa !0
+ %0 = load i32* %arrayidx5.us, align 4
%add.us = add nsw i32 %0, %Result.111.us
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -60,7 +60,7 @@ for.body: ; preds = %for.body, %entry
%0 = trunc i64 %indvars.iv33 to i32
%sub = sub i32 0, %0
%arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
- store i32 %sub, i32* %arrayidx2, align 4, !tbaa !0
+ store i32 %sub, i32* %arrayidx2, align 4
%indvars.iv.next34 = add i64 %indvars.iv33, 1
%lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
%exitcond36 = icmp eq i32 %lftr.wideiv35, 100
@@ -81,7 +81,7 @@ if.then: ; preds = %for.body8
%3 = add i64 %indvars.iv, %indvars.iv29
%arrayidx13 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
%4 = trunc i64 %3 to i32
- store i32 %4, i32* %arrayidx13, align 4, !tbaa !0
+ store i32 %4, i32* %arrayidx13, align 4
br label %for.inc14
for.inc14: ; preds = %for.body8, %if.then
@@ -106,7 +106,7 @@ for.body3.us.i: ; preds = %for.body3.lr.ph.us.
%indvars.iv.i = phi i64 [ 0, %for.body3.lr.ph.us.i ], [ %indvars.iv.next.i, %for.body3.us.i ]
%Result.111.us.i = phi i32 [ %Result.014.us.i, %for.body3.lr.ph.us.i ], [ %add.us.i, %for.body3.us.i ]
%arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
- %5 = load i32* %arrayidx5.us.i, align 4, !tbaa !0
+ %5 = load i32* %arrayidx5.us.i, align 4
%add.us.i = add nsw i32 %5, %Result.111.us.i
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%lftr.wideiv = trunc i64 %indvars.iv.next.i to i32
@@ -128,7 +128,3 @@ SumArray.exit: ; preds = %for.inc6.us.i
}
declare i32 @printf(i8* nocapture, ...) nounwind
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/ctrloops.ll b/test/CodeGen/PowerPC/ctrloops.ll
index 4b6f7b94af4a..f11e332d5fbc 100644
--- a/test/CodeGen/PowerPC/ctrloops.ll
+++ b/test/CodeGen/PowerPC/ctrloops.ll
@@ -10,9 +10,9 @@ entry:
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load volatile i32* @a, align 4, !tbaa !0
+ %0 = load volatile i32* @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4, !tbaa !0
+ store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 2048
br i1 %exitcond, label %for.end, label %for.body
@@ -34,9 +34,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %0 = load volatile i32* @a, align 4, !tbaa !0
+ %0 = load volatile i32* @a, align 4
%add = add nsw i32 %0, %c
- store volatile i32 %add, i32* @a, align 4, !tbaa !0
+ store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
@@ -58,9 +58,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%mul = mul nsw i32 %i.02, %c
- %0 = load volatile i32* @a, align 4, !tbaa !0
+ %0 = load volatile i32* @a, align 4
%add = add nsw i32 %0, %mul
- store volatile i32 %add, i32* @a, align 4, !tbaa !0
+ store volatile i32 %add, i32* @a, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %d
br i1 %exitcond, label %for.end, label %for.body
@@ -73,7 +73,3 @@ for.end: ; preds = %for.body, %entry
; CHECK-NOT: cmplwi
; CHECK: bdnz
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
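These checks encode what the CTR-loops transformation does to a countable latch: the trip count is loaded into CTR once, and the increment/compare/branch sequence collapses to a single bdnz, so no cmplwi may remain. A sketch for @test1, register numbers illustrative:

;   li 4, 2048          ; trip count
;   mtctr 4             ; moved into the count register once, before the loop
; .LBB0_1:
;   ...                 ; loop body
;   bdnz .LBB0_1        ; CTR -= 1; branch back while CTR != 0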
diff --git a/test/CodeGen/PowerPC/early-ret.ll b/test/CodeGen/PowerPC/early-ret.ll
new file mode 100644
index 000000000000..7d3e225a1e29
--- /dev/null
+++ b/test/CodeGen/PowerPC/early-ret.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define void @foo(i32* %P) #0 {
+entry:
+ %tobool = icmp eq i32* %P, null
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then: ; preds = %entry
+ store i32 0, i32* %P, align 4
+ br label %if.end
+
+if.end: ; preds = %entry, %if.then
+ ret void
+
+; CHECK: @foo
+; CHECK: beqlr
+; CHECK: blr
+}
+
+define void @bar(i32* %P, i32* %Q) #0 {
+entry:
+ %tobool = icmp eq i32* %P, null
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ store i32 0, i32* %P, align 4
+ %tobool1 = icmp eq i32* %Q, null
+ br i1 %tobool1, label %if.end3, label %if.then2
+
+if.then2: ; preds = %if.then
+ store i32 1, i32* %Q, align 4
+ br label %if.end3
+
+if.else: ; preds = %entry
+ store i32 0, i32* %Q, align 4
+ br label %if.end3
+
+if.end3: ; preds = %if.then, %if.then2, %if.else
+ ret void
+
+; CHECK: @bar
+; CHECK: beqlr
+; CHECK: blr
+}
+
+attributes #0 = { nounwind }
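The pattern exercised here is early-return formation: a conditional branch whose target block contains only a return is folded into a conditional blr. A before/after sketch for @foo, labels and registers illustrative:

;   cmpldi 3, 0          ;   cmpldi 3, 0
;   beq .LBB0_2          ;   beqlr          <-- conditional return
;   stw ...              ;   stw ...
;   blr                  ;   blr
; .LBB0_2:
;   blr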
diff --git a/test/CodeGen/PowerPC/early-ret2.ll b/test/CodeGen/PowerPC/early-ret2.ll
new file mode 100644
index 000000000000..a274e2c2658f
--- /dev/null
+++ b/test/CodeGen/PowerPC/early-ret2.ll
@@ -0,0 +1,25 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define void @_Z8example3iPiS_() #0 {
+entry:
+ br i1 undef, label %while.end, label %while.body.lr.ph
+
+while.body.lr.ph: ; preds = %entry
+ br i1 undef, label %while.end, label %while.body
+
+while.body: ; preds = %while.body, %while.body.lr.ph
+ br i1 false, label %while.end, label %while.body, !llvm.vectorizer.already_vectorized !0
+
+while.end: ; preds = %while.body, %while.body.lr.ph, %entry
+ ret void
+
+; CHECK: @_Z8example3iPiS_
+; CHECK: bnelr
+}
+
+attributes #0 = { noinline nounwind }
+
+!0 = metadata !{}
+
diff --git a/test/CodeGen/PowerPC/fma.ll b/test/CodeGen/PowerPC/fma.ll
index 27496f7937e6..a173c9154041 100644
--- a/test/CodeGen/PowerPC/fma.ll
+++ b/test/CodeGen/PowerPC/fma.ll
@@ -1,22 +1,30 @@
-; RUN: llc < %s -march=ppc32 -fp-contract=fast | \
-; RUN: egrep "fn?madd|fn?msub" | count 8
+; RUN: llc < %s -march=ppc32 -fp-contract=fast | FileCheck %s
define double @test_FMADD1(double %A, double %B, double %C) {
%D = fmul double %A, %B ; <double> [#uses=1]
%E = fadd double %D, %C ; <double> [#uses=1]
ret double %E
+; CHECK: test_FMADD1:
+; CHECK: fmadd
+; CHECK-NEXT: blr
}
define double @test_FMADD2(double %A, double %B, double %C) {
%D = fmul double %A, %B ; <double> [#uses=1]
%E = fadd double %D, %C ; <double> [#uses=1]
ret double %E
+; CHECK: test_FMADD2:
+; CHECK: fmadd
+; CHECK-NEXT: blr
}
define double @test_FMSUB(double %A, double %B, double %C) {
%D = fmul double %A, %B ; <double> [#uses=1]
%E = fsub double %D, %C ; <double> [#uses=1]
ret double %E
+; CHECK: test_FMSUB:
+; CHECK: fmsub
+; CHECK-NEXT: blr
}
define double @test_FNMADD1(double %A, double %B, double %C) {
@@ -24,6 +32,9 @@ define double @test_FNMADD1(double %A, double %B, double %C) {
%E = fadd double %D, %C ; <double> [#uses=1]
%F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
+; CHECK: test_FNMADD1:
+; CHECK: fnmadd
+; CHECK-NEXT: blr
}
define double @test_FNMADD2(double %A, double %B, double %C) {
@@ -31,12 +42,18 @@ define double @test_FNMADD2(double %A, double %B, double %C) {
%E = fadd double %C, %D ; <double> [#uses=1]
%F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
+; CHECK: test_FNMADD2:
+; CHECK: fnmadd
+; CHECK-NEXT: blr
}
define double @test_FNMSUB1(double %A, double %B, double %C) {
%D = fmul double %A, %B ; <double> [#uses=1]
%E = fsub double %C, %D ; <double> [#uses=1]
ret double %E
+; CHECK: test_FNMSUB1:
+; CHECK: fnmsub
+; CHECK-NEXT: blr
}
define double @test_FNMSUB2(double %A, double %B, double %C) {
@@ -44,6 +61,9 @@ define double @test_FNMSUB2(double %A, double %B, double %C) {
%E = fsub double %D, %C ; <double> [#uses=1]
%F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
ret double %F
+; CHECK: test_FNMSUB2:
+; CHECK: fnmsub
+; CHECK-NEXT: blr
}
define float @test_FNMSUBS(float %A, float %B, float %C) {
@@ -51,4 +71,7 @@ define float @test_FNMSUBS(float %A, float %B, float %C) {
%E = fsub float %D, %C ; <float> [#uses=1]
%F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
ret float %F
+; CHECK: test_FNMSUBS:
+; CHECK: fnmsubs
+; CHECK-NEXT: blr
}
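The RUN-line rewrite above trades a raw egrep match count for positional checks. The FileCheck semantics relied on: CHECK matches at or after the previous match, and CHECK-NEXT must match on the immediately following line. Illustrative output for one label:

; test_FMADD1:
;   fmadd 1, 1, 2, 3     ; matched by CHECK: fmadd
;   blr                  ; matched by CHECK-NEXT: blr -- any instruction
;                        ; between the fmadd and the blr fails the test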
diff --git a/test/CodeGen/PowerPC/fold-zero.ll b/test/CodeGen/PowerPC/fold-zero.ll
new file mode 100644
index 000000000000..c7ec6fade53e
--- /dev/null
+++ b/test/CodeGen/PowerPC/fold-zero.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test1(i1 %a, i32 %c) nounwind {
+ %x = select i1 %a, i32 %c, i32 0
+ ret i32 %x
+
+; CHECK: @test1
+; CHECK-NOT: li {{[0-9]+}}, 0
+; CHECK: isel 3, 0,
+; CHECK: blr
+}
+
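The fold being tested relies on isel treating an rA operand of 0 as the literal value zero (the same (rA|0) convention addi uses), so no separate li is needed to materialize the zero arm of the select. Sketch; the condition bit and second source register are illustrative:

;   isel 3, 0, 4, 1      ; r3 = cc ? 0 : r4 -- rA=0 reads as 0, not as r0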
diff --git a/test/CodeGen/PowerPC/fsel.ll b/test/CodeGen/PowerPC/fsel.ll
new file mode 100644
index 000000000000..8cd43e616bf6
--- /dev/null
+++ b/test/CodeGen/PowerPC/fsel.ll
@@ -0,0 +1,137 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -enable-no-infs-fp-math -enable-no-nans-fp-math | FileCheck -check-prefix=CHECK-FM %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define double @zerocmp1(double %a, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp ult double %a, 0.000000e+00
+ %z.y = select i1 %cmp, double %z, double %y
+ ret double %z.y
+
+; CHECK: @zerocmp1
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @zerocmp1
+; CHECK-FM: fsel 1, 1, 2, 3
+; CHECK-FM: blr
+}
+
+define double @zerocmp2(double %a, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp ogt double %a, 0.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+
+; CHECK: @zerocmp2
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @zerocmp2
+; CHECK-FM: fneg [[REG:[0-9]+]], 1
+; CHECK-FM: fsel 1, [[REG]], 3, 2
+; CHECK-FM: blr
+}
+
+define double @zerocmp3(double %a, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp oeq double %a, 0.000000e+00
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+
+; CHECK: @zerocmp3
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @zerocmp3
+; CHECK-FM: fsel [[REG:[0-9]+]], 1, 2, 3
+; CHECK-FM: fneg [[REG2:[0-9]+]], 1
+; CHECK-FM: fsel 1, [[REG2]], [[REG]], 3
+; CHECK-FM: blr
+}
+
+define double @min1(double %a, double %b) #0 {
+entry:
+ %cmp = fcmp ole double %a, %b
+ %cond = select i1 %cmp, double %a, double %b
+ ret double %cond
+
+; CHECK: @min1
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @min1
+; CHECK-FM: fsub [[REG:[0-9]+]], 2, 1
+; CHECK-FM: fsel 1, [[REG]], 1, 2
+; CHECK-FM: blr
+}
+
+define double @max1(double %a, double %b) #0 {
+entry:
+ %cmp = fcmp oge double %a, %b
+ %cond = select i1 %cmp, double %a, double %b
+ ret double %cond
+
+; CHECK: @max1
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @max1
+; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
+; CHECK-FM: fsel 1, [[REG]], 1, 2
+; CHECK-FM: blr
+}
+
+define double @cmp1(double %a, double %b, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp ult double %a, %b
+ %z.y = select i1 %cmp, double %z, double %y
+ ret double %z.y
+
+; CHECK: @cmp1
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @cmp1
+; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
+; CHECK-FM: fsel 1, [[REG]], 3, 4
+; CHECK-FM: blr
+}
+
+define double @cmp2(double %a, double %b, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp ogt double %a, %b
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+
+; CHECK: @cmp2
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @cmp2
+; CHECK-FM: fsub [[REG:[0-9]+]], 2, 1
+; CHECK-FM: fsel 1, [[REG]], 4, 3
+; CHECK-FM: blr
+}
+
+define double @cmp3(double %a, double %b, double %y, double %z) #0 {
+entry:
+ %cmp = fcmp oeq double %a, %b
+ %y.z = select i1 %cmp, double %y, double %z
+ ret double %y.z
+
+; CHECK: @cmp3
+; CHECK-NOT: fsel
+; CHECK: blr
+
+; CHECK-FM: @cmp3
+; CHECK-FM: fsub [[REG:[0-9]+]], 1, 2
+; CHECK-FM: fsel [[REG2:[0-9]+]], [[REG]], 3, 4
+; CHECK-FM: fneg [[REG3:[0-9]+]], [[REG]]
+; CHECK-FM: fsel 1, [[REG3]], [[REG2]], 4
+; CHECK-FM: blr
+}
+
+attributes #0 = { nounwind readnone }
+
diff --git a/test/CodeGen/PowerPC/ifcvt.ll b/test/CodeGen/PowerPC/ifcvt.ll
new file mode 100644
index 000000000000..9c966c95b72d
--- /dev/null
+++ b/test/CodeGen/PowerPC/ifcvt.ll
@@ -0,0 +1,34 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define i32 @test(i32 %a, i32 %b, i32 %c, i32 %d) {
+entry:
+ %sext82 = shl i32 %d, 16
+ %conv29 = ashr exact i32 %sext82, 16
+ %cmp = icmp slt i32 %sext82, 0
+ br i1 %cmp, label %cond.true, label %cond.false
+
+cond.true: ; preds = %sw.epilog
+ %and33 = and i32 %conv29, 32767
+ %sub34 = sub nsw i32 %a, %and33
+ br label %cond.end
+
+cond.false: ; preds = %sw.epilog
+ %add37 = add nsw i32 %conv29, %a
+ br label %cond.end
+
+; CHECK: @test
+; CHECK: add [[REG:[0-9]+]],
+; CHECK: subf [[REG2:[0-9]+]],
+; CHECK: isel {{[0-9]+}}, [[REG]], [[REG2]],
+
+cond.end: ; preds = %cond.false, %cond.true
+ %cond = phi i32 [ %sub34, %cond.true ], [ %add37, %cond.false ]
+ %sext83 = shl i32 %cond, 16
+ %conv39 = ashr exact i32 %sext83, 16
+ %add41 = sub i32 %b, %a
+ %sub43 = add i32 %add41, %conv39
+ ret i32 %sub43
+}
+
diff --git a/test/CodeGen/PowerPC/lbzux.ll b/test/CodeGen/PowerPC/lbzux.ll
index 98951306fd8e..f3158b32f390 100644
--- a/test/CodeGen/PowerPC/lbzux.ll
+++ b/test/CodeGen/PowerPC/lbzux.ll
@@ -4,7 +4,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define fastcc void @allocateSpace(i1 %cond1, i1 %cond2) nounwind {
entry:
- %0 = load i8** undef, align 8, !tbaa !0
+ %0 = load i8** undef, align 8
br i1 undef, label %return, label %lor.lhs.false
lor.lhs.false: ; preds = %entry
@@ -43,7 +43,3 @@ return: ; preds = %if.then45, %lor.lhs
; CHECK: @allocateSpace
; CHECK: lbzux
}
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/lsa.ll b/test/CodeGen/PowerPC/lsa.ll
new file mode 100644
index 000000000000..8a6338ef5a02
--- /dev/null
+++ b/test/CodeGen/PowerPC/lsa.ll
@@ -0,0 +1,43 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define signext i32 @foo() #0 {
+entry:
+ %v = alloca [8200 x i32], align 4
+ %w = alloca [8200 x i32], align 4
+ %q = alloca [8200 x i32], align 4
+ %0 = bitcast [8200 x i32]* %v to i8*
+ call void @llvm.lifetime.start(i64 32800, i8* %0) #0
+ %1 = bitcast [8200 x i32]* %w to i8*
+ call void @llvm.lifetime.start(i64 32800, i8* %1) #0
+ %2 = bitcast [8200 x i32]* %q to i8*
+ call void @llvm.lifetime.start(i64 32800, i8* %2) #0
+ %arraydecay = getelementptr inbounds [8200 x i32]* %q, i64 0, i64 0
+ %arraydecay1 = getelementptr inbounds [8200 x i32]* %v, i64 0, i64 0
+ %arraydecay2 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 0
+ call void @bar(i32* %arraydecay, i32* %arraydecay1, i32* %arraydecay2) #0
+ %3 = load i32* %arraydecay2, align 4
+ %arrayidx3 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 1
+ %4 = load i32* %arrayidx3, align 4
+
+; CHECK: @foo
+; CHECK-NOT: lwzx
+; CHECK: lwz {{[0-9]+}}, 4([[REG:[0-9]+]])
+; CHECK: lwz {{[0-9]+}}, 0([[REG]])
+; CHECK: blr
+
+ %add = add nsw i32 %4, %3
+ call void @llvm.lifetime.end(i64 32800, i8* %2) #0
+ call void @llvm.lifetime.end(i64 32800, i8* %1) #0
+ call void @llvm.lifetime.end(i64 32800, i8* %0) #0
+ ret i32 %add
+}
+
+declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+
+declare void @bar(i32*, i32*, i32*)
+
+declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/mcm-obj-2.ll b/test/CodeGen/PowerPC/mcm-obj-2.ll
index 2dd1718ba75a..bc60b3baf2bb 100644
--- a/test/CodeGen/PowerPC/mcm-obj-2.ll
+++ b/test/CodeGen/PowerPC/mcm-obj-2.ll
@@ -1,5 +1,5 @@
; RUN: llc -O1 -mcpu=pwr7 -code-model=medium -filetype=obj %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck %s
+; RUN: llvm-readobj -r | FileCheck %s
; FIXME: When asm-parse is available, could make this an assembly test.
@@ -19,18 +19,11 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing function-scoped variable si.
;
-; CHECK: Relocation 0
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM2:[0-9]+]]
-; CHECK-NEXT: 'r_type', 0x00000032
-; CHECK: Relocation 1
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM2]]
-; CHECK-NEXT: 'r_type', 0x00000030
-; CHECK: Relocation 2
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM2]]
-; CHECK-NEXT: 'r_type', 0x00000030
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM2:[^ ]+]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
@gi = global i32 5, align 4
@@ -45,18 +38,9 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing file-scope variable gi.
;
-; CHECK: Relocation 3
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM3:[0-9]+]]
-; CHECK-NEXT: 'r_type', 0x00000032
-; CHECK: Relocation 4
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM3]]
-; CHECK-NEXT: 'r_type', 0x00000030
-; CHECK: Relocation 5
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM3]]
-; CHECK-NEXT: 'r_type', 0x00000030
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM3:[^ ]+]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM3]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM3]]
define double @test_double_const() nounwind {
entry:
@@ -66,12 +50,5 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing a constant.
;
-; CHECK: Relocation 6
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM4:[0-9]+]]
-; CHECK-NEXT: 'r_type', 0x00000032
-; CHECK: Relocation 7
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM4]]
-; CHECK-NEXT: 'r_type', 0x00000030
-
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM4:[^ ]+]]
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM4]]
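Reading the old and new CHECK lines side by side gives the decoding behind this migration: r_type 0x32 is R_PPC64_TOC16_HA and 0x30 is R_PPC64_TOC16_LO (0x40, in mcm-obj.ll below, is R_PPC64_TOC16_LO_DS). The [[SYM2:[^ ]+]] pattern captures the symbol name at its first match, and the later lines require the same name. An abridged sketch of the listing the checks walk, with the offsets and the .LC0 symbol purely illustrative and any trailing fields omitted:

; Relocations [
;   Section (1) .text {
;     0x2 R_PPC64_TOC16_HA .LC0
;     0x6 R_PPC64_TOC16_LO .LC0
;   }
; ]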
diff --git a/test/CodeGen/PowerPC/mcm-obj.ll b/test/CodeGen/PowerPC/mcm-obj.ll
index 117c3b334346..720c5fb6dd65 100644
--- a/test/CodeGen/PowerPC/mcm-obj.ll
+++ b/test/CodeGen/PowerPC/mcm-obj.ll
@@ -1,7 +1,7 @@
; RUN: llc -O0 -mcpu=pwr7 -code-model=medium -filetype=obj %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=MEDIUM %s
+; RUN: llvm-readobj -r | FileCheck -check-prefix=MEDIUM %s
; RUN: llc -O0 -mcpu=pwr7 -code-model=large -filetype=obj %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck -check-prefix=LARGE %s
+; RUN: llvm-readobj -r | FileCheck -check-prefix=LARGE %s
; FIXME: When asm-parse is available, could make this an assembly test.
@@ -21,25 +21,15 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing external variable ei.
;
-; MEDIUM: '.rela.text'
-; MEDIUM: Relocation 0
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM1:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 1
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM1]]
-; MEDIUM-NEXT: 'r_type', 0x00000040
+; MEDIUM: Relocations [
+; MEDIUM: Section (1) .text {
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM1:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM1]]
;
-; LARGE: '.rela.text'
-; LARGE: Relocation 0
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM1:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 1
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM1]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE: Relocations [
+; LARGE: Section (1) .text {
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM1:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM1]]
@test_fn_static.si = internal global i32 0, align 4
@@ -54,26 +44,14 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing function-scoped variable si.
;
-; MEDIUM: Relocation 2
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM2:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 3
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM2]]
-; MEDIUM-NEXT: 'r_type', 0x00000030
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM2:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM2]]
;
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing function-scoped variable si.
;
-; LARGE: Relocation 2
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM2:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 3
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM2]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM2:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM2]]
@gi = global i32 5, align 4
@@ -88,26 +66,14 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing file-scope variable gi.
;
-; MEDIUM: Relocation 4
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM3:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 5
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM3]]
-; MEDIUM-NEXT: 'r_type', 0x00000030
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM3:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM3]]
;
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing file-scope variable gi.
;
-; LARGE: Relocation 4
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM3:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 5
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM3]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM3:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM3]]
define double @test_double_const() nounwind {
entry:
@@ -117,26 +83,14 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO for
; accessing a constant.
;
-; MEDIUM: Relocation 6
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM4:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 7
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM4]]
-; MEDIUM-NEXT: 'r_type', 0x00000030
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM4:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM4]]
;
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing a constant.
;
-; LARGE: Relocation 6
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM4:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 7
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM4]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM4:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM4]]
define signext i32 @test_jump_table(i32 signext %i) nounwind {
entry:
@@ -185,23 +139,11 @@ sw.epilog: ; preds = %sw.bb3, %sw.default
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing a jump table address.
;
-; MEDIUM: Relocation 8
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM5:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 9
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM5]]
-; MEDIUM-NEXT: 'r_type', 0x00000040
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM5:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM5]]
;
-; LARGE: Relocation 8
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM5:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 9
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM5]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM5:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM5]]
@ti = common global i32 0, align 4
@@ -216,23 +158,11 @@ entry:
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing tentatively declared variable ti.
;
-; MEDIUM: Relocation 10
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM6:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 11
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM6]]
-; MEDIUM-NEXT: 'r_type', 0x00000040
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
;
-; LARGE: Relocation 10
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM6:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 11
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM6]]
-; LARGE-NEXT: 'r_type', 0x00000040
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
define i8* @test_fnaddr() nounwind {
entry:
@@ -248,21 +178,8 @@ declare signext i32 @foo(i32 signext)
; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
; accessing function address foo.
;
-; MEDIUM: Relocation 12
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM7:[0-9]+]]
-; MEDIUM-NEXT: 'r_type', 0x00000032
-; MEDIUM: Relocation 13
-; MEDIUM-NEXT: 'r_offset'
-; MEDIUM-NEXT: 'r_sym', 0x[[SYM7]]
-; MEDIUM-NEXT: 'r_type', 0x00000040
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
;
-; LARGE: Relocation 12
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM7:[0-9]+]]
-; LARGE-NEXT: 'r_type', 0x00000032
-; LARGE: Relocation 13
-; LARGE-NEXT: 'r_offset'
-; LARGE-NEXT: 'r_sym', 0x[[SYM7]]
-; LARGE-NEXT: 'r_type', 0x00000040
-
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM7:[^ ]+]]
+; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM7]]
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
new file mode 100644
index 000000000000..523f329303bf
--- /dev/null
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -0,0 +1,143 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -disable-ppc-cmp-opt=0 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i32 %a, %b
+ store i32 %sub, i32* %c, align 4, !tbaa !0
+ %cmp = icmp sgt i32 %a, %b
+ %cond = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %cond
+
+; CHECK: @foo
+; CHECK-NOT: subf.
+}
+
+define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
+entry:
+ %shl = shl i32 %a, %b
+ store i32 %shl, i32* %c, align 4, !tbaa !0
+ %cmp = icmp sgt i32 %shl, 0
+ %conv = zext i1 %cmp to i32
+ ret i32 %conv
+
+; CHECK: @foo2
+; CHECK-NOT: slw.
+}
+
+define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %a, %b
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp sgt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @fool
+; CHECK: subf. [[REG:[0-9]+]], 4, 3
+; CHECK: isel 3, 3, 4, 1
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %a, %b
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp sle i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foolb
+; CHECK: subf. [[REG:[0-9]+]], 4, 3
+; CHECK: isel 3, 4, 3, 1
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %b, %a
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp sgt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foolc
+; CHECK: subf. [[REG:[0-9]+]], 3, 4
+; CHECK: isel 3, 3, 4, 0
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %b, %a
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp eq i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foold
+; CHECK: subf. [[REG:[0-9]+]], 3, 4
+; CHECK: isel 3, 3, 4, 2
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %a, %b
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp eq i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foold2
+; CHECK: subf. [[REG:[0-9]+]], 4, 3
+; CHECK: isel 3, 3, 4, 2
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %shl = shl i64 %a, %b
+ store i64 %shl, i64* %c, align 8, !tbaa !3
+ %cmp = icmp sgt i64 %shl, 0
+ %conv1 = zext i1 %cmp to i64
+ ret i64 %conv1
+
+; CHECK: @foo2l
+; CHECK: sld. 4, 3, 4
+; CHECK: std 4, 0(5)
+}
+
+define double @food(double %a, double %b, double* nocapture %c) #0 {
+entry:
+ %sub = fsub double %a, %b
+ store double %sub, double* %c, align 8, !tbaa !3
+ %cmp = fcmp ogt double %a, %b
+ %cond = select i1 %cmp, double %a, double %b
+ ret double %cond
+
+; CHECK: @food
+; CHECK-NOT: fsub. 0, 1, 2
+; CHECK: stfd 0, 0(5)
+}
+
+define float @foof(float %a, float %b, float* nocapture %c) #0 {
+entry:
+ %sub = fsub float %a, %b
+ store float %sub, float* %c, align 4, !tbaa !3
+ %cmp = fcmp ogt float %a, %b
+ %cond = select i1 %cmp, float %a, float %b
+ ret float %cond
+
+; CHECK: @foof
+; CHECK-NOT: fsubs. 0, 1, 2
+; CHECK: stfs 0, 0(5)
+}
+
+!0 = metadata !{metadata !"int", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"long", metadata !1}
+!4 = metadata !{metadata !"any pointer", metadata !1}
+
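What the checks pin here is record-form folding: subf. is the dot form of subf, which also compares its result against zero into CR0, so the explicit signed compare of %a and %b folds into the subtraction whose result the store needs anyway. isel's final operand is the CR bit consulted (0 = LT, 1 = GT, 2 = EQ), hence bit 1 for the sgt selects and bit 2 for eq. Sketch for @fool; the scratch register is illustrative, the argument registers follow the ABI:

;   subf. 6, 4, 3        ; r6 = r3 - r4, record form also sets CR0 from r6
;   isel  3, 3, 4, 1     ; r3 = CR0.gt ? r3 : r4   (the sgt select)
;   std   6, 0(5)        ; the stored difference reuses the same subf.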
diff --git a/test/CodeGen/PowerPC/pr15359.ll b/test/CodeGen/PowerPC/pr15359.ll
index 12fa3e5ffbdd..df02dfcb5c1e 100644
--- a/test/CodeGen/PowerPC/pr15359.ll
+++ b/test/CodeGen/PowerPC/pr15359.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -mcpu=pwr7 -filetype=obj %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck %s
+; RUN: llvm-readobj -t | FileCheck %s
target datalayout = "E-p:64:64:64-S0-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -14,7 +14,9 @@ entry:
; Verify that nextIdx has symbol type TLS.
;
-; CHECK: '.symtab'
-; CHECK: 'nextIdx'
-; CHECK: 'st_type', 0x6
-
+; CHECK: Symbol {
+; CHECK: Name: nextIdx
+; CHECK-NEXT: Value:
+; CHECK-NEXT: Size:
+; CHECK-NEXT: Binding:
+; CHECK-NEXT: Type: TLS
diff --git a/test/CodeGen/PowerPC/rounding-ops.ll b/test/CodeGen/PowerPC/rounding-ops.ll
index b210a6bda8bf..2b5e1c9a289b 100644
--- a/test/CodeGen/PowerPC/rounding-ops.ll
+++ b/test/CodeGen/PowerPC/rounding-ops.ll
@@ -107,9 +107,10 @@ define double @test10(double %x) nounwind {
declare double @trunc(double) nounwind readnone
-define float @test11(float %x) nounwind {
+define void @test11(float %x, float* %y) nounwind {
%call = tail call float @rintf(float %x) nounwind readnone
- ret float %call
+ store float %call, float* %y
+ ret void
; CHECK: test11:
; CHECK-NOT: frin
@@ -125,9 +126,10 @@ define float @test11(float %x) nounwind {
declare float @rintf(float) nounwind readnone
-define double @test12(double %x) nounwind {
+define void @test12(double %x, double* %y) nounwind {
%call = tail call double @rint(double %x) nounwind readnone
- ret double %call
+ store double %call, double* %y
+ ret void
; CHECK: test12:
; CHECK-NOT: frin
diff --git a/test/CodeGen/PowerPC/s000-alias-misched.ll b/test/CodeGen/PowerPC/s000-alias-misched.ll
index d03ee8738eea..3570a11b6271 100644
--- a/test/CodeGen/PowerPC/s000-alias-misched.ll
+++ b/test/CodeGen/PowerPC/s000-alias-misched.ll
@@ -37,34 +37,34 @@ for.body4: ; preds = %for.body4, %for.con
%arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
%arrayidx6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
%0 = bitcast double* %arrayidx to <1 x double>*
- %1 = load <1 x double>* %0, align 32, !tbaa !0
+ %1 = load <1 x double>* %0, align 32
%add = fadd <1 x double> %1, <double 1.000000e+00>
%2 = bitcast double* %arrayidx6 to <1 x double>*
- store <1 x double> %add, <1 x double>* %2, align 32, !tbaa !0
+ store <1 x double> %add, <1 x double>* %2, align 32
%indvars.iv.next.322 = or i64 %indvars.iv, 4
%arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
%arrayidx6.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
%3 = bitcast double* %arrayidx.4 to <1 x double>*
- %4 = load <1 x double>* %3, align 32, !tbaa !0
+ %4 = load <1 x double>* %3, align 32
%add.4 = fadd <1 x double> %4, <double 1.000000e+00>
%5 = bitcast double* %arrayidx6.4 to <1 x double>*
- store <1 x double> %add.4, <1 x double>* %5, align 32, !tbaa !0
+ store <1 x double> %add.4, <1 x double>* %5, align 32
%indvars.iv.next.726 = or i64 %indvars.iv, 8
%arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
%arrayidx6.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
%6 = bitcast double* %arrayidx.8 to <1 x double>*
- %7 = load <1 x double>* %6, align 32, !tbaa !0
+ %7 = load <1 x double>* %6, align 32
%add.8 = fadd <1 x double> %7, <double 1.000000e+00>
%8 = bitcast double* %arrayidx6.8 to <1 x double>*
- store <1 x double> %add.8, <1 x double>* %8, align 32, !tbaa !0
+ store <1 x double> %add.8, <1 x double>* %8, align 32
%indvars.iv.next.1130 = or i64 %indvars.iv, 12
%arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
%arrayidx6.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
%9 = bitcast double* %arrayidx.12 to <1 x double>*
- %10 = load <1 x double>* %9, align 32, !tbaa !0
+ %10 = load <1 x double>* %9, align 32
%add.12 = fadd <1 x double> %10, <double 1.000000e+00>
%11 = bitcast double* %arrayidx6.12 to <1 x double>*
- store <1 x double> %add.12, <1 x double>* %11, align 32, !tbaa !0
+ store <1 x double> %add.12, <1 x double>* %11, align 32
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
%exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
@@ -95,7 +95,3 @@ for.end10: ; preds = %for.end
declare i64 @clock() nounwind
declare signext i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
-
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/stubs.ll b/test/CodeGen/PowerPC/stubs.ll
index cfcc50b7a876..694f208198a1 100644
--- a/test/CodeGen/PowerPC/stubs.ll
+++ b/test/CodeGen/PowerPC/stubs.ll
@@ -6,16 +6,16 @@ entry:
}
; CHECK: _test1:
-; CHECK: bl ___floatditf$stub
+; CHECK: bl L___floatditf$stub
; CHECK: .section __TEXT,__symbol_stub1,symbol_stubs,pure_instructions,16
-; CHECK: ___floatditf$stub:
+; CHECK: L___floatditf$stub:
; CHECK: .indirect_symbol ___floatditf
-; CHECK: lis r11, ha16(___floatditf$lazy_ptr)
-; CHECK: lwzu r12, lo16(___floatditf$lazy_ptr)(r11)
+; CHECK: lis r11, ha16(L___floatditf$lazy_ptr)
+; CHECK: lwzu r12, lo16(L___floatditf$lazy_ptr)(r11)
; CHECK: mtctr r12
; CHECK: bctr
; CHECK: .section __DATA,__la_symbol_ptr,lazy_symbol_pointers
-; CHECK: ___floatditf$lazy_ptr:
+; CHECK: L___floatditf$lazy_ptr:
; CHECK: .indirect_symbol ___floatditf
; CHECK: .long dyld_stub_binding_helper
diff --git a/test/CodeGen/PowerPC/stwu-gta.ll b/test/CodeGen/PowerPC/stwu-gta.ll
index 4febe7e2fe7e..980c1d502853 100644
--- a/test/CodeGen/PowerPC/stwu-gta.ll
+++ b/test/CodeGen/PowerPC/stwu-gta.ll
@@ -8,15 +8,11 @@ target triple = "powerpc-unknown-linux"
define void @_GLOBAL__I_a() nounwind section ".text.startup" {
entry:
- store i32 5, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 0), align 4, !tbaa !0
- store i32 6, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 1), align 4, !tbaa !0
+ store i32 5, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 0), align 4
+ store i32 6, i32* getelementptr inbounds (%class.Two.0.5* @foo, i32 0, i32 1), align 4
ret void
}
; CHECK: @_GLOBAL__I_a
; CHECK-NOT: stwux
; CHECK: stwu
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/stwu8.ll b/test/CodeGen/PowerPC/stwu8.ll
index e0bd04345439..b220af2df4a4 100644
--- a/test/CodeGen/PowerPC/stwu8.ll
+++ b/test/CodeGen/PowerPC/stwu8.ll
@@ -14,7 +14,7 @@ entry:
%_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
%0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 4, i1 false) nounwind
- store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8, !tbaa !0
+ store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8
unreachable
}
@@ -22,7 +22,3 @@ entry:
; CHECK: stwu
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/tls-gd-obj.ll b/test/CodeGen/PowerPC/tls-gd-obj.ll
index 00b537d5325b..ffc0db0d14cb 100644
--- a/test/CodeGen/PowerPC/tls-gd-obj.ll
+++ b/test/CodeGen/PowerPC/tls-gd-obj.ll
@@ -1,5 +1,5 @@
; RUN: llc -mcpu=pwr7 -O0 -filetype=obj -relocation-model=pic %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck %s
+; RUN: llvm-readobj -r | FileCheck %s
; Test correct relocation generation for thread-local storage using
; the general dynamic model and integrated assembly.
@@ -21,21 +21,11 @@ entry:
; and R_PPC64_TLSGD for accessing external variable a, and R_PPC64_REL24
; for the call to __tls_get_addr.
;
-; CHECK: '.rela.text'
-; CHECK: Relocation 0
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
-; CHECK-NEXT: 'r_type', 0x00000052
-; CHECK: Relocation 1
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x00000050
-; CHECK: Relocation 2
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x0000006b
-; CHECK: Relocation 3
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x{{[0-9a-f]+}}
-; CHECK-NEXT: 'r_type', 0x0000000a
-
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TLSGD16_HA a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TLSGD16_LO a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TLSGD a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_REL24 __tls_get_addr
+; CHECK: }
+; CHECK: ]
diff --git a/test/CodeGen/PowerPC/tls-ie-obj.ll b/test/CodeGen/PowerPC/tls-ie-obj.ll
index 3600cc52ba54..0f7a35295234 100644
--- a/test/CodeGen/PowerPC/tls-ie-obj.ll
+++ b/test/CodeGen/PowerPC/tls-ie-obj.ll
@@ -1,5 +1,5 @@
; RUN: llc -mcpu=pwr7 -O0 -filetype=obj %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck %s
+; RUN: llvm-readobj -r | FileCheck %s
; Test correct relocation generation for thread-local storage
; using the initial-exec model and integrated assembly.
@@ -20,17 +20,10 @@ entry:
; Verify generation of R_PPC64_GOT_TPREL16_DS and R_PPC64_TLS for
; accessing external variable a.
;
-; CHECK: '.rela.text'
-; CHECK: Relocation 0
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
-; CHECK-NEXT: 'r_type', 0x0000005a
-; CHECK: Relocation 1
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x00000058
-; CHECK: Relocation 2
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x00000043
-
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TPREL16_HA a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TPREL16_LO_DS a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TLS a
+; CHECK: }
+; CHECK: ]
diff --git a/test/CodeGen/PowerPC/tls-ld-obj.ll b/test/CodeGen/PowerPC/tls-ld-obj.ll
index c521ae405f46..29ee87684552 100644
--- a/test/CodeGen/PowerPC/tls-ld-obj.ll
+++ b/test/CodeGen/PowerPC/tls-ld-obj.ll
@@ -1,5 +1,5 @@
; RUN: llc -mcpu=pwr7 -O0 -filetype=obj -relocation-model=pic %s -o - | \
-; RUN: elf-dump --dump-section-data | FileCheck %s
+; RUN: llvm-readobj -r | FileCheck %s
; Test correct relocation generation for thread-local storage using
; the local dynamic model.
@@ -22,29 +22,13 @@ entry:
; accessing external variable a, and R_PPC64_REL24 for the call to
; __tls_get_addr.
;
-; CHECK: '.rela.text'
-; CHECK: Relocation 0
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1:[0-9a-f]+]]
-; CHECK-NEXT: 'r_type', 0x00000056
-; CHECK: Relocation 1
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x00000054
-; CHECK: Relocation 2
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x0000006c
-; CHECK: Relocation 3
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x{{[0-9a-f]+}}
-; CHECK-NEXT: 'r_type', 0x0000000a
-; CHECK: Relocation 4
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x0000004d
-; CHECK: Relocation 5
-; CHECK-NEXT: 'r_offset'
-; CHECK-NEXT: 'r_sym', 0x[[SYM1]]
-; CHECK-NEXT: 'r_type', 0x0000004b
-
+; CHECK: Relocations [
+; CHECK: Section (1) .text {
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TLSLD16_HA a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_GOT_TLSLD16_LO a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_TLSLD a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_REL24 __tls_get_addr
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_DTPREL16_HA a
+; CHECK: 0x{{[0-9,A-F]+}} R_PPC64_DTPREL16_LO a
+; CHECK: }
+; CHECK: ]
diff --git a/test/CodeGen/R600/README b/test/CodeGen/R600/README
new file mode 100644
index 000000000000..96998bba28f2
--- /dev/null
+++ b/test/CodeGen/R600/README
@@ -0,0 +1,21 @@
++==============================================================================+
+| How to organize the lit tests |
++==============================================================================+
+
+- If you write a test for matching a single DAG opcode or intrinsic, it should
+ go in a file called {opcode_name,intrinsic_name}.ll (e.g. fadd.ll)
+
+- If you write a test that matches several DAG opcodes and checks for a single
+ ISA instruction, then that test should go in a file called {ISA_name}.ll (e.g.
+ bfi_int.ll)
+
+- For all other tests, use your best judgement for organizing tests and naming
+ the files.
+
++==============================================================================+
+| Naming conventions |
++==============================================================================+
+
+- Use dash '-' and not underscore '_' to separate words in file names, unless
+ the file is named after a DAG opcode or ISA instruction that has an
+ underscore '_' in its name.
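+
+For example: a test for the BFE_UINT instruction goes in bfe_uint.ll (the
+underscore comes from the instruction name), while a generic control-flow test
+uses a dashed name such as loop-address.ll.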
diff --git a/test/CodeGen/R600/add.v4i32.ll b/test/CodeGen/R600/add.ll
index ac4a87417bde..185998b26095 100644
--- a/test/CodeGen/R600/add.v4i32.ll
+++ b/test/CodeGen/R600/add.ll
@@ -1,9 +1,9 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: ADD_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/alu-split.ll b/test/CodeGen/R600/alu-split.ll
index afefcd9f78b0..48496f6febf6 100644
--- a/test/CodeGen/R600/alu-split.ll
+++ b/test/CodeGen/R600/alu-split.ll
@@ -4,6 +4,7 @@
;CHECK: ALU
;CHECK: ALU
;CHECK-NOT: ALU
+;CHECK: CF_END
define void @main() #0 {
main_body:
diff --git a/test/CodeGen/R600/and.v4i32.ll b/test/CodeGen/R600/and.ll
index 662085e2d673..166af2d8d128 100644
--- a/test/CodeGen/R600/and.v4i32.ll
+++ b/test/CodeGen/R600/and.ll
@@ -1,9 +1,9 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: AND_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: AND_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: AND_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: AND_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/bfe_uint.ll b/test/CodeGen/R600/bfe_uint.ll
new file mode 100644
index 000000000000..92570c315299
--- /dev/null
+++ b/test/CodeGen/R600/bfe_uint.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @bfe_def
+; CHECK: BFE_UINT
+define void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
+entry:
+ %0 = lshr i32 %x, 5
+ %1 = and i32 %0, 15 ; 0xf
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; This program could be implemented using a BFE_UINT instruction; however,
+; since the lshr constant plus the number of bits in the mask is >= 32, it can
+; also be implemented with an LSHR instruction, which is better because LSHR
+; has fewer operands and requires fewer constants.
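+;
+; Worked example (illustrative only, not checked): for %x = 0xDEADBEEF,
+; lshr i32 %x, 16 yields 0x0000DEAD, and the 0xffff mask then changes nothing,
+; since shift amount (16) + mask width (16) = 32 already isolates bits [31:16].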
+
+; CHECK: @bfe_shift
+; CHECK-NOT: BFE_UINT
+define void @bfe_shift(i32 addrspace(1)* %out, i32 %x) {
+entry:
+ %0 = lshr i32 %x, 16
+ %1 = and i32 %0, 65535 ; 0xffff
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/bfi_int.ll b/test/CodeGen/R600/bfi_int.ll
new file mode 100644
index 000000000000..4244dcf3c77b
--- /dev/null
+++ b/test/CodeGen/R600/bfi_int.ll
@@ -0,0 +1,52 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI-CHECK %s
+
+; BFI_INT Definition pattern from ISA docs
+; (y & x) | (z & ~x)
+;
+; R600-CHECK: @bfi_def
+; R600-CHECK: BFI_INT
+; SI-CHECK: @bfi_def
+; SI-CHECK: V_BFI_B32
+define void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+entry:
+ %0 = xor i32 %x, -1
+ %1 = and i32 %z, %0
+ %2 = and i32 %y, %x
+ %3 = or i32 %1, %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
+
+; SHA-256 Ch function
+; z ^ (x & (y ^ z))
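+;
+; (Equivalent to (x & y) | (~x & z): where a bit of x is 1 the result takes
+; y's bit, otherwise z's. That bitwise select is exactly the BFI pattern.)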
+; R600-CHECK: @bfi_sha256_ch
+; R600-CHECK: BFI_INT
+; SI-CHECK: @bfi_sha256_ch
+; SI-CHECK: V_BFI_B32
+define void @bfi_sha256_ch(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+entry:
+ %0 = xor i32 %y, %z
+ %1 = and i32 %x, %0
+ %2 = xor i32 %z, %1
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; SHA-256 Ma function
+; ((x & z) | (y & (x | z)))
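+;
+; (Majority of x, y, z. One way to express it is BFI(x ^ y, z, x): where x and
+; y agree the result is x, where they differ it is z, hence the single XOR
+; plus single BFI checked below.)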
+; R600-CHECK: @bfi_sha256_ma
+; R600-CHECK: XOR_INT * [[DST:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: BFI_INT * {{T[0-9]+\.[XYZW]}}, {{[[DST]]|PV.x}}, {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI-CHECK: V_XOR_B32_e32 [[DST:VGPR[0-9]+]], {{VGPR[0-9]+, VGPR[0-9]+}}
+; SI-CHECK: V_BFI_B32 {{VGPR[0-9]+}}, [[DST]], {{VGPR[0-9]+, VGPR[0-9]+}}
+
+define void @bfi_sha256_ma(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
+entry:
+ %0 = and i32 %x, %z
+ %1 = or i32 %x, %z
+ %2 = and i32 %y, %1
+ %3 = or i32 %0, %2
+ store i32 %3, i32 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/call_fs.ll b/test/CodeGen/R600/call_fs.ll
new file mode 100644
index 000000000000..e152bf6d559d
--- /dev/null
+++ b/test/CodeGen/R600/call_fs.ll
@@ -0,0 +1,15 @@
+
+; RUN: llc < %s -march=r600 -mcpu=redwood -show-mc-encoding -o - | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=rv710 -show-mc-encoding -o - | FileCheck --check-prefix=R600-CHECK %s
+
+; EG-CHECK: @call_fs
+; EG-CHECK: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x84]
+; R600-CHECK: @call_fs
+; R600-CHECK: CALL_FS ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x89]
+
+
+define void @call_fs() #0 {
+ ret void
+}
+
+attributes #0 = { "ShaderType"="1" } ; Vertex Shader
diff --git a/test/CodeGen/R600/cf_end.ll b/test/CodeGen/R600/cf_end.ll
new file mode 100644
index 000000000000..138004df6df9
--- /dev/null
+++ b/test/CodeGen/R600/cf_end.ll
@@ -0,0 +1,9 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood --show-mc-encoding | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=caicos --show-mc-encoding | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=cayman --show-mc-encoding | FileCheck --check-prefix=CM-CHECK %s
+
+; EG-CHECK: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x20,0x80]
+; CM-CHECK: CF_END ; encoding: [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x88]
+define void @eop() {
+ ret void
+}
diff --git a/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
index fd958b365961..6607c1218b56 100644
--- a/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
+++ b/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
@@ -8,7 +8,7 @@
; CHECK: @sint
-; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
@@ -22,7 +22,7 @@ entry:
}
;CHECK: @uint
-;CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
diff --git a/test/CodeGen/R600/disconnected-predset-break-bug.ll b/test/CodeGen/R600/disconnected-predset-break-bug.ll
index 09baee7a1dcd..012c17b8fe4b 100644
--- a/test/CodeGen/R600/disconnected-predset-break-bug.ll
+++ b/test/CodeGen/R600/disconnected-predset-break-bug.ll
@@ -6,7 +6,7 @@
; CHECK: @loop_ge
; CHECK: LOOP_START_DX10
-; CHECK: PRED_SET
+; CHECK: ALU_PUSH_BEFORE
; CHECK-NEXT: JUMP
; CHECK-NEXT: LOOP_BREAK
define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) nounwind {
diff --git a/test/CodeGen/R600/elf.ll b/test/CodeGen/R600/elf.ll
new file mode 100644
index 000000000000..f460f13d53e0
--- /dev/null
+++ b/test/CodeGen/R600/elf.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=r600 -mcpu=SI -filetype=obj | llvm-readobj -s - | FileCheck --check-prefix=ELF-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=SI -o - | FileCheck --check-prefix=CONFIG-CHECK %s
+
+; ELF-CHECK: Format: ELF32
+; ELF-CHECK: Name: .AMDGPU.config
+; ELF-CHECK: Type: SHT_PROGBITS
+
+; CONFIG-CHECK: .section .AMDGPU.config
+; CONFIG-CHECK-NEXT: .long 45096
+; CONFIG-CHECK-NEXT: .long 0
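+
+; (The .AMDGPU.config section is expected to carry the shader's configuration
+; words; the exact .long values are target-specific register writes.)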
+define void @test(i32 %p) #0 {
+ %i = add i32 %p, 2
+ %r = bitcast i32 %i to float
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
+ ret void
+}
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" } ; Pixel Shader
diff --git a/test/CodeGen/R600/elf.r600.ll b/test/CodeGen/R600/elf.r600.ll
new file mode 100644
index 000000000000..0590efb0915f
--- /dev/null
+++ b/test/CodeGen/R600/elf.r600.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood -filetype=obj | llvm-readobj -s - | FileCheck --check-prefix=ELF-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=redwood -o - | FileCheck --check-prefix=CONFIG-CHECK %s
+
+; ELF-CHECK: Format: ELF32
+; ELF-CHECK: Name: .AMDGPU.config
+
+; CONFIG-CHECK: .section .AMDGPU.config
+; CONFIG-CHECK-NEXT: .long 166100
+; CONFIG-CHECK-NEXT: .long 258
+; CONFIG-CHECK-NEXT: .long 165900
+; CONFIG-CHECK-NEXT: .long 0
+define void @test(float addrspace(1)* %out, i32 %p) {
+ %i = add i32 %p, 2
+ %r = bitcast i32 %i to float
+ store float %r, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fabs.ll b/test/CodeGen/R600/fabs.ll
index 0407533eaa5f..85f2882289fa 100644
--- a/test/CodeGen/R600/fabs.ll
+++ b/test/CodeGen/R600/fabs.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: MOV T{{[0-9]+\.[XYZW], \|T[0-9]+\.[XYZW]\|}}
+;CHECK: MOV * T{{[0-9]+\.[XYZW], \|T[0-9]+\.[XYZW]\|}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index d7d1b6572c41..9a672329e75c 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,8 +1,9 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: @fadd_f32
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @test() {
+define void @fadd_f32() {
%r0 = call float @llvm.R600.load.input(i32 0)
%r1 = call float @llvm.R600.load.input(i32 1)
%r2 = fadd float %r0, %r1
@@ -14,3 +15,17 @@ declare float @llvm.R600.load.input(i32) readnone
declare void @llvm.AMDGPU.store.output(float, i32)
+; CHECK: @fadd_v4f32
+; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fadd <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fadd.v4f32.ll b/test/CodeGen/R600/fadd.v4f32.ll
deleted file mode 100644
index 85dbfd52cbb3..000000000000
--- a/test/CodeGen/R600/fadd.v4f32.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
- %a = load <4 x float> addrspace(1) * %in
- %b = load <4 x float> addrspace(1) * %b_ptr
- %result = fadd <4 x float> %a, %b
- store <4 x float> %result, <4 x float> addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/fcmp-cnd.ll b/test/CodeGen/R600/fcmp-cnd.ll
index a94cfb5cf2fe..7373a214790e 100644
--- a/test/CodeGen/R600/fcmp-cnd.ll
+++ b/test/CodeGen/R600/fcmp-cnd.ll
@@ -2,7 +2,7 @@
;Not checking arguments 2 and 3 to CNDE, because they may change between
;registers and literal.x depending on what the optimizer does.
-;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: CNDE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
diff --git a/test/CodeGen/R600/fcmp.ll b/test/CodeGen/R600/fcmp.ll
index 37f621d23958..dc3a779dd609 100644
--- a/test/CodeGen/R600/fcmp.ll
+++ b/test/CodeGen/R600/fcmp.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
; CHECK: @fcmp_sext
-; CHECK: SETE_DX10 T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
@@ -19,7 +19,8 @@ entry:
; SET* + FP_TO_SINT
; CHECK: @fcmp_br
-; CHECK: SET{{[N]*}}E_DX10 T{{[0-9]+\.[XYZW], [a-zA-Z0-9, .]+}}(5.0
+; CHECK: SET{{[N]*}}E_DX10 * T{{[0-9]+\.[XYZW],}}
+; CHECK-NEXT: {{[0-9]+}}(5.0
define void @fcmp_br(i32 addrspace(1)* %out, float %in) {
entry:
diff --git a/test/CodeGen/R600/fdiv.v4f32.ll b/test/CodeGen/R600/fdiv.ll
index 79e677f541f5..2e68e36be4d8 100644
--- a/test/CodeGen/R600/fdiv.v4f32.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -1,13 +1,13 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/floor.ll b/test/CodeGen/R600/floor.ll
index 845330f28419..877d69a65b43 100644
--- a/test/CodeGen/R600/floor.ll
+++ b/test/CodeGen/R600/floor.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: FLOOR T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: FLOOR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/fmad.ll b/test/CodeGen/R600/fmad.ll
index a3d4d0ff0db7..62001edc3aa5 100644
--- a/test/CodeGen/R600/fmad.ll
+++ b/test/CodeGen/R600/fmad.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: MULADD_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MULADD_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/fmax.ll b/test/CodeGen/R600/fmax.ll
index 3708f0b9eed2..8b704e56484b 100644
--- a/test/CodeGen/R600/fmax.ll
+++ b/test/CodeGen/R600/fmax.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: MAX T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MAX * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/fmin.ll b/test/CodeGen/R600/fmin.ll
index 19d59ab3061e..5e34b7c8902e 100644
--- a/test/CodeGen/R600/fmin.ll
+++ b/test/CodeGen/R600/fmin.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: MIN T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MIN * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
index eb1d523c0bb4..c29294632dc0 100644
--- a/test/CodeGen/R600/fmul.ll
+++ b/test/CodeGen/R600/fmul.ll
@@ -1,8 +1,9 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: @fmul_f32
+; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-define void @test() {
+define void @fmul_f32() {
%r0 = call float @llvm.R600.load.input(i32 0)
%r1 = call float @llvm.R600.load.input(i32 1)
%r2 = fmul float %r0, %r1
@@ -14,3 +15,17 @@ declare float @llvm.R600.load.input(i32) readnone
declare void @llvm.AMDGPU.store.output(float, i32)
+; CHECK: @fmul_v4f32
+; CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fmul <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fmul.v4f32.ll b/test/CodeGen/R600/fmul.v4f32.ll
index 6d44a0c5c782..74a58f74026a 100644
--- a/test/CodeGen/R600/fmul.v4f32.ll
+++ b/test/CodeGen/R600/fmul.v4f32.ll
@@ -1,9 +1,9 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/fp_to_sint.ll b/test/CodeGen/R600/fp_to_sint.ll
new file mode 100644
index 000000000000..f5716e1d47e6
--- /dev/null
+++ b/test/CodeGen/R600/fp_to_sint.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @fp_to_sint_v4i32
+; CHECK: FLT_TO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fp_to_sint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %value = load <4 x float> addrspace(1) * %in
+ %result = fptosi <4 x float> %value to <4 x i32>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fp_to_uint.ll b/test/CodeGen/R600/fp_to_uint.ll
new file mode 100644
index 000000000000..1c3c0c62cf50
--- /dev/null
+++ b/test/CodeGen/R600/fp_to_uint.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @fp_to_uint_v4i32
+; CHECK: FLT_TO_UINT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: FLT_TO_UINT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fp_to_uint_v4i32(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %value = load <4 x float> addrspace(1) * %in
+ %result = fptoui <4 x float> %value to <4 x i32>
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
index 591aa52676a4..f784cde46cd2 100644
--- a/test/CodeGen/R600/fsub.ll
+++ b/test/CodeGen/R600/fsub.ll
@@ -1,8 +1,9 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; CHECK: @fsub_f32
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-define void @test() {
+define void @fsub_f32() {
%r0 = call float @llvm.R600.load.input(i32 0)
%r1 = call float @llvm.R600.load.input(i32 1)
%r2 = fsub float %r0, %r1
@@ -14,3 +15,17 @@ declare float @llvm.R600.load.input(i32) readnone
declare void @llvm.AMDGPU.store.output(float, i32)
+; CHECK: @fsub_v4f32
+; CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %a = load <4 x float> addrspace(1) * %in
+ %b = load <4 x float> addrspace(1) * %b_ptr
+ %result = fsub <4 x float> %a, %b
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/fsub.v4f32.ll b/test/CodeGen/R600/fsub.v4f32.ll
deleted file mode 100644
index 612a57e4b609..000000000000
--- a/test/CodeGen/R600/fsub.v4f32.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK: ADD T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
- %a = load <4 x float> addrspace(1) * %in
- %b = load <4 x float> addrspace(1) * %b_ptr
- %result = fsub <4 x float> %a, %b
- store <4 x float> %result, <4 x float> addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/i8_to_double_to_float.ll b/test/CodeGen/R600/i8-to-double-to-float.ll
index 39f33227fa4b..604746627666 100644
--- a/test/CodeGen/R600/i8_to_double_to_float.ll
+++ b/test/CodeGen/R600/i8-to-double-to-float.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test(float addrspace(1)* %out, i8 addrspace(1)* %in) {
%1 = load i8 addrspace(1)* %in
diff --git a/test/CodeGen/R600/icmp-select-sete-reverse-args.ll b/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
index 71705a64f50e..e3005fe82da1 100644
--- a/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
+++ b/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
@@ -3,7 +3,7 @@
;Test that a select with reversed True/False values is correctly lowered
;to a SETNE_INT. There should only be one SETNE_INT instruction.
-;CHECK: SETNE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: SETNE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;CHECK-NOT: SETNE_INT
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/imm.ll b/test/CodeGen/R600/imm.ll
new file mode 100644
index 000000000000..979efb00e7bd
--- /dev/null
+++ b/test/CodeGen/R600/imm.ll
@@ -0,0 +1,23 @@
+; RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
+
+; Use a 64-bit value with lo bits that can be represented as an inline constant
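+; (Small integers such as 5 fit the inline-constant encoding, so only the
+; other 32-bit half of the value should need a full literal.)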
+; CHECK: @i64_imm_inline_lo
+; CHECK: S_MOV_B32 [[LO:SGPR[0-9]+]], 5
+; CHECK: V_MOV_B32_e32 [[LO_VGPR:VGPR[0-9]+]], [[LO]]
+; CHECK: BUFFER_STORE_DWORDX2 [[LO_VGPR]]_
+define void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
+entry:
+ store i64 1311768464867721221, i64 addrspace(1) *%out ; 0x1234567800000005
+ ret void
+}
+
+; Use a 64-bit value with hi bits that can be represented as an inline constant
+; CHECK: @i64_imm_inline_hi
+; CHECK: S_MOV_B32 [[HI:SGPR[0-9]+]], 5
+; CHECK: V_MOV_B32_e32 [[HI_VGPR:VGPR[0-9]+]], [[HI]]
+; CHECK: BUFFER_STORE_DWORDX2 {{VGPR[0-9]+}}_[[HI_VGPR]]
+define void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
+entry:
+ store i64 21780256376, i64 addrspace(1) *%out ; 0x0000000512345678
+ ret void
+}
diff --git a/test/CodeGen/R600/jump_address.ll b/test/CodeGen/R600/jump-address.ll
index cd35bffb1304..ae9c8bba4fd6 100644
--- a/test/CodeGen/R600/jump_address.ll
+++ b/test/CodeGen/R600/jump-address.ll
@@ -1,6 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-; CHECK: JUMP @4
+; CHECK: JUMP @3
+; CHECK: EXPORT
+; CHECK-NOT: EXPORT
define void @main() #0 {
main_body:
diff --git a/test/CodeGen/R600/literals.ll b/test/CodeGen/R600/literals.ll
index e69f64e0e142..21e5d4c4de9a 100644
--- a/test/CodeGen/R600/literals.ll
+++ b/test/CodeGen/R600/literals.ll
@@ -7,7 +7,8 @@
; ADD_INT literal.x REG, 5
; CHECK: @i32_literal
-; CHECK: ADD_INT {{[A-Z0-9,. ]*}}literal.x,{{[A-Z0-9,. ]*}} 5
+; CHECK: ADD_INT * {{[A-Z0-9,. ]*}}literal.x
+; CHECK-NEXT: 5
define void @i32_literal(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = add i32 5, %in
@@ -22,7 +23,8 @@ entry:
; ADD literal.x REG, 5.0
; CHECK: @float_literal
-; CHECK: ADD {{[A-Z0-9,. ]*}}literal.x,{{[A-Z0-9,. ]*}} {{[0-9]+}}(5.0
+; CHECK: ADD * {{[A-Z0-9,. ]*}}literal.x
+; CHECK-NEXT: 1084227584(5.0
define void @float_literal(float addrspace(1)* %out, float %in) {
entry:
%0 = fadd float 5.0, %in
@@ -30,3 +32,168 @@ entry:
ret void
}
+; CHECK: @main
+; CHECK: -2147483648
+; CHECK-NOT: -2147483648
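+; (-2147483648 is 0x80000000, the bit pattern of the repeated -0.0 operands
+; below; the literal should be emitted only once.)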
+
+define void @main() #0 {
+main_body:
+ %0 = call float @llvm.R600.load.input(i32 4)
+ %1 = call float @llvm.R600.load.input(i32 5)
+ %2 = call float @llvm.R600.load.input(i32 6)
+ %3 = call float @llvm.R600.load.input(i32 7)
+ %4 = call float @llvm.R600.load.input(i32 8)
+ %5 = call float @llvm.R600.load.input(i32 9)
+ %6 = call float @llvm.R600.load.input(i32 10)
+ %7 = call float @llvm.R600.load.input(i32 11)
+ %8 = call float @llvm.R600.load.input(i32 12)
+ %9 = call float @llvm.R600.load.input(i32 13)
+ %10 = call float @llvm.R600.load.input(i32 14)
+ %11 = call float @llvm.R600.load.input(i32 15)
+ %12 = load <4 x float> addrspace(8)* null
+ %13 = extractelement <4 x float> %12, i32 0
+ %14 = fsub float -0.000000e+00, %13
+ %15 = fadd float %0, %14
+ %16 = load <4 x float> addrspace(8)* null
+ %17 = extractelement <4 x float> %16, i32 1
+ %18 = fsub float -0.000000e+00, %17
+ %19 = fadd float %1, %18
+ %20 = load <4 x float> addrspace(8)* null
+ %21 = extractelement <4 x float> %20, i32 2
+ %22 = fsub float -0.000000e+00, %21
+ %23 = fadd float %2, %22
+ %24 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %25 = extractelement <4 x float> %24, i32 0
+ %26 = fmul float %25, %0
+ %27 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %28 = extractelement <4 x float> %27, i32 1
+ %29 = fmul float %28, %0
+ %30 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %31 = extractelement <4 x float> %30, i32 2
+ %32 = fmul float %31, %0
+ %33 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %34 = extractelement <4 x float> %33, i32 3
+ %35 = fmul float %34, %0
+ %36 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %37 = extractelement <4 x float> %36, i32 0
+ %38 = fmul float %37, %1
+ %39 = fadd float %38, %26
+ %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %41 = extractelement <4 x float> %40, i32 1
+ %42 = fmul float %41, %1
+ %43 = fadd float %42, %29
+ %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %45 = extractelement <4 x float> %44, i32 2
+ %46 = fmul float %45, %1
+ %47 = fadd float %46, %32
+ %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %49 = extractelement <4 x float> %48, i32 3
+ %50 = fmul float %49, %1
+ %51 = fadd float %50, %35
+ %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %53 = extractelement <4 x float> %52, i32 0
+ %54 = fmul float %53, %2
+ %55 = fadd float %54, %39
+ %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %57 = extractelement <4 x float> %56, i32 1
+ %58 = fmul float %57, %2
+ %59 = fadd float %58, %43
+ %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %61 = extractelement <4 x float> %60, i32 2
+ %62 = fmul float %61, %2
+ %63 = fadd float %62, %47
+ %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %65 = extractelement <4 x float> %64, i32 3
+ %66 = fmul float %65, %2
+ %67 = fadd float %66, %51
+ %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %69 = extractelement <4 x float> %68, i32 0
+ %70 = fmul float %69, %3
+ %71 = fadd float %70, %55
+ %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %73 = extractelement <4 x float> %72, i32 1
+ %74 = fmul float %73, %3
+ %75 = fadd float %74, %59
+ %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %77 = extractelement <4 x float> %76, i32 2
+ %78 = fmul float %77, %3
+ %79 = fadd float %78, %63
+ %80 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %81 = extractelement <4 x float> %80, i32 3
+ %82 = fmul float %81, %3
+ %83 = fadd float %82, %67
+ %84 = insertelement <4 x float> undef, float %15, i32 0
+ %85 = insertelement <4 x float> %84, float %19, i32 1
+ %86 = insertelement <4 x float> %85, float %23, i32 2
+ %87 = insertelement <4 x float> %86, float 0.000000e+00, i32 3
+ %88 = insertelement <4 x float> undef, float %15, i32 0
+ %89 = insertelement <4 x float> %88, float %19, i32 1
+ %90 = insertelement <4 x float> %89, float %23, i32 2
+ %91 = insertelement <4 x float> %90, float 0.000000e+00, i32 3
+ %92 = call float @llvm.AMDGPU.dp4(<4 x float> %87, <4 x float> %91)
+ %93 = call float @fabs(float %92)
+ %94 = call float @llvm.AMDGPU.rsq(float %93)
+ %95 = fmul float %15, %94
+ %96 = fmul float %19, %94
+ %97 = fmul float %23, %94
+ %98 = insertelement <4 x float> undef, float %4, i32 0
+ %99 = insertelement <4 x float> %98, float %5, i32 1
+ %100 = insertelement <4 x float> %99, float %6, i32 2
+ %101 = insertelement <4 x float> %100, float 0.000000e+00, i32 3
+ %102 = insertelement <4 x float> undef, float %4, i32 0
+ %103 = insertelement <4 x float> %102, float %5, i32 1
+ %104 = insertelement <4 x float> %103, float %6, i32 2
+ %105 = insertelement <4 x float> %104, float 0.000000e+00, i32 3
+ %106 = call float @llvm.AMDGPU.dp4(<4 x float> %101, <4 x float> %105)
+ %107 = call float @fabs(float %106)
+ %108 = call float @llvm.AMDGPU.rsq(float %107)
+ %109 = fmul float %4, %108
+ %110 = fmul float %5, %108
+ %111 = fmul float %6, %108
+ %112 = insertelement <4 x float> undef, float %95, i32 0
+ %113 = insertelement <4 x float> %112, float %96, i32 1
+ %114 = insertelement <4 x float> %113, float %97, i32 2
+ %115 = insertelement <4 x float> %114, float 0.000000e+00, i32 3
+ %116 = insertelement <4 x float> undef, float %109, i32 0
+ %117 = insertelement <4 x float> %116, float %110, i32 1
+ %118 = insertelement <4 x float> %117, float %111, i32 2
+ %119 = insertelement <4 x float> %118, float 0.000000e+00, i32 3
+ %120 = call float @llvm.AMDGPU.dp4(<4 x float> %115, <4 x float> %119)
+ %121 = fsub float -0.000000e+00, %120
+ %122 = fcmp uge float 0.000000e+00, %121
+ %123 = select i1 %122, float 0.000000e+00, float %121
+ %124 = insertelement <4 x float> undef, float %8, i32 0
+ %125 = insertelement <4 x float> %124, float %9, i32 1
+ %126 = insertelement <4 x float> %125, float 5.000000e-01, i32 2
+ %127 = insertelement <4 x float> %126, float 1.000000e+00, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %127, i32 60, i32 1)
+ %128 = insertelement <4 x float> undef, float %71, i32 0
+ %129 = insertelement <4 x float> %128, float %75, i32 1
+ %130 = insertelement <4 x float> %129, float %79, i32 2
+ %131 = insertelement <4 x float> %130, float %83, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %131, i32 0, i32 2)
+ %132 = insertelement <4 x float> undef, float %123, i32 0
+ %133 = insertelement <4 x float> %132, float %96, i32 1
+ %134 = insertelement <4 x float> %133, float %97, i32 2
+ %135 = insertelement <4 x float> %134, float 0.000000e+00, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %135, i32 1, i32 2)
+ ret void
+}
+
+; Function Attrs: readnone
+declare float @llvm.R600.load.input(i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
+
+; Function Attrs: readonly
+declare float @fabs(float) #2
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.rsq(float) #1
+
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="1" }
+attributes #1 = { readnone }
+attributes #2 = { readonly }
diff --git a/test/CodeGen/R600/llvm.AMDGPU.mul.ll b/test/CodeGen/R600/llvm.AMDGPU.mul.ll
index 693eb27457c2..cc0732b3fffd 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.mul.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.mul.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
index fac957f7eeec..ff22a6919677 100644
--- a/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
+++ b/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: TRUNC T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: TRUNC * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll b/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll
index bf0cdaa2fa3a..e45722c3fa67 100644
--- a/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll
+++ b/test/CodeGen/R600/llvm.SI.fs.interp.constant.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: S_MOV_B32
;CHECK-NEXT: V_INTERP_MOV_F32
diff --git a/test/CodeGen/R600/llvm.SI.sample.ll b/test/CodeGen/R600/llvm.SI.sample.ll
index c724395b98c2..de06354a5646 100644
--- a/test/CodeGen/R600/llvm.SI.sample.ll
+++ b/test/CodeGen/R600/llvm.SI.sample.ll
@@ -1,21 +1,21 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE_C
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
-;CHECK: IMAGE_SAMPLE
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 15
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 3
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+}}, 2
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+}}, 1
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+}}, 4
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+}}, 8
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 5
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 9
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+}}, 6
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 10
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+}}, 12
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 7
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 11
+;CHECK: IMAGE_SAMPLE_C {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 13
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+_VGPR[0-9]+_VGPR[0-9]+}}, 14
+;CHECK: IMAGE_SAMPLE {{VGPR[0-9]+}}, 8
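+;
+; (The trailing immediate on each IMAGE_SAMPLE line is the dmask; it should
+; match which components of each result the adds below actually use.)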
define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
%v1 = insertelement <4 x i32> undef, i32 %a1, i32 0
@@ -34,54 +34,88 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
%v14 = insertelement <4 x i32> undef, i32 %a4, i32 1
%v15 = insertelement <4 x i32> undef, i32 %a4, i32 2
%v16 = insertelement <4 x i32> undef, i32 %a4, i32 3
- %res1 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v1,
+ %res1 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v1,
<8 x i32> undef, <4 x i32> undef, i32 1)
- %res2 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v2,
+ %res2 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v2,
<8 x i32> undef, <4 x i32> undef, i32 2)
- %res3 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v3,
+ %res3 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v3,
<8 x i32> undef, <4 x i32> undef, i32 3)
- %res4 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v4,
+ %res4 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v4,
<8 x i32> undef, <4 x i32> undef, i32 4)
- %res5 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v5,
+ %res5 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v5,
<8 x i32> undef, <4 x i32> undef, i32 5)
- %res6 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v6,
+ %res6 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v6,
<8 x i32> undef, <4 x i32> undef, i32 6)
- %res7 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v7,
+ %res7 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v7,
<8 x i32> undef, <4 x i32> undef, i32 7)
- %res8 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v8,
+ %res8 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v8,
<8 x i32> undef, <4 x i32> undef, i32 8)
- %res9 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v9,
+ %res9 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v9,
<8 x i32> undef, <4 x i32> undef, i32 9)
- %res10 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v10,
+ %res10 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v10,
<8 x i32> undef, <4 x i32> undef, i32 10)
- %res11 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v11,
+ %res11 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v11,
<8 x i32> undef, <4 x i32> undef, i32 11)
- %res12 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v12,
+ %res12 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v12,
<8 x i32> undef, <4 x i32> undef, i32 12)
- %res13 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v13,
+ %res13 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v13,
<8 x i32> undef, <4 x i32> undef, i32 13)
- %res14 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v14,
+ %res14 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v14,
<8 x i32> undef, <4 x i32> undef, i32 14)
- %res15 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v15,
+ %res15 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v15,
<8 x i32> undef, <4 x i32> undef, i32 15)
- %res16 = call <4 x float> @llvm.SI.sample.(i32 15, <4 x i32> %v16,
+ %res16 = call <4 x float> @llvm.SI.sample.(<4 x i32> %v16,
<8 x i32> undef, <4 x i32> undef, i32 16)
%e1 = extractelement <4 x float> %res1, i32 0
- %e2 = extractelement <4 x float> %res2, i32 0
- %e3 = extractelement <4 x float> %res3, i32 0
- %e4 = extractelement <4 x float> %res4, i32 0
- %e5 = extractelement <4 x float> %res5, i32 0
- %e6 = extractelement <4 x float> %res6, i32 0
- %e7 = extractelement <4 x float> %res7, i32 0
- %e8 = extractelement <4 x float> %res8, i32 0
- %e9 = extractelement <4 x float> %res9, i32 0
- %e10 = extractelement <4 x float> %res10, i32 0
- %e11 = extractelement <4 x float> %res11, i32 0
- %e12 = extractelement <4 x float> %res12, i32 0
- %e13 = extractelement <4 x float> %res13, i32 0
- %e14 = extractelement <4 x float> %res14, i32 0
- %e15 = extractelement <4 x float> %res15, i32 0
- %e16 = extractelement <4 x float> %res16, i32 0
+ %e2 = extractelement <4 x float> %res2, i32 1
+ %e3 = extractelement <4 x float> %res3, i32 2
+ %e4 = extractelement <4 x float> %res4, i32 3
+ %t0 = extractelement <4 x float> %res5, i32 0
+ %t1 = extractelement <4 x float> %res5, i32 1
+ %e5 = fadd float %t0, %t1
+ %t2 = extractelement <4 x float> %res6, i32 0
+ %t3 = extractelement <4 x float> %res6, i32 2
+ %e6 = fadd float %t2, %t3
+ %t4 = extractelement <4 x float> %res7, i32 0
+ %t5 = extractelement <4 x float> %res7, i32 3
+ %e7 = fadd float %t4, %t5
+ %t6 = extractelement <4 x float> %res8, i32 1
+ %t7 = extractelement <4 x float> %res8, i32 2
+ %e8 = fadd float %t6, %t7
+ %t8 = extractelement <4 x float> %res9, i32 1
+ %t9 = extractelement <4 x float> %res9, i32 3
+ %e9 = fadd float %t8, %t9
+ %t10 = extractelement <4 x float> %res10, i32 2
+ %t11 = extractelement <4 x float> %res10, i32 3
+ %e10 = fadd float %t10, %t11
+ %t12 = extractelement <4 x float> %res11, i32 0
+ %t13 = extractelement <4 x float> %res11, i32 1
+ %t14 = extractelement <4 x float> %res11, i32 2
+ %t15 = fadd float %t12, %t13
+ %e11 = fadd float %t14, %t15
+ %t16 = extractelement <4 x float> %res12, i32 0
+ %t17 = extractelement <4 x float> %res12, i32 1
+ %t18 = extractelement <4 x float> %res12, i32 3
+ %t19 = fadd float %t16, %t17
+ %e12 = fadd float %t18, %t19
+ %t20 = extractelement <4 x float> %res13, i32 0
+ %t21 = extractelement <4 x float> %res13, i32 2
+ %t22 = extractelement <4 x float> %res13, i32 3
+ %t23 = fadd float %t20, %t21
+ %e13 = fadd float %t22, %t23
+ %t24 = extractelement <4 x float> %res14, i32 1
+ %t25 = extractelement <4 x float> %res14, i32 2
+ %t26 = extractelement <4 x float> %res14, i32 3
+ %t27 = fadd float %t24, %t25
+ %e14 = fadd float %t26, %t27
+ %t28 = extractelement <4 x float> %res15, i32 0
+ %t29 = extractelement <4 x float> %res15, i32 1
+ %t30 = extractelement <4 x float> %res15, i32 2
+ %t31 = extractelement <4 x float> %res15, i32 3
+ %t32 = fadd float %t28, %t29
+ %t33 = fadd float %t30, %t31
+ %e15 = fadd float %t32, %t33
+ %e16 = extractelement <4 x float> %res16, i32 3
%s1 = fadd float %e1, %e2
%s2 = fadd float %s1, %e3
%s3 = fadd float %s2, %e4
@@ -101,6 +135,6 @@ define void @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
ret void
}
-declare <4 x float> @llvm.SI.sample.(i32, <4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
+declare <4 x float> @llvm.SI.sample.(<4 x i32>, <8 x i32>, <4 x i32>, i32) readnone
declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/R600/llvm.cos.ll b/test/CodeGen/R600/llvm.cos.ll
index dc120bfb00c2..9b2816707042 100644
--- a/test/CodeGen/R600/llvm.cos.ll
+++ b/test/CodeGen/R600/llvm.cos.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: COS T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: COS * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/llvm.pow.ll b/test/CodeGen/R600/llvm.pow.ll
index b4ce9f429f16..91b774282906 100644
--- a/test/CodeGen/R600/llvm.pow.ll
+++ b/test/CodeGen/R600/llvm.pow.ll
@@ -1,8 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: LOG_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK-NEXT: MUL NON-IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-;CHECK-NEXT: EXP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: LOG_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-NEXT: MUL NON-IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK-NEXT: EXP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/llvm.sin.ll b/test/CodeGen/R600/llvm.sin.ll
index 5cd6998c9370..803dc2d6debc 100644
--- a/test/CodeGen/R600/llvm.sin.ll
+++ b/test/CodeGen/R600/llvm.sin.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: SIN T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: SIN * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/load.constant_addrspace.f32.ll b/test/CodeGen/R600/load.constant_addrspace.f32.ll
deleted file mode 100644
index 93627283bb94..000000000000
--- a/test/CodeGen/R600/load.constant_addrspace.f32.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: VTX_READ_32 T{{[0-9]+\.X, T[0-9]+\.X}}
-
-define void @test(float addrspace(1)* %out, float addrspace(2)* %in) {
- %1 = load float addrspace(2)* %in
- store float %1, float addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/load.i8.ll b/test/CodeGen/R600/load.i8.ll
deleted file mode 100644
index b070dcd52049..000000000000
--- a/test/CodeGen/R600/load.i8.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
-
-define void @test(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
- %1 = load i8 addrspace(1)* %in
- %2 = zext i8 %1 to i32
- store i32 %2, i32 addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
new file mode 100644
index 000000000000..b03245ae87b3
--- /dev/null
+++ b/test/CodeGen/R600/load.ll
@@ -0,0 +1,20 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; Load an i8 value from the global address space.
+; CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @load_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %in) {
+ %1 = load i8 addrspace(1)* %in
+ %2 = zext i8 %1 to i32
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an f32 value from the constant address space.
+; CHECK: VTX_READ_32 T{{[0-9]+\.X, T[0-9]+\.X}}
+
+define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
+ %1 = load float addrspace(2)* %in
+ store float %1, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/loop-address.ll b/test/CodeGen/R600/loop-address.ll
new file mode 100644
index 000000000000..8a5458b89809
--- /dev/null
+++ b/test/CodeGen/R600/loop-address.ll
@@ -0,0 +1,41 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: TEX
+;CHECK: ALU_PUSH
+;CHECK: JUMP @4
+;CHECK: ELSE @16
+;CHECK: TEX
+;CHECK: LOOP_START_DX10 @15
+;CHECK: LOOP_BREAK @14
+;CHECK: POP @16
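+; The @N operands are absolute addresses within the control-flow program;
+; pinning them here guards the CF address computation against regressions.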
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-v2048:2048:2048-n32:64"
+target triple = "r600--"
+
+define void @loop_ge(i32 addrspace(1)* nocapture %out, i32 %iterations) #0 {
+entry:
+ %cmp5 = icmp sgt i32 %iterations, 0
+ br i1 %cmp5, label %for.body, label %for.end
+
+for.body: ; preds = %for.body, %entry
+ %i.07.in = phi i32 [ %i.07, %for.body ], [ %iterations, %entry ]
+ %ai.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ %i.07 = add nsw i32 %i.07.in, -1
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %ai.06
+ store i32 %i.07, i32 addrspace(1)* %arrayidx, align 4
+ %add = add nsw i32 %ai.06, 1
+ %exitcond = icmp eq i32 %add, %iterations
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+attributes #0 = { nounwind "fp-contract-model"="standard" "relocation-model"="pic" "ssp-buffers-size"="8" }
+
+!opencl.kernels = !{!0, !1, !2, !3}
+
+!0 = metadata !{void (i32 addrspace(1)*, i32)* @loop_ge}
+!1 = metadata !{null}
+!2 = metadata !{null}
+!3 = metadata !{null}
diff --git a/test/CodeGen/R600/lshl.ll b/test/CodeGen/R600/lshl.ll
index 423adb9da900..fb698da62719 100644
--- a/test/CodeGen/R600/lshl.ll
+++ b/test/CodeGen/R600/lshl.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: V_LSHLREV_B32_e32 VGPR0, 1, VGPR0
diff --git a/test/CodeGen/R600/lshr.ll b/test/CodeGen/R600/lshr.ll
index 551eac1d76bf..e0ed3ac07866 100644
--- a/test/CodeGen/R600/lshr.ll
+++ b/test/CodeGen/R600/lshr.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: V_LSHRREV_B32_e32 VGPR0, 1, VGPR0
diff --git a/test/CodeGen/R600/mul.ll b/test/CodeGen/R600/mul.ll
new file mode 100644
index 000000000000..7278e9039840
--- /dev/null
+++ b/test/CodeGen/R600/mul.ll
@@ -0,0 +1,16 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; mul24 and mad24 are affected
+;CHECK: MULLO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MULLO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MULLO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: MULLO_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32> addrspace(1) * %b_ptr
+ %result = mul <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/mulhu.ll b/test/CodeGen/R600/mulhu.ll
index 28744e00c3cf..bc17a597873e 100644
--- a/test/CodeGen/R600/mulhu.ll
+++ b/test/CodeGen/R600/mulhu.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: V_MOV_B32_e32 VGPR1, -1431655765
;CHECK-NEXT: V_MUL_HI_U32 VGPR0, VGPR0, VGPR1, 0, 0, 0, 0, 0
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
new file mode 100644
index 000000000000..b0dbb021e822
--- /dev/null
+++ b/test/CodeGen/R600/or.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @or_v4i32
+; CHECK: OR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: OR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: OR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: OR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %result = or <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/predicates.ll b/test/CodeGen/R600/predicates.ll
index eb8b052b6f72..0d3eeef26307 100644
--- a/test/CodeGen/R600/predicates.ll
+++ b/test/CodeGen/R600/predicates.ll
@@ -4,8 +4,8 @@
; when it is legal to do so.
; CHECK: @simple_if
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Pred,
-; CHECK: LSHL T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
@@ -22,9 +22,9 @@ ENDIF:
}
; CHECK: @simple_if_else
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Pred,
-; CHECK: LSH{{[LR] T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
-; CHECK: LSH{{[LR] T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
+; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
+; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @simple_if_else(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
@@ -46,11 +46,11 @@ ENDIF:
; CHECK: @nested_if
; CHECK: ALU_PUSH_BEFORE
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Exec
; CHECK: JUMP
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Pred,
-; CHECK: LSHL T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
; CHECK: POP
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Exec
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @nested_if(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
@@ -73,12 +73,12 @@ ENDIF:
; CHECK: @nested_if_else
; CHECK: ALU_PUSH_BEFORE
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Exec
; CHECK: JUMP
-; CHECK: PRED_SET{{[EGN][ET]*}}_INT Pred,
-; CHECK: LSH{{[LR] T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
-; CHECK: LSH{{[LR] T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, 0(0.000000e+00) Pred_sel
; CHECK: POP
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Exec
+; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
+; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
+; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
define void @nested_if_else(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sgt i32 %in, 0
diff --git a/test/CodeGen/R600/pv.ll b/test/CodeGen/R600/pv.ll
new file mode 100644
index 000000000000..37c3d9d7d6d1
--- /dev/null
+++ b/test/CodeGen/R600/pv.ll
@@ -0,0 +1,244 @@
+; RUN: llc < %s -march=r600 | FileCheck %s
+
+;CHECK: DOT4 * T{{[0-9]\.W}} (MASKED)
+;CHECK-NEXT: CNDGE T{{[0-9]\.[XYZW]}}, PV.x
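+; PV holds the result of the previous ALU group, so even though the DOT4
+; destination is masked, the following CNDGE can still read it via PV.x.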
+
+define void @main() #0 {
+main_body:
+ %0 = call float @llvm.R600.load.input(i32 4)
+ %1 = call float @llvm.R600.load.input(i32 5)
+ %2 = call float @llvm.R600.load.input(i32 6)
+ %3 = call float @llvm.R600.load.input(i32 7)
+ %4 = call float @llvm.R600.load.input(i32 8)
+ %5 = call float @llvm.R600.load.input(i32 9)
+ %6 = call float @llvm.R600.load.input(i32 10)
+ %7 = call float @llvm.R600.load.input(i32 11)
+ %8 = call float @llvm.R600.load.input(i32 12)
+ %9 = call float @llvm.R600.load.input(i32 13)
+ %10 = call float @llvm.R600.load.input(i32 14)
+ %11 = call float @llvm.R600.load.input(i32 15)
+ %12 = call float @llvm.R600.load.input(i32 16)
+ %13 = call float @llvm.R600.load.input(i32 17)
+ %14 = call float @llvm.R600.load.input(i32 18)
+ %15 = call float @llvm.R600.load.input(i32 19)
+ %16 = call float @llvm.R600.load.input(i32 20)
+ %17 = call float @llvm.R600.load.input(i32 21)
+ %18 = call float @llvm.R600.load.input(i32 22)
+ %19 = call float @llvm.R600.load.input(i32 23)
+ %20 = call float @llvm.R600.load.input(i32 24)
+ %21 = call float @llvm.R600.load.input(i32 25)
+ %22 = call float @llvm.R600.load.input(i32 26)
+ %23 = call float @llvm.R600.load.input(i32 27)
+ %24 = call float @llvm.R600.load.input(i32 28)
+ %25 = call float @llvm.R600.load.input(i32 29)
+ %26 = call float @llvm.R600.load.input(i32 30)
+ %27 = call float @llvm.R600.load.input(i32 31)
+ %28 = load <4 x float> addrspace(8)* null
+ %29 = extractelement <4 x float> %28, i32 0
+ %30 = fmul float %0, %29
+ %31 = load <4 x float> addrspace(8)* null
+ %32 = extractelement <4 x float> %31, i32 1
+ %33 = fmul float %0, %32
+ %34 = load <4 x float> addrspace(8)* null
+ %35 = extractelement <4 x float> %34, i32 2
+ %36 = fmul float %0, %35
+ %37 = load <4 x float> addrspace(8)* null
+ %38 = extractelement <4 x float> %37, i32 3
+ %39 = fmul float %0, %38
+ %40 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %41 = extractelement <4 x float> %40, i32 0
+ %42 = fmul float %1, %41
+ %43 = fadd float %42, %30
+ %44 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %45 = extractelement <4 x float> %44, i32 1
+ %46 = fmul float %1, %45
+ %47 = fadd float %46, %33
+ %48 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %49 = extractelement <4 x float> %48, i32 2
+ %50 = fmul float %1, %49
+ %51 = fadd float %50, %36
+ %52 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 1)
+ %53 = extractelement <4 x float> %52, i32 3
+ %54 = fmul float %1, %53
+ %55 = fadd float %54, %39
+ %56 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %57 = extractelement <4 x float> %56, i32 0
+ %58 = fmul float %2, %57
+ %59 = fadd float %58, %43
+ %60 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %61 = extractelement <4 x float> %60, i32 1
+ %62 = fmul float %2, %61
+ %63 = fadd float %62, %47
+ %64 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %65 = extractelement <4 x float> %64, i32 2
+ %66 = fmul float %2, %65
+ %67 = fadd float %66, %51
+ %68 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 2)
+ %69 = extractelement <4 x float> %68, i32 3
+ %70 = fmul float %2, %69
+ %71 = fadd float %70, %55
+ %72 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %73 = extractelement <4 x float> %72, i32 0
+ %74 = fmul float %3, %73
+ %75 = fadd float %74, %59
+ %76 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %77 = extractelement <4 x float> %76, i32 1
+ %78 = fmul float %3, %77
+ %79 = fadd float %78, %63
+ %80 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %81 = extractelement <4 x float> %80, i32 2
+ %82 = fmul float %3, %81
+ %83 = fadd float %82, %67
+ %84 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 3)
+ %85 = extractelement <4 x float> %84, i32 3
+ %86 = fmul float %3, %85
+ %87 = fadd float %86, %71
+ %88 = insertelement <4 x float> undef, float %4, i32 0
+ %89 = insertelement <4 x float> %88, float %5, i32 1
+ %90 = insertelement <4 x float> %89, float %6, i32 2
+ %91 = insertelement <4 x float> %90, float 0.000000e+00, i32 3
+ %92 = insertelement <4 x float> undef, float %4, i32 0
+ %93 = insertelement <4 x float> %92, float %5, i32 1
+ %94 = insertelement <4 x float> %93, float %6, i32 2
+ %95 = insertelement <4 x float> %94, float 0.000000e+00, i32 3
+ %96 = call float @llvm.AMDGPU.dp4(<4 x float> %91, <4 x float> %95)
+ %97 = call float @fabs(float %96)
+ %98 = call float @llvm.AMDGPU.rsq(float %97)
+ %99 = fmul float %4, %98
+ %100 = fmul float %5, %98
+ %101 = fmul float %6, %98
+ %102 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %103 = extractelement <4 x float> %102, i32 0
+ %104 = fmul float %103, %8
+ %105 = fadd float %104, %20
+ %106 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %107 = extractelement <4 x float> %106, i32 1
+ %108 = fmul float %107, %9
+ %109 = fadd float %108, %21
+ %110 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 4)
+ %111 = extractelement <4 x float> %110, i32 2
+ %112 = fmul float %111, %10
+ %113 = fadd float %112, %22
+ %114 = call float @llvm.AMDIL.clamp.(float %105, float 0.000000e+00, float 1.000000e+00)
+ %115 = call float @llvm.AMDIL.clamp.(float %109, float 0.000000e+00, float 1.000000e+00)
+ %116 = call float @llvm.AMDIL.clamp.(float %113, float 0.000000e+00, float 1.000000e+00)
+ %117 = call float @llvm.AMDIL.clamp.(float %15, float 0.000000e+00, float 1.000000e+00)
+ %118 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %119 = extractelement <4 x float> %118, i32 0
+ %120 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %121 = extractelement <4 x float> %120, i32 1
+ %122 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 5)
+ %123 = extractelement <4 x float> %122, i32 2
+ %124 = insertelement <4 x float> undef, float %99, i32 0
+ %125 = insertelement <4 x float> %124, float %100, i32 1
+ %126 = insertelement <4 x float> %125, float %101, i32 2
+ %127 = insertelement <4 x float> %126, float 0.000000e+00, i32 3
+ %128 = insertelement <4 x float> undef, float %119, i32 0
+ %129 = insertelement <4 x float> %128, float %121, i32 1
+ %130 = insertelement <4 x float> %129, float %123, i32 2
+ %131 = insertelement <4 x float> %130, float 0.000000e+00, i32 3
+ %132 = call float @llvm.AMDGPU.dp4(<4 x float> %127, <4 x float> %131)
+ %133 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %134 = extractelement <4 x float> %133, i32 0
+ %135 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %136 = extractelement <4 x float> %135, i32 1
+ %137 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 7)
+ %138 = extractelement <4 x float> %137, i32 2
+ %139 = insertelement <4 x float> undef, float %99, i32 0
+ %140 = insertelement <4 x float> %139, float %100, i32 1
+ %141 = insertelement <4 x float> %140, float %101, i32 2
+ %142 = insertelement <4 x float> %141, float 0.000000e+00, i32 3
+ %143 = insertelement <4 x float> undef, float %134, i32 0
+ %144 = insertelement <4 x float> %143, float %136, i32 1
+ %145 = insertelement <4 x float> %144, float %138, i32 2
+ %146 = insertelement <4 x float> %145, float 0.000000e+00, i32 3
+ %147 = call float @llvm.AMDGPU.dp4(<4 x float> %142, <4 x float> %146)
+ %148 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %149 = extractelement <4 x float> %148, i32 0
+ %150 = fmul float %149, %8
+ %151 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %152 = extractelement <4 x float> %151, i32 1
+ %153 = fmul float %152, %9
+ %154 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 8)
+ %155 = extractelement <4 x float> %154, i32 2
+ %156 = fmul float %155, %10
+ %157 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %158 = extractelement <4 x float> %157, i32 0
+ %159 = fmul float %158, %12
+ %160 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %161 = extractelement <4 x float> %160, i32 1
+ %162 = fmul float %161, %13
+ %163 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 9)
+ %164 = extractelement <4 x float> %163, i32 2
+ %165 = fmul float %164, %14
+ %166 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %167 = extractelement <4 x float> %166, i32 0
+ %168 = fmul float %167, %16
+ %169 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %170 = extractelement <4 x float> %169, i32 1
+ %171 = fmul float %170, %17
+ %172 = load <4 x float> addrspace(8)* getelementptr ([1024 x <4 x float>] addrspace(8)* null, i64 0, i32 10)
+ %173 = extractelement <4 x float> %172, i32 2
+ %174 = fmul float %173, %18
+ %175 = fcmp uge float %132, 0.000000e+00
+ %176 = select i1 %175, float %132, float 0.000000e+00
+ %177 = fcmp uge float %147, 0.000000e+00
+ %178 = select i1 %177, float %147, float 0.000000e+00
+ %179 = call float @llvm.pow.f32(float %178, float %24)
+ %180 = fcmp ult float %132, 0.000000e+00
+ %181 = select i1 %180, float 0.000000e+00, float %179
+ %182 = fadd float %150, %105
+ %183 = fadd float %153, %109
+ %184 = fadd float %156, %113
+ %185 = fmul float %176, %159
+ %186 = fadd float %185, %182
+ %187 = fmul float %176, %162
+ %188 = fadd float %187, %183
+ %189 = fmul float %176, %165
+ %190 = fadd float %189, %184
+ %191 = fmul float %181, %168
+ %192 = fadd float %191, %186
+ %193 = fmul float %181, %171
+ %194 = fadd float %193, %188
+ %195 = fmul float %181, %174
+ %196 = fadd float %195, %190
+ %197 = call float @llvm.AMDIL.clamp.(float %192, float 0.000000e+00, float 1.000000e+00)
+ %198 = call float @llvm.AMDIL.clamp.(float %194, float 0.000000e+00, float 1.000000e+00)
+ %199 = call float @llvm.AMDIL.clamp.(float %196, float 0.000000e+00, float 1.000000e+00)
+ %200 = insertelement <4 x float> undef, float %75, i32 0
+ %201 = insertelement <4 x float> %200, float %79, i32 1
+ %202 = insertelement <4 x float> %201, float %83, i32 2
+ %203 = insertelement <4 x float> %202, float %87, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %203, i32 60, i32 1)
+ %204 = insertelement <4 x float> undef, float %197, i32 0
+ %205 = insertelement <4 x float> %204, float %198, i32 1
+ %206 = insertelement <4 x float> %205, float %199, i32 2
+ %207 = insertelement <4 x float> %206, float %117, i32 3
+ call void @llvm.R600.store.swizzle(<4 x float> %207, i32 0, i32 2)
+ ret void
+}
+
+; Function Attrs: readnone
+declare float @llvm.R600.load.input(i32) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.dp4(<4 x float>, <4 x float>) #1
+
+; Function Attrs: readonly
+declare float @fabs(float) #2
+
+; Function Attrs: readnone
+declare float @llvm.AMDGPU.rsq(float) #1
+
+; Function Attrs: readnone
+declare float @llvm.AMDIL.clamp.(float, float, float) #1
+
+; Function Attrs: nounwind readonly
+declare float @llvm.pow.f32(float, float) #3
+
+declare void @llvm.R600.store.swizzle(<4 x float>, i32, i32)
+
+attributes #0 = { "ShaderType"="1" }
+attributes #1 = { readnone }
+attributes #2 = { readonly }
+attributes #3 = { nounwind readonly }
diff --git a/test/CodeGen/R600/r600-encoding.ll b/test/CodeGen/R600/r600-encoding.ll
new file mode 100644
index 000000000000..c8040a1b4cd5
--- /dev/null
+++ b/test/CodeGen/R600/r600-encoding.ll
@@ -0,0 +1,24 @@
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -show-mc-encoding -mcpu=rs880 | FileCheck --check-prefix=R600-CHECK %s
+
+; The earliest R600 GPUs have a slightly different encoding than the rest of
+; the VLIW4/5 GPUs.
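+; The CHECK patterns below pin the byte where the two encodings diverge:
+; 0x01 for the Evergreen-class parts and 0x02 for the original R600 family.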
+
+; EG-CHECK: @test
+; EG-CHECK: MUL_IEEE {{[ *TXYZW.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x01,0x[0-9a-f]+,0x[0-9a-f]+}}]
+
+; R600-CHECK: @test
+; R600-CHECK: MUL_IEEE {{[ *TXYZW.,0-9]+}} ; encoding: [{{0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x[0-9a-f]+,0x10,0x02,0x[0-9a-f]+,0x[0-9a-f]+}}]
+
+define void @test() {
+entry:
+ %0 = call float @llvm.R600.load.input(i32 0)
+ %1 = call float @llvm.R600.load.input(i32 1)
+ %2 = fmul float %0, %1
+ call void @llvm.AMDGPU.store.output(float %2, i32 0)
+ ret void
+}
+
+declare float @llvm.R600.load.input(i32) readnone
+
+declare void @llvm.AMDGPU.store.output(float, i32)
diff --git a/test/CodeGen/R600/reciprocal.ll b/test/CodeGen/R600/reciprocal.ll
index 6838c1ae3662..27839296703f 100644
--- a/test/CodeGen/R600/reciprocal.ll
+++ b/test/CodeGen/R600/reciprocal.ll
@@ -1,6 +1,6 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-;CHECK: RECIP_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @test() {
%r0 = call float @llvm.R600.load.input(i32 0)
diff --git a/test/CodeGen/R600/sdiv.ll b/test/CodeGen/R600/sdiv.ll
index 3556facfbab3..3dd10c8a61c1 100644
--- a/test/CodeGen/R600/sdiv.ll
+++ b/test/CodeGen/R600/sdiv.ll
@@ -9,7 +9,7 @@
; This was fixed by adding an additional pattern in R600Instructions.td to
; match this pattern with a CNDGE_INT.
-; CHECK: RETURN
+; CHECK: CF_END
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/selectcc_cnde.ll b/test/CodeGen/R600/selectcc-cnd.ll
index f0a0f512ba15..d7287b487896 100644
--- a/test/CodeGen/R600/selectcc_cnde.ll
+++ b/test/CodeGen/R600/selectcc-cnd.ll
@@ -1,7 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
;CHECK-NOT: SETE
-;CHECK: CNDE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], 1.0, literal.x, [-0-9]+\(2.0}}
+;CHECK: CNDE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1.0, literal.x,
+;CHECK-NEXT: {{[-0-9]+\(2.0}}
define void @test(float addrspace(1)* %out, float addrspace(1)* %in) {
%1 = load float addrspace(1)* %in
%2 = fcmp oeq float %1, 0.0
diff --git a/test/CodeGen/R600/selectcc_cnde_int.ll b/test/CodeGen/R600/selectcc-cnde-int.ll
index b38078e26db6..768dc7dbf418 100644
--- a/test/CodeGen/R600/selectcc_cnde_int.ll
+++ b/test/CodeGen/R600/selectcc-cnde-int.ll
@@ -1,7 +1,8 @@
;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
;CHECK-NOT: SETE_INT
-;CHECK: CNDE_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], 1, literal.x, 2}}
+;CHECK: CNDE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, literal.x,
+;CHECK-NEXT: 2
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%1 = load i32 addrspace(1)* %in
%2 = icmp eq i32 %1, 0
diff --git a/test/CodeGen/R600/selectcc-icmp-select-float.ll b/test/CodeGen/R600/selectcc-icmp-select-float.ll
index 359ca1e6f8ce..6743800490b3 100644
--- a/test/CodeGen/R600/selectcc-icmp-select-float.ll
+++ b/test/CodeGen/R600/selectcc-icmp-select-float.ll
@@ -2,7 +2,8 @@
; Note additional optimizations may cause this SGT to be replaced with a
; CND* instruction.
-; CHECK: SETGT_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], literal.x, -1}}
+; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: -1
; Test a selectcc with i32 LHS/RHS and float True/False
define void @test(float addrspace(1)* %out, i32 addrspace(1)* %in) {
diff --git a/test/CodeGen/R600/set-dx10.ll b/test/CodeGen/R600/set-dx10.ll
index 54febcf0e68e..eb6e9d2f2ba5 100644
--- a/test/CodeGen/R600/set-dx10.ll
+++ b/test/CodeGen/R600/set-dx10.ll
@@ -5,7 +5,8 @@
; SET*DX10 instructions.
; CHECK: @fcmp_une_select_fptosi
-; CHECK: SETNE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_une_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp une float %in, 5.0
@@ -17,7 +18,8 @@ entry:
}
; CHECK: @fcmp_une_select_i32
-; CHECK: SETNE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETNE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_une_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp une float %in, 5.0
@@ -27,7 +29,8 @@ entry:
}
; CHECK: @fcmp_ueq_select_fptosi
-; CHECK: SETE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ueq_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ueq float %in, 5.0
@@ -39,7 +42,8 @@ entry:
}
; CHECK: @fcmp_ueq_select_i32
-; CHECK: SETE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ueq_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ueq float %in, 5.0
@@ -49,7 +53,8 @@ entry:
}
; CHECK: @fcmp_ugt_select_fptosi
-; CHECK: SETGT_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ugt_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ugt float %in, 5.0
@@ -61,7 +66,8 @@ entry:
}
; CHECK: @fcmp_ugt_select_i32
-; CHECK: SETGT_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ugt_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ugt float %in, 5.0
@@ -71,7 +77,8 @@ entry:
}
; CHECK: @fcmp_uge_select_fptosi
-; CHECK: SETGE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_uge_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp uge float %in, 5.0
@@ -83,7 +90,8 @@ entry:
}
; CHECK: @fcmp_uge_select_i32
-; CHECK: SETGE_DX10 T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x, 1084227584(5.000000e+00)
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, T{{[0-9]+\.[XYZW]}}, literal.x,
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_uge_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp uge float %in, 5.0
@@ -93,7 +101,8 @@ entry:
}
; CHECK: @fcmp_ule_select_fptosi
-; CHECK: SETGE_DX10 T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ule_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ule float %in, 5.0
@@ -105,7 +114,8 @@ entry:
}
; CHECK: @fcmp_ule_select_i32
-; CHECK: SETGE_DX10 T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGE_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ule_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ule float %in, 5.0
@@ -115,7 +125,8 @@ entry:
}
; CHECK: @fcmp_ult_select_fptosi
-; CHECK: SETGT_DX10 T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ult_select_fptosi(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ult float %in, 5.0
@@ -127,7 +138,8 @@ entry:
}
; CHECK: @fcmp_ult_select_i32
-; CHECK: SETGT_DX10 T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGT_DX10 * T{{[0-9]+\.[XYZW]}}, literal.x, T{{[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @fcmp_ult_select_i32(i32 addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ult float %in, 5.0
diff --git a/test/CodeGen/R600/setcc.v4i32.ll b/test/CodeGen/R600/setcc.ll
index 0752f2e63dbf..0752f2e63dbf 100644
--- a/test/CodeGen/R600/setcc.v4i32.ll
+++ b/test/CodeGen/R600/setcc.ll
diff --git a/test/CodeGen/R600/seto.ll b/test/CodeGen/R600/seto.ll
index 5ab4b87d570c..4622203ffdbc 100644
--- a/test/CodeGen/R600/seto.ll
+++ b/test/CodeGen/R600/seto.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: V_CMP_O_F32_e64 SGPR0_SGPR1, VGPR0, VGPR0, 0, 0, 0, 0
diff --git a/test/CodeGen/R600/setuo.ll b/test/CodeGen/R600/setuo.ll
index 320835576d41..0bf5801b1c33 100644
--- a/test/CodeGen/R600/setuo.ll
+++ b/test/CodeGen/R600/setuo.ll
@@ -1,4 +1,4 @@
-;RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
;CHECK: V_CMP_U_F32_e64 SGPR0_SGPR1, VGPR0, VGPR0, 0, 0, 0, 0
diff --git a/test/CodeGen/R600/shl.ll b/test/CodeGen/R600/shl.ll
new file mode 100644
index 000000000000..43cc1e26fc01
--- /dev/null
+++ b/test/CodeGen/R600/shl.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @shl_v4i32
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %result = shl <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/sint_to_fp.ll b/test/CodeGen/R600/sint_to_fp.ll
new file mode 100644
index 000000000000..91a8eb7f57b4
--- /dev/null
+++ b/test/CodeGen/R600/sint_to_fp.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @sint_to_fp_v4i32
+; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: INT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %value = load <4 x i32> addrspace(1) * %in
+ %result = sitofp <4 x i32> %value to <4 x float>
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/sra.ll b/test/CodeGen/R600/sra.ll
new file mode 100644
index 000000000000..972542d346f4
--- /dev/null
+++ b/test/CodeGen/R600/sra.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @ashr_v4i32
+; CHECK: ASHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ASHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ASHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: ASHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %result = ashr <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/srl.ll b/test/CodeGen/R600/srl.ll
new file mode 100644
index 000000000000..5f63600b75f0
--- /dev/null
+++ b/test/CodeGen/R600/srl.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @lshr_v4i32
+; CHECK: LSHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: LSHR * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %result = lshr <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/store.ll b/test/CodeGen/R600/store.ll
new file mode 100644
index 000000000000..4d673f3ea326
--- /dev/null
+++ b/test/CodeGen/R600/store.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
+; RUN: llc < %s -march=r600 -mcpu=verde | FileCheck --check-prefix=SI-CHECK %s
+
+; floating-point store
+; EG-CHECK: @store_f32
+; EG-CHECK: RAT_WRITE_CACHELESS_32_eg T{{[0-9]+\.X, T[0-9]+\.X}}, 1
+; SI-CHECK: @store_f32
+; SI-CHECK: BUFFER_STORE_DWORD
+
+define void @store_f32(float addrspace(1)* %out, float %in) {
+ store float %in, float addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/store.r600.ll b/test/CodeGen/R600/store.r600.ll
new file mode 100644
index 000000000000..5ffb7f1809f8
--- /dev/null
+++ b/test/CodeGen/R600/store.r600.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG-CHECK %s
+
+; XXX: Merge this test into store.ll once v4i32/v4f32 stores are supported on SI
+
+; v4i32 store
+; EG-CHECK: @store_v4i32
+; EG-CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+
+define void @store_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %1 = load <4 x i32> addrspace(1) * %in
+ store <4 x i32> %1, <4 x i32> addrspace(1)* %out
+ ret void
+}
+
+; v4f32 store
+; EG-CHECK: @store_v4f32
+; EG-CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
+define void @store_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
+ %1 = load <4 x float> addrspace(1) * %in
+ store <4 x float> %1, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/store.v4f32.ll b/test/CodeGen/R600/store.v4f32.ll
deleted file mode 100644
index 8b0d24445971..000000000000
--- a/test/CodeGen/R600/store.v4f32.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
-
-define void @test(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %1 = load <4 x float> addrspace(1) * %in
- store <4 x float> %1, <4 x float> addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/store.v4i32.ll b/test/CodeGen/R600/store.v4i32.ll
deleted file mode 100644
index a659815ddeba..000000000000
--- a/test/CodeGen/R600/store.v4i32.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-;CHECK: RAT_WRITE_CACHELESS_128 T{{[0-9]+\.XYZW, T[0-9]+\.X}}, 1
-
-define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %1 = load <4 x i32> addrspace(1) * %in
- store <4 x i32> %1, <4 x i32> addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/sub.ll b/test/CodeGen/R600/sub.ll
new file mode 100644
index 000000000000..12bfba39753e
--- /dev/null
+++ b/test/CodeGen/R600/sub.ll
@@ -0,0 +1,15 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+;CHECK: SUB_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: SUB_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: SUB_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+;CHECK: SUB_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %a = load <4 x i32> addrspace(1) * %in
+ %b = load <4 x i32> addrspace(1) * %b_ptr
+ %result = sub <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/udiv.v4i32.ll b/test/CodeGen/R600/udiv.ll
index 47657a6be75e..b81e3667ce89 100644
--- a/test/CodeGen/R600/udiv.v4i32.ll
+++ b/test/CodeGen/R600/udiv.ll
@@ -3,7 +3,7 @@
;The code generated by udiv is long and complex and may frequently change.
;The goal of this test is to make sure the ISel doesn't fail when it gets
;a v4i32 udiv
-;CHECK: RETURN
+;CHECK: CF_END
define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/uint_to_fp.ll b/test/CodeGen/R600/uint_to_fp.ll
new file mode 100644
index 000000000000..9054fc4c2cc8
--- /dev/null
+++ b/test/CodeGen/R600/uint_to_fp.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @uint_to_fp_v4i32
+; CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: UINT_TO_FLT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+ %value = load <4 x i32> addrspace(1) * %in
+ %result = uitofp <4 x i32> %value to <4 x float>
+ store <4 x float> %result, <4 x float> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/unsupported-cc.ll b/test/CodeGen/R600/unsupported-cc.ll
index b48c59151831..b311f4cfa7f9 100644
--- a/test/CodeGen/R600/unsupported-cc.ll
+++ b/test/CodeGen/R600/unsupported-cc.ll
@@ -3,7 +3,8 @@
; These tests are for condition codes that are not supported by the hardware
; CHECK: @slt
-; CHECK: SETGT_INT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 5(7.006492e-45)
+; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 5(7.006492e-45)
define void @slt(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp slt i32 %in, 5
@@ -13,7 +14,8 @@ entry:
}
; CHECK: @ult_i32
-; CHECK: SETGT_UINT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 5(7.006492e-45)
+; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 5(7.006492e-45)
define void @ult_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp ult i32 %in, 5
@@ -23,7 +25,8 @@ entry:
}
; CHECK: @ult_float
-; CHECK: SETGT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @ult_float(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ult float %in, 5.0
@@ -33,7 +36,8 @@ entry:
}
; CHECK: @olt
-; CHECK: SETGT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @olt(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp olt float %in, 5.0
@@ -43,7 +47,8 @@ entry:
}
; CHECK: @sle
-; CHECK: SETGT_INT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 6(8.407791e-45)
+; CHECK: SETGT_INT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 6(8.407791e-45)
define void @sle(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp sle i32 %in, 5
@@ -53,7 +58,8 @@ entry:
}
; CHECK: @ule_i32
-; CHECK: SETGT_UINT T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 6(8.407791e-45)
+; CHECK: SETGT_UINT * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 6(8.407791e-45)
define void @ule_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = icmp ule i32 %in, 5
@@ -63,7 +69,8 @@ entry:
}
; CHECK: @ule_float
-; CHECK: SETGE T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @ule_float(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ule float %in, 5.0
@@ -73,7 +80,8 @@ entry:
}
; CHECK: @ole
-; CHECK: SETGE T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}}, 1084227584(5.000000e+00)
+; CHECK: SETGE * T{{[0-9]+\.[XYZW]}}, literal.x, {{T[0-9]+\.[XYZW]}},
+; CHECK-NEXT: 1084227584(5.000000e+00)
define void @ole(float addrspace(1)* %out, float %in) {
entry:
%0 = fcmp ole float %in, 5.0
diff --git a/test/CodeGen/R600/urecip.ll b/test/CodeGen/R600/urecip.ll
new file mode 100644
index 000000000000..dad02dd76f0a
--- /dev/null
+++ b/test/CodeGen/R600/urecip.ll
@@ -0,0 +1,12 @@
+;RUN: llc < %s -march=r600 -mcpu=verde | FileCheck %s
+
+;CHECK: V_RCP_IFLAG_F32_e32
+
+define void @test(i32 %p, i32 %q) {
+ %i = udiv i32 %p, %q
+ %r = bitcast i32 %i to float
+ call void @llvm.SI.export(i32 15, i32 0, i32 1, i32 12, i32 0, float %r, float %r, float %r, float %r)
+ ret void
+}
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
diff --git a/test/CodeGen/R600/urem.v4i32.ll b/test/CodeGen/R600/urem.ll
index 2e7388caa6ce..a2cc0bd2e84e 100644
--- a/test/CodeGen/R600/urem.v4i32.ll
+++ b/test/CodeGen/R600/urem.ll
@@ -3,7 +3,7 @@
;The code generated by urem is long and complex and may frequently change.
;The goal of this test is to make sure the ISel doesn't fail when it gets
;a v4i32 urem
-;CHECK: RETURN
+;CHECK: CF_END
define void @test(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
diff --git a/test/CodeGen/R600/vec4-expand.ll b/test/CodeGen/R600/vec4-expand.ll
deleted file mode 100644
index 8f62bc692908..000000000000
--- a/test/CodeGen/R600/vec4-expand.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
-
-; CHECK: @fp_to_sint
-; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_INT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @fp_to_sint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %value = load <4 x float> addrspace(1) * %in
- %result = fptosi <4 x float> %value to <4 x i32>
- store <4 x i32> %result, <4 x i32> addrspace(1)* %out
- ret void
-}
-
-; CHECK: @fp_to_uint
-; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: FLT_TO_UINT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @fp_to_uint(<4 x i32> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %value = load <4 x float> addrspace(1) * %in
- %result = fptoui <4 x float> %value to <4 x i32>
- store <4 x i32> %result, <4 x i32> addrspace(1)* %out
- ret void
-}
-
-; CHECK: @sint_to_fp
-; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: INT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @sint_to_fp(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %value = load <4 x i32> addrspace(1) * %in
- %result = sitofp <4 x i32> %value to <4 x float>
- store <4 x float> %result, <4 x float> addrspace(1)* %out
- ret void
-}
-
-; CHECK: @uint_to_fp
-; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: UINT_TO_FLT T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @uint_to_fp(<4 x float> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %value = load <4 x i32> addrspace(1) * %in
- %result = uitofp <4 x i32> %value to <4 x float>
- store <4 x float> %result, <4 x float> addrspace(1)* %out
- ret void
-}
diff --git a/test/CodeGen/R600/vselect.ll b/test/CodeGen/R600/vselect.ll
new file mode 100644
index 000000000000..6e459df847e7
--- /dev/null
+++ b/test/CodeGen/R600/vselect.ll
@@ -0,0 +1,17 @@
+;RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @test_select_v4i32
+; CHECK: CNDE_INT T{{[0-9]+\.[XYZW], PV\.[xyzw], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: CNDE_INT * T{{[0-9]+\.[XYZW], PV\.[xyzw], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: CNDE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: CNDE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1) {
+entry:
+ %0 = load <4 x i32> addrspace(1)* %in0
+ %1 = load <4 x i32> addrspace(1)* %in1
+ %cmp = icmp ne <4 x i32> %0, %1
+ %result = select <4 x i1> %cmp, <4 x i32> %0, <4 x i32> %1
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/R600/xor.ll b/test/CodeGen/R600/xor.ll
new file mode 100644
index 000000000000..cf612e0a1fbe
--- /dev/null
+++ b/test/CodeGen/R600/xor.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+
+; CHECK: @xor_v4i32
+; CHECK: XOR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: XOR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: XOR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; CHECK: XOR_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+
+define void @xor_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <4 x i32> %b) {
+ %result = xor <4 x i32> %a, %b
+ store <4 x i32> %result, <4 x i32> addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/SPARC/64abi.ll b/test/CodeGen/SPARC/64abi.ll
new file mode 100644
index 000000000000..ec9713572141
--- /dev/null
+++ b/test/CodeGen/SPARC/64abi.ll
@@ -0,0 +1,378 @@
+; RUN: llc < %s -march=sparcv9 -disable-sparc-delay-filler | FileCheck %s
+
+; CHECK: intarg
+; The save/restore frame is not strictly necessary here, but without it we
+; would need to refer to %o registers instead.
+; CHECK: save %sp, -128, %sp
+; CHECK: stb %i0, [%i4]
+; CHECK: stb %i1, [%i4]
+; CHECK: sth %i2, [%i4]
+; CHECK: st %i3, [%i4]
+; CHECK: stx %i4, [%i4]
+; CHECK: st %i5, [%i4]
+; CHECK: ld [%fp+2227], [[R:%[gilo][0-7]]]
+; CHECK: st [[R]], [%i4]
+; CHECK: ldx [%fp+2231], [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%i4]
+; CHECK: restore
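+; (SPARC v9 biases %fp by 2047, so %fp+BIAS+176 is %fp+2223; the i32 at
+; +176 is right-aligned in its 8-byte slot, hence the ld from +2227.)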
+define void @intarg(i8 %a0, ; %i0
+ i8 %a1, ; %i1
+ i16 %a2, ; %i2
+ i32 %a3, ; %i3
+ i8* %a4, ; %i4
+ i32 %a5, ; %i5
+ i32 signext %a6, ; [%fp+BIAS+176]
+ i8* %a7) { ; [%fp+BIAS+184]
+ store i8 %a0, i8* %a4
+ store i8 %a1, i8* %a4
+ %p16 = bitcast i8* %a4 to i16*
+ store i16 %a2, i16* %p16
+ %p32 = bitcast i8* %a4 to i32*
+ store i32 %a3, i32* %p32
+ %pp = bitcast i8* %a4 to i8**
+ store i8* %a4, i8** %pp
+ store i32 %a5, i32* %p32
+ store i32 %a6, i32* %p32
+ store i8* %a7, i8** %pp
+ ret void
+}
+
+; CHECK: call_intarg
+; 16 register-save slots + 8 argument slots = 24 slots * 8 bytes = 192 bytes.
+; CHECK: save %sp, -192, %sp
+; Sign-extend and store the full 64 bits.
+; CHECK: sra %i0, 0, [[R:%[gilo][0-7]]]
+; CHECK: stx [[R]], [%sp+2223]
+; Use %o0-%o5 for outgoing arguments
+; CHECK: or %g0, 5, %o5
+; CHECK: call intarg
+; CHECK-NOT: add %sp
+; CHECK: restore
+define void @call_intarg(i32 %i0, i8* %i1) {
+ call void @intarg(i8 0, i8 1, i16 2, i32 3, i8* undef, i32 5, i32 %i0, i8* %i1)
+ ret void
+}
+
+; CHECK: floatarg
+; CHECK: save %sp, -128, %sp
+; CHECK: fstod %f1,
+; CHECK: faddd %f2,
+; CHECK: faddd %f4,
+; CHECK: faddd %f6,
+; CHECK: ld [%fp+2307], [[F:%f[0-9]+]]
+; CHECK: fadds %f31, [[F]]
+define double @floatarg(float %a0, ; %f1
+ double %a1, ; %d2
+ double %a2, ; %d4
+ double %a3, ; %d6
+ float %a4, ; %f9
+ float %a5, ; %f11
+ float %a6, ; %f13
+ float %a7, ; %f15
+ float %a8, ; %f17
+ float %a9, ; %f19
+ float %a10, ; %f21
+ float %a11, ; %f23
+ float %a12, ; %f25
+ float %a13, ; %f27
+ float %a14, ; %f29
+ float %a15, ; %f31
+ float %a16, ; [%fp+BIAS+256] (using 8 bytes)
+ double %a17) { ; [%fp+BIAS+264] (using 8 bytes)
+ %d0 = fpext float %a0 to double
+ %s1 = fadd double %a1, %d0
+ %s2 = fadd double %a2, %s1
+ %s3 = fadd double %a3, %s2
+ %s16 = fadd float %a15, %a16
+ %d16 = fpext float %s16 to double
+ %s17 = fadd double %d16, %s3
+ ret double %s17
+}
+
+; CHECK: call_floatarg
+; CHECK: save %sp, -272, %sp
+; Store 4 bytes, right-aligned in slot.
+; CHECK: st %f1, [%sp+2307]
+; Store 8 bytes in full slot.
+; CHECK: std %f2, [%sp+2311]
+; CHECK: fmovd %f2, %f4
+; CHECK: call floatarg
+; CHECK-NOT: add %sp
+; CHECK: restore
+define void @call_floatarg(float %f1, double %d2, float %f5, double *%p) {
+ %r = call double @floatarg(float %f5, double %d2, double %d2, double %d2,
+ float %f5, float %f5, float %f5, float %f5,
+ float %f5, float %f5, float %f5, float %f5,
+ float %f5, float %f5, float %f5, float %f5,
+ float %f1, double %d2)
+ store double %r, double* %p
+ ret void
+}
+
+; CHECK: mixedarg
+; CHECK: fstod %f3
+; CHECK: faddd %f6
+; CHECK: faddd %f16
+; CHECK: ldx [%fp+2231]
+; CHECK: ldx [%fp+2247]
+define void @mixedarg(i8 %a0, ; %i0
+ float %a1, ; %f3
+ i16 %a2, ; %i2
+ double %a3, ; %d6
+ i13 %a4, ; %i4
+ float %a5, ; %f11
+ i64 %a6, ; [%fp+BIAS+176]
+ double *%a7, ; [%fp+BIAS+184]
+ double %a8, ; %d16
+ i16* %a9) { ; [%fp+BIAS+200]
+ %d1 = fpext float %a1 to double
+ %s3 = fadd double %a3, %d1
+ %s8 = fadd double %a8, %s3
+ store double %s8, double* %a7
+ store i16 %a2, i16* %a9
+ ret void
+}
+
+; CHECK: call_mixedarg
+; CHECK: stx %i2, [%sp+2247]
+; CHECK: stx %i0, [%sp+2223]
+; CHECK: fmovd %f2, %f6
+; CHECK: fmovd %f2, %f16
+; CHECK: call mixedarg
+; CHECK-NOT: add %sp
+; CHECK: restore
+define void @call_mixedarg(i64 %i0, double %f2, i16* %i2) {
+ call void @mixedarg(i8 undef,
+ float undef,
+ i16 undef,
+ double %f2,
+ i13 undef,
+ float undef,
+ i64 %i0,
+ double* undef,
+ double %f2,
+ i16* %i2)
+ ret void
+}
+
+; The inreg attribute is used to indicate 32-bit struct elements that share
+; an 8-byte slot.
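+; An illustrative C-level shape (assumed): struct { int i; float f; },
+; where both 4-byte fields travel in a single 8-byte slot.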
+; CHECK: inreg_fi
+; CHECK: fstoi %f1
+; CHECK: srlx %i0, 32, [[R:%[gilo][0-7]]]
+; CHECK: sub [[R]],
+define i32 @inreg_fi(i32 inreg %a0, ; high bits of %i0
+ float inreg %a1) { ; %f1
+ %b1 = fptosi float %a1 to i32
+ %rv = sub i32 %a0, %b1
+ ret i32 %rv
+}
+
+; CHECK: call_inreg_fi
+; Allocate space for 6 arguments, even when only 2 are used.
+; CHECK: save %sp, -176, %sp
+; CHECK: sllx %i1, 32, %o0
+; CHECK: fmovs %f5, %f1
+; CHECK: call inreg_fi
+define void @call_inreg_fi(i32* %p, i32 %i1, float %f5) {
+ %x = call i32 @inreg_fi(i32 %i1, float %f5)
+ ret void
+}
+
+; CHECK: inreg_ff
+; CHECK: fsubs %f0, %f1, %f1
+define float @inreg_ff(float inreg %a0, ; %f0
+ float inreg %a1) { ; %f1
+ %rv = fsub float %a0, %a1
+ ret float %rv
+}
+
+; CHECK: call_inreg_ff
+; CHECK: fmovs %f3, %f0
+; CHECK: fmovs %f5, %f1
+; CHECK: call inreg_ff
+define void @call_inreg_ff(i32* %p, float %f3, float %f5) {
+ %x = call float @inreg_ff(float %f3, float %f5)
+ ret void
+}
+
+; CHECK: inreg_if
+; CHECK: fstoi %f0
+; CHECK: sub %i0
+define i32 @inreg_if(float inreg %a0, ; %f0
+ i32 inreg %a1) { ; low bits of %i0
+ %b0 = fptosi float %a0 to i32
+ %rv = sub i32 %a1, %b0
+ ret i32 %rv
+}
+
+; CHECK: call_inreg_if
+; CHECK: fmovs %f3, %f0
+; CHECK: or %g0, %i2, %o0
+; CHECK: call inreg_if
+define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
+ %x = call i32 @inreg_if(float %f3, i32 %i2)
+ ret void
+}
+
+; The frontend shouldn't do this. Just pass i64 instead.
+; CHECK: inreg_ii
+; CHECK: srlx %i0, 32, [[R:%[gilo][0-7]]]
+; CHECK: sub %i0, [[R]], %i0
+define i32 @inreg_ii(i32 inreg %a0, ; high bits of %i0
+ i32 inreg %a1) { ; low bits of %i0
+ %rv = sub i32 %a1, %a0
+ ret i32 %rv
+}
+
+; CHECK: call_inreg_ii
+; CHECK: srl %i2, 0, [[R2:%[gilo][0-7]]]
+; CHECK: sllx %i1, 32, [[R1:%[gilo][0-7]]]
+; CHECK: or [[R1]], [[R2]], %o0
+; CHECK: call inreg_ii
+define void @call_inreg_ii(i32* %p, i32 %i1, i32 %i2) {
+ %x = call i32 @inreg_ii(i32 %i1, i32 %i2)
+ ret void
+}
+
+; Structs up to 32 bytes in size can be returned in registers.
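+; ({ i64, i64 } comes back in %i0/%i1 below; larger aggregates presumably
+; go through a hidden memory pointer instead.)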
+; CHECK: ret_i64_pair
+; CHECK: ldx [%i2], %i0
+; CHECK: ldx [%i3], %i1
+define { i64, i64 } @ret_i64_pair(i32 %a0, i32 %a1, i64* %p, i64* %q) {
+ %r1 = load i64* %p
+ %rv1 = insertvalue { i64, i64 } undef, i64 %r1, 0
+ store i64 0, i64* %p
+ %r2 = load i64* %q
+ %rv2 = insertvalue { i64, i64 } %rv1, i64 %r2, 1
+ ret { i64, i64 } %rv2
+}
+
+; CHECK: call_ret_i64_pair
+; CHECK: call ret_i64_pair
+; CHECK: stx %o0, [%i0]
+; CHECK: stx %o1, [%i0]
+define void @call_ret_i64_pair(i64* %i0) {
+ %rv = call { i64, i64 } @ret_i64_pair(i32 undef, i32 undef,
+ i64* undef, i64* undef)
+ %e0 = extractvalue { i64, i64 } %rv, 0
+ store i64 %e0, i64* %i0
+ %e1 = extractvalue { i64, i64 } %rv, 1
+ store i64 %e1, i64* %i0
+ ret void
+}
+
+; This is not a C struct; each member uses 8 bytes.
+; CHECK: ret_i32_float_pair
+; CHECK: ld [%i2], %i0
+; CHECK: ld [%i3], %f3
+define { i32, float } @ret_i32_float_pair(i32 %a0, i32 %a1,
+ i32* %p, float* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
+ store i32 0, i32* %p
+ %r2 = load float* %q
+ %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
+ ret { i32, float } %rv2
+}
+
+; CHECK: call_ret_i32_float_pair
+; CHECK: call ret_i32_float_pair
+; CHECK: st %o0, [%i0]
+; CHECK: st %f3, [%i1]
+define void @call_ret_i32_float_pair(i32* %i0, float* %i1) {
+ %rv = call { i32, float } @ret_i32_float_pair(i32 undef, i32 undef,
+ i32* undef, float* undef)
+ %e0 = extractvalue { i32, float } %rv, 0
+ store i32 %e0, i32* %i0
+ %e1 = extractvalue { i32, float } %rv, 1
+ store float %e1, float* %i1
+ ret void
+}
+
+; This is a C struct; each member uses 4 bytes.
+; CHECK: ret_i32_float_packed
+; CHECK: ld [%i2], [[R:%[gilo][0-7]]]
+; CHECK: sllx [[R]], 32, %i0
+; CHECK: ld [%i3], %f1
+define inreg { i32, float } @ret_i32_float_packed(i32 %a0, i32 %a1,
+ i32* %p, float* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, float } undef, i32 %r1, 0
+ store i32 0, i32* %p
+ %r2 = load float* %q
+ %rv2 = insertvalue { i32, float } %rv1, float %r2, 1
+ ret { i32, float } %rv2
+}
+
+; CHECK: call_ret_i32_float_packed
+; CHECK: call ret_i32_float_packed
+; CHECK: srlx %o0, 32, [[R:%[gilo][0-7]]]
+; CHECK: st [[R]], [%i0]
+; CHECK: st %f1, [%i1]
+define void @call_ret_i32_float_packed(i32* %i0, float* %i1) {
+ %rv = call { i32, float } @ret_i32_float_packed(i32 undef, i32 undef,
+ i32* undef, float* undef)
+ %e0 = extractvalue { i32, float } %rv, 0
+ store i32 %e0, i32* %i0
+ %e1 = extractvalue { i32, float } %rv, 1
+ store float %e1, float* %i1
+ ret void
+}
+
+; The C frontend should use i64 to return { i32, i32 } structs, but verify that
+; we don't miscompile this case where both struct elements are placed in %i0.
+; CHECK: ret_i32_packed
+; CHECK: ld [%i2], [[R1:%[gilo][0-7]]]
+; CHECK: ld [%i3], [[R2:%[gilo][0-7]]]
+; CHECK: sllx [[R2]], 32, [[R3:%[gilo][0-7]]]
+; CHECK: or [[R3]], [[R1]], %i0
+define inreg { i32, i32 } @ret_i32_packed(i32 %a0, i32 %a1,
+ i32* %p, i32* %q) {
+ %r1 = load i32* %p
+ %rv1 = insertvalue { i32, i32 } undef, i32 %r1, 1
+ store i32 0, i32* %p
+ %r2 = load i32* %q
+ %rv2 = insertvalue { i32, i32 } %rv1, i32 %r2, 0
+ ret { i32, i32 } %rv2
+}
+
+; CHECK: call_ret_i32_packed
+; CHECK: call ret_i32_packed
+; CHECK: srlx %o0, 32, [[R:%[gilo][0-7]]]
+; CHECK: st [[R]], [%i0]
+; CHECK: st %o0, [%i1]
+define void @call_ret_i32_packed(i32* %i0, i32* %i1) {
+ %rv = call { i32, i32 } @ret_i32_packed(i32 undef, i32 undef,
+ i32* undef, i32* undef)
+ %e0 = extractvalue { i32, i32 } %rv, 0
+ store i32 %e0, i32* %i0
+ %e1 = extractvalue { i32, i32 } %rv, 1
+ store i32 %e1, i32* %i1
+ ret void
+}
+
+; The return value must be sign-extended to 64 bits.
+; CHECK: ret_sext
+; CHECK: sra %i0, 0, %i0
+define signext i32 @ret_sext(i32 %a0) {
+ ret i32 %a0
+}
+
+; CHECK: ret_zext
+; CHECK: srl %i0, 0, %i0
+define zeroext i32 @ret_zext(i32 %a0) {
+ ret i32 %a0
+}
+
+; CHECK: ret_nosext
+; CHECK-NOT: sra
+define signext i32 @ret_nosext(i32 signext %a0) {
+ ret i32 %a0
+}
+
+; CHECK: ret_nozext
+; CHECK-NOT: srl
+define signext i32 @ret_nozext(i32 signext %a0) {
+ ret i32 %a0
+}
diff --git a/test/CodeGen/SPARC/64bit.ll b/test/CodeGen/SPARC/64bit.ll
index 0d4e191c9509..2bbf7deb1684 100644
--- a/test/CodeGen/SPARC/64bit.ll
+++ b/test/CodeGen/SPARC/64bit.ll
@@ -66,6 +66,12 @@ define i64 @ret_bigimm() {
ret i64 6800754272627607872
}
+; CHECK: ret_bigimm2
+; CHECK: sethi 1048576
+define i64 @ret_bigimm2() {
+ ret i64 4611686018427387904 ; 0x4000000000000000
+}
+
; CHECK: reg_reg_alu
; CHECK: add %i0, %i1, [[R0:%[goli][0-7]]]
; CHECK: sub [[R0]], %i2, [[R1:%[goli][0-7]]]
@@ -144,3 +150,34 @@ define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
ret void
}
+
+; CHECK: promote_shifts
+; CHECK: ldub [%i0], [[R:%[goli][0-7]]]
+; CHECK: sll [[R]], [[R]], %i0
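+; (Both i8 loads CSE into one register, and the i8 shift is promoted to a
+; full-width sll, so value and shift amount are the same register.)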
+define i8 @promote_shifts(i8* %p) {
+ %L24 = load i8* %p
+ %L32 = load i8* %p
+ %B36 = shl i8 %L24, %L32
+ ret i8 %B36
+}
+
+; CHECK: multiply
+; CHECK: mulx %i0, %i1, %i0
+define i64 @multiply(i64 %a, i64 %b) {
+ %r = mul i64 %a, %b
+ ret i64 %r
+}
+
+; CHECK: signed_divide
+; CHECK: sdivx %i0, %i1, %i0
+define i64 @signed_divide(i64 %a, i64 %b) {
+ %r = sdiv i64 %a, %b
+ ret i64 %r
+}
+
+; CHECK: unsigned_divide
+; CHECK: udivx %i0, %i1, %i0
+define i64 @unsigned_divide(i64 %a, i64 %b) {
+ %r = udiv i64 %a, %b
+ ret i64 %r
+}
diff --git a/test/CodeGen/SPARC/constpool.ll b/test/CodeGen/SPARC/constpool.ll
new file mode 100644
index 000000000000..d93a53b3ac04
--- /dev/null
+++ b/test/CodeGen/SPARC/constpool.ll
@@ -0,0 +1,48 @@
+; RUN: llc < %s -march=sparc -relocation-model=static -code-model=small | FileCheck --check-prefix=abs32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=small | FileCheck --check-prefix=abs32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=medium | FileCheck --check-prefix=abs44 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=large | FileCheck --check-prefix=abs64 %s
+; RUN: llc < %s -march=sparc -relocation-model=pic -code-model=medium | FileCheck --check-prefix=v8pic32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=pic -code-model=medium | FileCheck --check-prefix=v9pic32 %s
+
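+; Summary of the addressing sequences checked below: the small code model
+; forms a 32-bit absolute address with sethi %hi/%lo; the medium model
+; builds a 44-bit address from %h44/%m44/%l44; the large model combines a
+; %hh/%hm high word with a %hi/%lo low word; PIC loads the address from
+; the GOT.
+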
+define float @floatCP() {
+entry:
+ ret float 1.000000e+00
+}
+
+; abs32: floatCP
+; abs32: sethi %hi(.LCPI0_0), %[[R:[gilo][0-7]]]
+; abs32: ld [%[[R]]+%lo(.LCPI0_0)], %f
+; abs32: jmp %i7+8
+
+; abs44: floatCP
+; abs44: sethi %h44(.LCPI0_0), %[[R1:[gilo][0-7]]]
+; abs44: add %[[R1]], %m44(.LCPI0_0), %[[R2:[gilo][0-7]]]
+; abs44: sllx %[[R2]], 12, %[[R3:[gilo][0-7]]]
+; abs44: ld [%[[R3]]+%l44(.LCPI0_0)], %f1
+; abs44: jmp %i7+8
+
+; abs64: floatCP
+; abs64: sethi %hi(.LCPI0_0), %[[R1:[gilo][0-7]]]
+; abs64: add %[[R1]], %lo(.LCPI0_0), %[[R2:[gilo][0-7]]]
+; abs64: sethi %hh(.LCPI0_0), %[[R3:[gilo][0-7]]]
+; abs64: add %[[R3]], %hm(.LCPI0_0), %[[R4:[gilo][0-7]]]
+; abs64: sllx %[[R4]], 32, %[[R5:[gilo][0-7]]]
+; abs64: ld [%[[R5]]+%[[R2]]], %f1
+; abs64: jmp %i7+8
+
+; v8pic32: floatCP
+; v8pic32: _GLOBAL_OFFSET_TABLE_
+; v8pic32: sethi %hi(.LCPI0_0), %[[R1:[gilo][0-7]]]
+; v8pic32: add %[[R1]], %lo(.LCPI0_0), %[[Goffs:[gilo][0-7]]]
+; v8pic32: ld [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
+; v8pic32: ld [%[[Gaddr]]], %f0
+; v8pic32: jmp %i7+8
+
+; v9pic32: floatCP
+; v9pic32: _GLOBAL_OFFSET_TABLE_
+; v9pic32: sethi %hi(.LCPI0_0), %[[R1:[gilo][0-7]]]
+; v9pic32: add %[[R1]], %lo(.LCPI0_0), %[[Goffs:[gilo][0-7]]]
+; v9pic32: ldx [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
+; v9pic32: ld [%[[Gaddr]]], %f1
+; v9pic32: jmp %i7+8
diff --git a/test/CodeGen/SPARC/globals.ll b/test/CodeGen/SPARC/globals.ll
new file mode 100644
index 000000000000..8d8de58f7ccf
--- /dev/null
+++ b/test/CodeGen/SPARC/globals.ll
@@ -0,0 +1,50 @@
+; RUN: llc < %s -march=sparc -relocation-model=static -code-model=small | FileCheck --check-prefix=abs32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=small | FileCheck --check-prefix=abs32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=medium | FileCheck --check-prefix=abs44 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=static -code-model=large | FileCheck --check-prefix=abs64 %s
+; RUN: llc < %s -march=sparc -relocation-model=pic -code-model=medium | FileCheck --check-prefix=v8pic32 %s
+; RUN: llc < %s -march=sparcv9 -relocation-model=pic -code-model=medium | FileCheck --check-prefix=v9pic32 %s
+
+@G = external global i8
+
+define zeroext i8 @loadG() {
+ %tmp = load i8* @G
+ ret i8 %tmp
+}
+
+; abs32: loadG
+; abs32: sethi %hi(G), %[[R:[gilo][0-7]]]
+; abs32: ldub [%[[R]]+%lo(G)], %i0
+; abs32: jmp %i7+8
+
+; abs44: loadG
+; abs44: sethi %h44(G), %[[R1:[gilo][0-7]]]
+; abs44: add %[[R1]], %m44(G), %[[R2:[gilo][0-7]]]
+; abs44: sllx %[[R2]], 12, %[[R3:[gilo][0-7]]]
+; abs44: ldub [%[[R3]]+%l44(G)], %i0
+; abs44: jmp %i7+8
+
+; abs64: loadG
+; abs64: sethi %hi(G), %[[R1:[gilo][0-7]]]
+; abs64: add %[[R1]], %lo(G), %[[R2:[gilo][0-7]]]
+; abs64: sethi %hh(G), %[[R3:[gilo][0-7]]]
+; abs64: add %[[R3]], %hm(G), %[[R4:[gilo][0-7]]]
+; abs64: sllx %[[R4]], 32, %[[R5:[gilo][0-7]]]
+; abs64: ldub [%[[R5]]+%[[R2]]], %i0
+; abs64: jmp %i7+8
+
+; v8pic32: loadG
+; v8pic32: _GLOBAL_OFFSET_TABLE_
+; v8pic32: sethi %hi(G), %[[R1:[gilo][0-7]]]
+; v8pic32: add %[[R1]], %lo(G), %[[Goffs:[gilo][0-7]]]
+; v8pic32: ld [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
+; v8pic32: ldub [%[[Gaddr]]], %i0
+; v8pic32: jmp %i7+8
+
+; v9pic32: loadG
+; v9pic32: _GLOBAL_OFFSET_TABLE_
+; v9pic32: sethi %hi(G), %[[R1:[gilo][0-7]]]
+; v9pic32: add %[[R1]], %lo(G), %[[Goffs:[gilo][0-7]]]
+; v9pic32: ldx [%[[GOT:[gilo][0-7]]]+%[[Goffs]]], %[[Gaddr:[gilo][0-7]]]
+; v9pic32: ldub [%[[Gaddr]]], %i0
+; v9pic32: jmp %i7+8
diff --git a/test/CodeGen/SPARC/varargs.ll b/test/CodeGen/SPARC/varargs.ll
new file mode 100644
index 000000000000..b13f90e6ca71
--- /dev/null
+++ b/test/CodeGen/SPARC/varargs.ll
@@ -0,0 +1,75 @@
+; RUN: llc < %s -disable-block-placement | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32:64-S128"
+target triple = "sparcv9-sun-solaris"
+
+; CHECK: varargsfunc
+; 128-byte register save area + 1 alloca, rounded up to 16-byte alignment.
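+; (The 8-byte %ap slot rounds up to 16 bytes, giving 128 + 16 = 144.)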
+; CHECK: save %sp, -144, %sp
+; Store the ... arguments to the argument array. The order is not important.
+; CHECK: stx %i5, [%fp+2215]
+; CHECK: stx %i4, [%fp+2207]
+; CHECK: stx %i3, [%fp+2199]
+; CHECK: stx %i2, [%fp+2191]
+; Store the address of the ... args to %ap at %fp+BIAS+128-8
+; add %fp, 2191, [[R:[gilo][0-7]]]
+; stx [[R]], [%fp+2039]
+define double @varargsfunc(i8* nocapture %fmt, double %sum, ...) {
+entry:
+ %ap = alloca i8*, align 4
+ %ap1 = bitcast i8** %ap to i8*
+ call void @llvm.va_start(i8* %ap1)
+ br label %for.cond
+
+for.cond:
+ %fmt.addr.0 = phi i8* [ %fmt, %entry ], [ %incdec.ptr, %for.cond.backedge ]
+ %sum.addr.0 = phi double [ %sum, %entry ], [ %sum.addr.0.be, %for.cond.backedge ]
+ %incdec.ptr = getelementptr inbounds i8* %fmt.addr.0, i64 1
+ %0 = load i8* %fmt.addr.0, align 1
+ %conv = sext i8 %0 to i32
+ switch i32 %conv, label %sw.default [
+ i32 105, label %sw.bb
+ i32 102, label %sw.bb3
+ ]
+
+; CHECK: sw.bb
+; ldx [%fp+2039], %[[AP:[gilo][0-7]]]
+; add %[[AP]], 4, %[[AP2:[gilo][0-7]]]
+; stx %[[AP2]], [%fp+2039]
+; ld [%[[AP]]]
+sw.bb:
+ %1 = va_arg i8** %ap, i32
+ %conv2 = sitofp i32 %1 to double
+ br label %for.cond.backedge
+
+; CHECK: sw.bb3
+; ldx [%fp+2039], %[[AP:[gilo][0-7]]]
+; add %[[AP]], 8, %[[AP2:[gilo][0-7]]]
+; stx %[[AP2]], [%fp+2039]
+; ldd [%[[AP]]]
+sw.bb3:
+ %2 = va_arg i8** %ap, double
+ br label %for.cond.backedge
+
+for.cond.backedge:
+ %.pn = phi double [ %2, %sw.bb3 ], [ %conv2, %sw.bb ]
+ %sum.addr.0.be = fadd double %.pn, %sum.addr.0
+ br label %for.cond
+
+sw.default:
+ ret double %sum.addr.0
+}
+
+declare void @llvm.va_start(i8*)
+
+@.str = private unnamed_addr constant [4 x i8] c"abc\00", align 1
+
+; CHECK: call_1d
+; The fixed-arg double goes in %d2; the variadic double goes in %o2.
+; CHECK: sethi 1048576
+; CHECK: , %o2
+; CHECK: , %f2
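+; (sethi 1048576 builds 0x40000000, the high word of the IEEE-754 encoding
+; of the double 2.0.)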
+define i32 @call_1d() #0 {
+entry:
+ %call = call double (i8*, double, ...)* @varargsfunc(i8* undef, double 1.000000e+00, double 2.000000e+00)
+ ret i32 1
+}
diff --git a/test/CodeGen/SystemZ/addr-01.ll b/test/CodeGen/SystemZ/addr-01.ll
new file mode 100644
index 000000000000..c125ffa71a71
--- /dev/null
+++ b/test/CodeGen/SystemZ/addr-01.ll
@@ -0,0 +1,107 @@
+; Test selection of addresses with indices in cases where the address
+; is used once.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; A simple index address.
+define void @f1(i64 %addr, i64 %index) {
+; CHECK: f1:
+; CHECK: lb %r0, 0(%r3,%r2)
+; CHECK: br %r14
+ %add = add i64 %addr, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; An address with an index and a displacement (order 1).
+define void @f2(i64 %addr, i64 %index) {
+; CHECK: f2:
+; CHECK: lb %r0, 100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, %index
+ %add2 = add i64 %add1, 100
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; An address with an index and a displacement (order 2).
+define void @f3(i64 %addr, i64 %index) {
+; CHECK: f3:
+; CHECK: lb %r0, 100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, 100
+ %add2 = add i64 %add1, %index
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; An address with an index and a subtracted displacement (order 1).
+define void @f4(i64 %addr, i64 %index) {
+; CHECK: f4:
+; CHECK: lb %r0, -100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, %index
+ %add2 = sub i64 %add1, 100
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; An address with an index and a subtracted displacement (order 2).
+define void @f5(i64 %addr, i64 %index) {
+; CHECK: f5:
+; CHECK: lb %r0, -100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = sub i64 %addr, 100
+ %add2 = add i64 %add1, %index
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; An address with an index and a displacement added using OR.
+define void @f6(i64 %addr, i64 %index) {
+; CHECK: f6:
+; CHECK: nill %r2, 65528
+; CHECK: lb %r0, 6(%r3,%r2)
+; CHECK: br %r14
+ %aligned = and i64 %addr, -8
+ %or = or i64 %aligned, 6
+ %add = add i64 %or, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; Like f6, but without the masking. This OR doesn't count as a displacement.
+define void @f7(i64 %addr, i64 %index) {
+; CHECK: f7:
+; CHECK: oill %r2, 6
+; CHECK: lb %r0, 0(%r3,%r2)
+; CHECK: br %r14
+ %or = or i64 %addr, 6
+ %add = add i64 %or, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
+
+; Like f6, but with the OR applied after the index. We don't know anything
+; about the alignment of %add here.
+define void @f8(i64 %addr, i64 %index) {
+; CHECK: f8:
+; CHECK: nill %r2, 65528
+; CHECK: agr %r2, %r3
+; CHECK: oill %r2, 6
+; CHECK: lb %r0, 0(%r2)
+; CHECK: br %r14
+ %aligned = and i64 %addr, -8
+ %add = add i64 %aligned, %index
+ %or = or i64 %add, 6
+ %ptr = inttoptr i64 %or to i8 *
+ %a = load volatile i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/addr-02.ll b/test/CodeGen/SystemZ/addr-02.ll
new file mode 100644
index 000000000000..6772c1d41800
--- /dev/null
+++ b/test/CodeGen/SystemZ/addr-02.ll
@@ -0,0 +1,116 @@
+; Like addr-01.ll, but here the address is also used in a non-address context.
+; The assumption here is that we should match complex addresses where
+; possible, but this might well need to change in future.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; A simple index address.
+define void @f1(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f1:
+; CHECK: lb %r0, 0(%r3,%r2)
+; CHECK: br %r14
+ %add = add i64 %addr, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; An address with an index and a displacement (order 1).
+define void @f2(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f2:
+; CHECK: lb %r0, 100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, %index
+ %add2 = add i64 %add1, 100
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; An address with an index and a displacement (order 2).
+define void @f3(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f3:
+; CHECK: lb %r0, 100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, 100
+ %add2 = add i64 %add1, %index
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; An address with an index and a subtracted displacement (order 1).
+define void @f4(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f4:
+; CHECK: lb %r0, -100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %addr, %index
+ %add2 = sub i64 %add1, 100
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; An address with an index and a subtracted displacement (order 2).
+define void @f5(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f5:
+; CHECK: lb %r0, -100(%r3,%r2)
+; CHECK: br %r14
+ %add1 = sub i64 %addr, 100
+ %add2 = add i64 %add1, %index
+ %ptr = inttoptr i64 %add2 to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; An address with an index and a displacement added using OR.
+define void @f6(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f6:
+; CHECK: nill %r2, 65528
+; CHECK: lb %r0, 6(%r3,%r2)
+; CHECK: br %r14
+ %aligned = and i64 %addr, -8
+ %or = or i64 %aligned, 6
+ %add = add i64 %or, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; Like f6, but without the masking. This OR doesn't count as a displacement.
+define void @f7(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f7:
+; CHECK: oill %r2, 6
+; CHECK: lb %r0, 0(%r3,%r2)
+; CHECK: br %r14
+ %or = or i64 %addr, 6
+ %add = add i64 %or, %index
+ %ptr = inttoptr i64 %add to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
+
+; Like f6, but with the OR applied after the index. We don't know anything
+; about the alignment of %add here.
+define void @f8(i64 %addr, i64 %index, i8 **%dst) {
+; CHECK: f8:
+; CHECK: nill %r2, 65528
+; CHECK: agr %r2, %r3
+; CHECK: oill %r2, 6
+; CHECK: lb %r0, 0(%r2)
+; CHECK: br %r14
+ %aligned = and i64 %addr, -8
+ %add = add i64 %aligned, %index
+ %or = or i64 %add, 6
+ %ptr = inttoptr i64 %or to i8 *
+ %a = load volatile i8 *%ptr
+ store volatile i8 *%ptr, i8 **%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/addr-03.ll b/test/CodeGen/SystemZ/addr-03.ll
new file mode 100644
index 000000000000..dbdb9f15b4f1
--- /dev/null
+++ b/test/CodeGen/SystemZ/addr-03.ll
@@ -0,0 +1,48 @@
+; Test constant addresses, unlikely as they are.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
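+; LB's displacement is a signed 20-bit field, so only addresses in the
+; range [-524288, 524287] can be encoded directly.
+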
+define void @f1() {
+; CHECK: f1:
+; CHECK: lb %r0, 0
+; CHECK: br %r14
+ %ptr = inttoptr i64 0 to i8 *
+ %val = load volatile i8 *%ptr
+ ret void
+}
+
+define void @f2() {
+; CHECK: f2:
+; CHECK: lb %r0, -524288
+; CHECK: br %r14
+ %ptr = inttoptr i64 -524288 to i8 *
+ %val = load volatile i8 *%ptr
+ ret void
+}
+
+define void @f3() {
+; CHECK: f3:
+; CHECK-NOT: lb %r0, -524289
+; CHECK: br %r14
+ %ptr = inttoptr i64 -524289 to i8 *
+ %val = load volatile i8 *%ptr
+ ret void
+}
+
+define void @f4() {
+; CHECK: f4:
+; CHECK: lb %r0, 524287
+; CHECK: br %r14
+ %ptr = inttoptr i64 524287 to i8 *
+ %val = load volatile i8 *%ptr
+ ret void
+}
+
+define void @f5() {
+; CHECK: f5:
+; CHECK-NOT: lb %r0, 524288
+; CHECK: br %r14
+ %ptr = inttoptr i64 524288 to i8 *
+ %val = load volatile i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/alloca-01.ll b/test/CodeGen/SystemZ/alloca-01.ll
new file mode 100644
index 000000000000..1852c9135059
--- /dev/null
+++ b/test/CodeGen/SystemZ/alloca-01.ll
@@ -0,0 +1,81 @@
+; Test variable-sized allocas and addresses based on them in cases where
+; stack arguments are needed.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK2
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-A
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-B
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-C
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-D
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP
+
+declare i64 @bar(i8 *%a, i8 *%b, i8 *%c, i8 *%d, i8 *%e, i64 %f, i64 %g)
+
+; Allocate %length bytes and take addresses based on the result.
+; There are two stack arguments, so an offset of 160 + 2 * 8 == 176
+; is added to the copy of %r15.
+define i64 @f1(i64 %length, i64 %index) {
+; The full allocation sequence is:
+;
+; la %r0, 7(%r2) 1
+; nill %r0, 0xfff8 1
+; lgr %r1, %r15 2
+; sgr %r1, %r0 1 2
+; lgr %r15, %r1 2
+;
+; The third instruction does not depend on the first two, so check for
+; two fully-ordered sequences.
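+;
+; (In both sequences the la/nill pair rounds %length up to the next
+; multiple of 8 before it is subtracted from the stack pointer.)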
+;
+; FIXME: a better sequence would be:
+;
+; lgr %r1, %r15
+; sgr %r1, %r2
+; nill %r1, 0xfff8
+; lgr %r15, %r1
+;
+; CHECK1: f1:
+; CHECK1: la %r0, 7(%r2)
+; CHECK1: nill %r0, 65528
+; CHECK1: sgr %r1, %r0
+; CHECK1: lgr %r15, %r1
+;
+; CHECK2: f1:
+; CHECK2: lgr %r1, %r15
+; CHECK2: sgr %r1, %r0
+; CHECK2: lgr %r15, %r1
+;
+; CHECK-A: f1:
+; CHECK-A: lgr %r15, %r1
+; CHECK-A: la %r2, 176(%r1)
+;
+; CHECK-B: f1:
+; CHECK-B: lgr %r15, %r1
+; CHECK-B: la %r3, 177(%r1)
+;
+; CHECK-C: f1:
+; CHECK-C: lgr %r15, %r1
+; CHECK-C: la %r4, 4095({{%r3,%r1|%r1,%r3}})
+;
+; CHECK-D: f1:
+; CHECK-D: lgr %r15, %r1
+; CHECK-D: lay %r5, 4096({{%r3,%r1|%r1,%r3}})
+;
+; CHECK-E: f1:
+; CHECK-E: lgr %r15, %r1
+; CHECK-E: lay %r6, 4271({{%r3,%r1|%r1,%r3}})
+;
+; CHECK-FP: f1:
+; CHECK-FP: lgr %r11, %r15
+; CHECK-FP: lmg %r6, %r15, 224(%r11)
+ %a = alloca i8, i64 %length
+ %b = getelementptr i8 *%a, i64 1
+ %cindex = add i64 %index, 3919
+ %c = getelementptr i8 *%a, i64 %cindex
+ %dindex = add i64 %index, 3920
+ %d = getelementptr i8 *%a, i64 %dindex
+ %eindex = add i64 %index, 4095
+ %e = getelementptr i8 *%a, i64 %eindex
+ %count = call i64 @bar(i8 *%a, i8 *%b, i8 *%c, i8 *%d, i8 *%e, i64 0, i64 0)
+ %res = add i64 %count, 1
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/alloca-02.ll b/test/CodeGen/SystemZ/alloca-02.ll
new file mode 100644
index 000000000000..fbb095f4d12d
--- /dev/null
+++ b/test/CodeGen/SystemZ/alloca-02.ll
@@ -0,0 +1,49 @@
+; Make sure that the alloca offset isn't lost when the alloca result is
+; used directly in a load or store. There must always be an LA or LAY.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-A
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-B
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-C
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-D
+
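+; MVI takes a 12-bit unsigned displacement, so stores at offsets up to
+; 4095 use MVI and the store at offset 4096 needs the long-displacement
+; MVIY.
+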
+declare i64 @bar(i8 *%a)
+
+define i64 @f1(i64 %length, i64 %index) {
+; CHECK-A: f1:
+; CHECK-A: lgr %r15, [[ADDR:%r[1-5]]]
+; CHECK-A: la %r2, 160([[ADDR]])
+; CHECK-A: mvi 0(%r2), 0
+;
+; CHECK-B: f1:
+; CHECK-B: lgr %r15, [[ADDR:%r[1-5]]]
+; CHECK-B: la %r2, 160([[ADDR]])
+; CHECK-B: mvi 4095(%r2), 1
+;
+; CHECK-C: f1:
+; CHECK-C: lgr %r15, [[ADDR:%r[1-5]]]
+; CHECK-C: la [[TMP:%r[1-5]]], 160(%r3,[[ADDR]])
+; CHECK-C: mvi 0([[TMP]]), 2
+;
+; CHECK-D: f1:
+; CHECK-D: lgr %r15, [[ADDR:%r[1-5]]]
+; CHECK-D: la [[TMP:%r[1-5]]], 160(%r3,[[ADDR]])
+; CHECK-D: mvi 4095([[TMP]]), 3
+;
+; CHECK-E: f1:
+; CHECK-E: lgr %r15, [[ADDR:%r[1-5]]]
+; CHECK-E: la [[TMP:%r[1-5]]], 160(%r3,[[ADDR]])
+; CHECK-E: mviy 4096([[TMP]]), 4
+ %a = alloca i8, i64 %length
+ store i8 0, i8 *%a
+ %b = getelementptr i8 *%a, i64 4095
+ store i8 1, i8 *%b
+ %c = getelementptr i8 *%a, i64 %index
+ store i8 2, i8 *%c
+ %d = getelementptr i8 *%c, i64 4095
+ store i8 3, i8 *%d
+ %e = getelementptr i8 *%d, i64 1
+ store i8 4, i8 *%e
+ %count = call i64 @bar(i8 *%a)
+ %res = add i64 %count, 1
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/and-01.ll b/test/CodeGen/SystemZ/and-01.ll
new file mode 100644
index 000000000000..8dd106b7c015
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-01.ll
@@ -0,0 +1,129 @@
+; Test 32-bit ANDs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
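+; N takes a 12-bit unsigned displacement (0 to 4095); NY takes a signed
+; 20-bit displacement (-524288 to 524287). The tests below probe both
+; boundaries.
+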
+; Check NR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: nr %r2, %r3
+; CHECK: br %r14
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the low end of the N range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: n %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the high end of the aligned N range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: n %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the next word up, which should use NY instead of N.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: ny %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the high end of the aligned NY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: ny %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: n %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the high end of the negative aligned NY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: ny %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the low end of the NY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: ny %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: n %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check that N allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: n %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
+
+; Check that NY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: ny %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %and = and i32 %a, %b
+ ret i32 %and
+}
diff --git a/test/CodeGen/SystemZ/and-02.ll b/test/CodeGen/SystemZ/and-02.ll
new file mode 100644
index 000000000000..a0fff81492ad
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-02.ll
@@ -0,0 +1,93 @@
+; Test 32-bit ANDs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
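+; NILL ands the low 16 bits of the register, NILH the next 16 bits, and
+; NILF the full low 32 bits. NILL and NILH are only usable when the other
+; half of the mask is all ones.
+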
+; Check the lowest useful NILF value.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: nilf %r2, 1
+; CHECK: br %r14
+ %and = and i32 %a, 1
+ ret i32 %and
+}
+
+; Check the highest 16-bit constant that must be handled by NILF.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: nilf %r2, 65534
+; CHECK: br %r14
+ %and = and i32 %a, 65534
+ ret i32 %and
+}
+
+; ANDs of 0xffff are zero extensions from i16.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: llhr %r2, %r2
+; CHECK: br %r14
+ %and = and i32 %a, 65535
+ ret i32 %and
+}
+
+; Check the next value up, which must again use NILF.
+define i32 @f4(i32 %a) {
+; CHECK: f4:
+; CHECK: nilf %r2, 65536
+; CHECK: br %r14
+ %and = and i32 %a, 65536
+ ret i32 %and
+}
+
+; Check the lowest useful NILH value. (LLHR is used instead of NILH of 0.)
+define i32 @f5(i32 %a) {
+; CHECK: f5:
+; CHECK: nilh %r2, 1
+; CHECK: br %r14
+ %and = and i32 %a, 131071
+ ret i32 %and
+}
+
+; Check the highest useful NILF value.
+define i32 @f6(i32 %a) {
+; CHECK: f6:
+; CHECK: nilf %r2, 4294901758
+; CHECK: br %r14
+ %and = and i32 %a, -65538
+ ret i32 %and
+}
+
+; Check the highest useful NILH value, which is one up from the above.
+define i32 @f7(i32 %a) {
+; CHECK: f7:
+; CHECK: nilh %r2, 65534
+; CHECK: br %r14
+ %and = and i32 %a, -65537
+ ret i32 %and
+}
+
+; Check the low end of the NILL range, which is one up again.
+define i32 @f8(i32 %a) {
+; CHECK: f8:
+; CHECK: nill %r2, 0
+; CHECK: br %r14
+ %and = and i32 %a, -65536
+ ret i32 %and
+}
+
+; Check the next value up.
+define i32 @f9(i32 %a) {
+; CHECK: f9:
+; CHECK: nill %r2, 1
+; CHECK: br %r14
+ %and = and i32 %a, -65535
+ ret i32 %and
+}
+
+; Check the highest useful NILL value.
+define i32 @f10(i32 %a) {
+; CHECK: f10:
+; CHECK: nill %r2, 65534
+; CHECK: br %r14
+ %and = and i32 %a, -2
+ ret i32 %and
+}
diff --git a/test/CodeGen/SystemZ/and-03.ll b/test/CodeGen/SystemZ/and-03.ll
new file mode 100644
index 000000000000..3fe8d3cf3bf8
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-03.ll
@@ -0,0 +1,94 @@
+; Test 64-bit ANDs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check NGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: ngr %r2, %r3
+; CHECK: br %r14
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check NG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: ng %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check the high end of the aligned NG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: ng %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: ng %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check the high end of the negative aligned NG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: ng %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check the low end of the NG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: ng %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: ng %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
+
+; Check that NG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: ng %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %and = and i64 %a, %b
+ ret i64 %and
+}
diff --git a/test/CodeGen/SystemZ/and-04.ll b/test/CodeGen/SystemZ/and-04.ll
new file mode 100644
index 000000000000..62def60026e1
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-04.ll
@@ -0,0 +1,180 @@
+; Test 64-bit ANDs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
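+; NIHH, NIHL and NIHF operate on the high 32 bits of the register and
+; NILH, NILL and NILF on the low 32 bits; each halfword form requires the
+; rest of the mask to be all ones.
+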
+; There is no 64-bit AND instruction for a mask of 1.
+; FIXME: we ought to be able to require "ngr %r2, %r0", but at the moment,
+; two-address optimisations force "ngr %r0, %r2; lgr %r2, %r0" instead.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: lghi %r0, 1
+; CHECK: ngr
+; CHECK: br %r14
+ %and = and i64 %a, 1
+ ret i64 %and
+}
+
+; Likewise 0xfffe.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llill %r0, 65534
+; CHECK: ngr
+; CHECK: br %r14
+ %and = and i64 %a, 65534
+ ret i64 %and
+}
+
+; ...but 0xffff is a 16-bit zero extension.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: llghr %r2, %r2
+; CHECK: br %r14
+ %and = and i64 %a, 65535
+ ret i64 %and
+}
+
+; Check the next value up, which again has no dedicated instruction.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK: llilh %r0, 1
+; CHECK: ngr
+; CHECK: br %r14
+ %and = and i64 %a, 65536
+ ret i64 %and
+}
+
+; Check 0xfffffffe.
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK: llilf %r0, 4294967294
+; CHECK: ngr
+; CHECK: br %r14
+ %and = and i64 %a, 4294967294
+ ret i64 %and
+}
+
+; Check the next value up, which is a 32-bit zero extension.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK: llgfr %r2, %r2
+; CHECK: br %r14
+ %and = and i64 %a, 4294967295
+ ret i64 %and
+}
+
+; Check the lowest useful NIHF value (0x00000001_ffffffff).
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK: nihf %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 8589934591
+ ret i64 %and
+}
+
+; Check the low end of the NIHH range (0x0000ffff_ffffffff).
+define i64 @f8(i64 %a) {
+; CHECK: f8:
+; CHECK: nihh %r2, 0
+; CHECK: br %r14
+ %and = and i64 %a, 281474976710655
+ ret i64 %and
+}
+
+; Check the highest useful NIHH value (0xfffeffff_ffffffff).
+define i64 @f9(i64 %a) {
+; CHECK: f9:
+; CHECK: nihh %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, -281474976710657
+ ret i64 %and
+}
+
+; Check the highest useful NIHF value (0xfffefffe_ffffffff).
+define i64 @f10(i64 %a) {
+; CHECK: f10:
+; CHECK: nihf %r2, 4294901758
+; CHECK: br %r14
+ %and = and i64 %a, -281479271677953
+ ret i64 %and
+}
+
+; Check the low end of the NIHL range (0xffff0000_ffffffff).
+define i64 @f11(i64 %a) {
+; CHECK: f11:
+; CHECK: nihl %r2, 0
+; CHECK: br %r14
+ %and = and i64 %a, -281470681743361
+ ret i64 %and
+}
+
+; Check the highest useful NIHL value (0xfffffffe_ffffffff).
+define i64 @f12(i64 %a) {
+; CHECK: f12:
+; CHECK: nihl %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, -4294967297
+ ret i64 %and
+}
+
+; Check the low end of the NILF range (0xffffffff_00000000).
+define i64 @f13(i64 %a) {
+; CHECK: f13:
+; CHECK: nilf %r2, 0
+; CHECK: br %r14
+ %and = and i64 %a, -4294967296
+ ret i64 %and
+}
+
+; Check the low end of the NILH range (0xffffffff_0000ffff).
+define i64 @f14(i64 %a) {
+; CHECK: f14:
+; CHECK: nilh %r2, 0
+; CHECK: br %r14
+ %and = and i64 %a, -4294901761
+ ret i64 %and
+}
+
+; Check the next value up, which must use NILF.
+define i64 @f15(i64 %a) {
+; CHECK: f15:
+; CHECK: nilf %r2, 65536
+; CHECK: br %r14
+ %and = and i64 %a, -4294901760
+ ret i64 %and
+}
+
+; Check the maximum useful NILF value (0xffffffff_fffefffe).
+define i64 @f16(i64 %a) {
+; CHECK: f16:
+; CHECK: nilf %r2, 4294901758
+; CHECK: br %r14
+ %and = and i64 %a, -65538
+ ret i64 %and
+}
+
+; Check the highest useful NILH value, which is one greater than the above.
+define i64 @f17(i64 %a) {
+; CHECK: f17:
+; CHECK: nilh %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, -65537
+ ret i64 %and
+}
+
+; Check the low end of the NILL range, which is one greater again.
+define i64 @f18(i64 %a) {
+; CHECK: f18:
+; CHECK: nill %r2, 0
+; CHECK: br %r14
+ %and = and i64 %a, -65536
+ ret i64 %and
+}
+
+; Check the highest useful NILL value.
+define i64 @f19(i64 %a) {
+; CHECK: f19:
+; CHECK: nill %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, -2
+ ret i64 %and
+}
diff --git a/test/CodeGen/SystemZ/and-05.ll b/test/CodeGen/SystemZ/and-05.ll
new file mode 100644
index 000000000000..457391165d5e
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-05.ll
@@ -0,0 +1,165 @@
+; Test ANDs of a constant into a byte of memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
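+; NI takes a 12-bit unsigned displacement and NIY a signed 20-bit one;
+; neither form accepts an index register (see f12 and f13).
+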
+; Check the lowest useful constant, expressed as a signed integer.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: ni 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %and = and i8 %val, -255
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the highest useful constant, expressed as a signed integer.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %and = and i8 %val, -2
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the lowest useful constant, expressed as an unsigned integer.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: ni 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %and = and i8 %val, 1
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the highest useful constant, expressed as an unsigned integer.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %and = and i8 %val, 254
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the NI range.
+define void @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: ni 4095(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which should use NIY instead of NI.
+define void @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: niy 4096(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the NIY range.
+define void @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: niy 524287(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, 524288
+; CHECK: ni 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the negative NIY range.
+define void @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: niy -1(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the NIY range.
+define void @f10(i8 *%src) {
+; CHECK: f10:
+; CHECK: niy -524288(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f11(i8 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r2, -524289
+; CHECK: ni 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check that NI does not allow an index.
+define void @f12(i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: agr %r2, %r3
+; CHECK: ni 4095(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
+
+; Check that NIY does not allow an index.
+define void @f13(i64 %src, i64 %index) {
+; CHECK: f13:
+; CHECK: agr %r2, %r3
+; CHECK: niy 4096(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %and = and i8 %val, 127
+ store i8 %and, i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/and-06.ll b/test/CodeGen/SystemZ/and-06.ll
new file mode 100644
index 000000000000..bbb5e7b7b9dd
--- /dev/null
+++ b/test/CodeGen/SystemZ/and-06.ll
@@ -0,0 +1,108 @@
+; Test that we can use NI for byte operations that are expressed as i32
+; or i64 operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Zero extension to 32 bits, negative constant.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %and = and i32 %ext, -2
+ %trunc = trunc i32 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, negative constant.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %and = and i64 %ext, -2
+ %trunc = trunc i64 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 32 bits, positive constant.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %and = and i32 %ext, 254
+ %trunc = trunc i32 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, positive constant.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %and = and i64 %ext, 254
+ %trunc = trunc i64 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, negative constant.
+define void @f5(i8 *%ptr) {
+; CHECK: f5:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %and = and i32 %ext, -2
+ %trunc = trunc i32 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, negative constant.
+define void @f6(i8 *%ptr) {
+; CHECK: f6:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %and = and i64 %ext, -2
+ %trunc = trunc i64 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, positive constant.
+define void @f7(i8 *%ptr) {
+; CHECK: f7:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %and = and i32 %ext, 254
+ %trunc = trunc i32 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, positive constant.
+define void @f8(i8 *%ptr) {
+; CHECK: f8:
+; CHECK: ni 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %and = and i64 %ext, 254
+ %trunc = trunc i64 %and to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-01.ll b/test/CodeGen/SystemZ/args-01.ll
new file mode 100644
index 000000000000..a6b80c54db51
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-01.ll
@@ -0,0 +1,74 @@
+; Test the handling of GPR, FPR and stack arguments when no extension
+; type is given. This type of argument is used for passing structures, etc.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-INT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FLOAT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-DOUBLE
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-2
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-STACK
+
+declare void @bar(i8, i16, i32, i64, float, double, fp128, i64,
+ float, double, i8, i16, i32, i64, float, double, fp128)
+
+; There are two indirect fp128 slots, one at offset 224 (the first available
+; byte after the outgoing arguments) and one immediately after it at 240.
+; These slots should be set up outside the glued call sequence, so would
+; normally use %f0/%f2 as the first available 128-bit pair. This choice
+; is hard-coded in the FP128 tests.
+;
+; The order of the CHECK-INT loads doesn't matter. The same goes for the
+; CHECK-FP128-* stores and the CHECK-STACK stores. It would be OK to reorder
+; them in response to future code changes.
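+;
+; Each outgoing stack argument occupies an 8-byte slot starting at offset
+; 160, and unextended sub-8-byte values are stored right-justified within
+; their slot, hence the 4-byte stores at 172, 180, 188 and 204.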
+define void @foo() {
+; CHECK-INT: foo:
+; CHECK-INT: lhi %r2, 1
+; CHECK-INT: lhi %r3, 2
+; CHECK-INT: lhi %r4, 3
+; CHECK-INT: lghi %r5, 4
+; CHECK-INT: la %r6, {{224|240}}(%r15)
+; CHECK-INT: brasl %r14, bar@PLT
+;
+; CHECK-FLOAT: foo:
+; CHECK-FLOAT: lzer %f0
+; CHECK-FLOAT: lcebr %f4, %f0
+; CHECK-FLOAT: brasl %r14, bar@PLT
+;
+; CHECK-DOUBLE: foo:
+; CHECK-DOUBLE: lzdr %f2
+; CHECK-DOUBLE: lcdbr %f6, %f2
+; CHECK-DOUBLE: brasl %r14, bar@PLT
+;
+; CHECK-FP128-1: foo:
+; CHECK-FP128-1: aghi %r15, -256
+; CHECK-FP128-1: lzxr %f0
+; CHECK-FP128-1: std %f0, 224(%r15)
+; CHECK-FP128-1: std %f2, 232(%r15)
+; CHECK-FP128-1: brasl %r14, bar@PLT
+;
+; CHECK-FP128-2: foo:
+; CHECK-FP128-2: aghi %r15, -256
+; CHECK-FP128-2: lzxr %f0
+; CHECK-FP128-2: std %f0, 240(%r15)
+; CHECK-FP128-2: std %f2, 248(%r15)
+; CHECK-FP128-2: brasl %r14, bar@PLT
+;
+; CHECK-STACK: foo:
+; CHECK-STACK: aghi %r15, -256
+; CHECK-STACK: la [[REGISTER:%r[0-5]+]], {{224|240}}(%r15)
+; CHECK-STACK: stg [[REGISTER]], 216(%r15)
+; CHECK-STACK: mvghi 208(%r15), 0
+; CHECK-STACK: mvhi 204(%r15), 0
+; CHECK-STACK: mvghi 192(%r15), 9
+; CHECK-STACK: mvhi 188(%r15), 8
+; CHECK-STACK: mvhi 180(%r15), 7
+; CHECK-STACK: mvhi 172(%r15), 6
+; CHECK-STACK: mvghi 160(%r15), 5
+; CHECK-STACK: brasl %r14, bar@PLT
+
+ call void @bar (i8 1, i16 2, i32 3, i64 4, float 0.0, double 0.0,
+ fp128 0xL00000000000000000000000000000000, i64 5,
+ float -0.0, double -0.0, i8 6, i16 7, i32 8, i64 9, float 0.0,
+ double 0.0, fp128 0xL00000000000000000000000000000000)
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-02.ll b/test/CodeGen/SystemZ/args-02.ll
new file mode 100644
index 000000000000..9ea111c2e021
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-02.ll
@@ -0,0 +1,76 @@
+; Test the handling of GPR, FPR and stack arguments when integers are
+; sign-extended.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-INT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FLOAT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-DOUBLE
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-2
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-STACK
+
+declare void @bar(i8 signext, i16 signext, i32 signext, i64, float, double,
+ fp128, i64, float, double, i8 signext, i16 signext,
+ i32 signext, i64, float, double, fp128)
+
+; There are two indirect fp128 slots, one at offset 224 (the first available
+; byte after the outgoing arguments) and one immediately after it at 240.
+; These slots should be set up outside the glued call sequence, so would
+; normally use %f0/%f2 as the first available 128-bit pair. This choice
+; is hard-coded in the FP128 tests.
+;
+; The order of the CHECK-INT loads doesn't matter. The same goes for the
+; CHECK-FP128-* stores and the CHECK-STACK stores. It would be OK to reorder
+; them in response to future code changes.
+define void @foo() {
+; CHECK-INT: foo:
+; CHECK-INT: lghi %r2, -1
+; CHECK-INT: lghi %r3, -2
+; CHECK-INT: lghi %r4, -3
+; CHECK-INT: lghi %r5, -4
+; CHECK-INT: la %r6, {{224|240}}(%r15)
+; CHECK-INT: brasl %r14, bar@PLT
+;
+; CHECK-FLOAT: foo:
+; CHECK-FLOAT: lzer %f0
+; CHECK-FLOAT: lcebr %f4, %f0
+; CHECK-FLOAT: brasl %r14, bar@PLT
+;
+; CHECK-DOUBLE: foo:
+; CHECK-DOUBLE: lzdr %f2
+; CHECK-DOUBLE: lcdbr %f6, %f2
+; CHECK-DOUBLE: brasl %r14, bar@PLT
+;
+; CHECK-FP128-1: foo:
+; CHECK-FP128-1: aghi %r15, -256
+; CHECK-FP128-1: lzxr %f0
+; CHECK-FP128-1: std %f0, 224(%r15)
+; CHECK-FP128-1: std %f2, 232(%r15)
+; CHECK-FP128-1: brasl %r14, bar@PLT
+;
+; CHECK-FP128-2: foo:
+; CHECK-FP128-2: aghi %r15, -256
+; CHECK-FP128-2: lzxr %f0
+; CHECK-FP128-2: std %f0, 240(%r15)
+; CHECK-FP128-2: std %f2, 248(%r15)
+; CHECK-FP128-2: brasl %r14, bar@PLT
+;
+; CHECK-STACK: foo:
+; CHECK-STACK: aghi %r15, -256
+; CHECK-STACK: la [[REGISTER:%r[0-5]+]], {{224|240}}(%r15)
+; CHECK-STACK: stg [[REGISTER]], 216(%r15)
+; CHECK-STACK: mvghi 208(%r15), 0
+; CHECK-STACK: mvhi 204(%r15), 0
+; CHECK-STACK: mvghi 192(%r15), -9
+; CHECK-STACK: mvghi 184(%r15), -8
+; CHECK-STACK: mvghi 176(%r15), -7
+; CHECK-STACK: mvghi 168(%r15), -6
+; CHECK-STACK: mvghi 160(%r15), -5
+; CHECK-STACK: brasl %r14, bar@PLT
+
+ call void @bar (i8 -1, i16 -2, i32 -3, i64 -4, float 0.0, double 0.0,
+ fp128 0xL00000000000000000000000000000000, i64 -5,
+ float -0.0, double -0.0, i8 -6, i16 -7, i32 -8, i64 -9,
+ float 0.0, double 0.0,
+ fp128 0xL00000000000000000000000000000000)
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-03.ll b/test/CodeGen/SystemZ/args-03.ll
new file mode 100644
index 000000000000..f954d584fcf4
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-03.ll
@@ -0,0 +1,78 @@
+; Test the handling of GPR, FPR and stack arguments when integers are
+; zero-extended.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-INT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FLOAT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-DOUBLE
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-FP128-2
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-STACK
+
+declare void @bar(i8 zeroext, i16 zeroext, i32 zeroext, i64, float, double,
+ fp128, i64, float, double, i8 zeroext, i16 zeroext,
+ i32 zeroext, i64, float, double, fp128)
+
+; There are two indirect fp128 slots, one at offset 224 (the first available
+; byte after the outgoing arguments) and one immediately after it at 240.
+; These slots should be set up outside the glued call sequence, so would
+; normally use %f0/%f2 as the first available 128-bit pair. This choice
+; is hard-coded in the FP128 tests.
+;
+; The order of the CHECK-INT loads doesn't matter. The same goes for the
+; CHECK-FP128-* stores and the CHECK-STACK stores. It would be OK to reorder
+; them in response to future code changes.
+define void @foo() {
+; CHECK-INT: foo:
+; CHECK-INT: lghi %r2, 255
+; CHECK-INT: llill %r3, 65534
+; CHECK-INT: llilf %r4, 4294967293
+; CHECK-INT: lghi %r5, -4
+; CHECK-INT: la %r6, {{224|240}}(%r15)
+; CHECK-INT: brasl %r14, bar@PLT
+;
+; CHECK-FLOAT: foo:
+; CHECK-FLOAT: lzer %f0
+; CHECK-FLOAT: lcebr %f4, %f0
+; CHECK-FLOAT: brasl %r14, bar@PLT
+;
+; CHECK-DOUBLE: foo:
+; CHECK-DOUBLE: lzdr %f2
+; CHECK-DOUBLE: lcdbr %f6, %f2
+; CHECK-DOUBLE: brasl %r14, bar@PLT
+;
+; CHECK-FP128-1: foo:
+; CHECK-FP128-1: aghi %r15, -256
+; CHECK-FP128-1: lzxr %f0
+; CHECK-FP128-1: std %f0, 224(%r15)
+; CHECK-FP128-1: std %f2, 232(%r15)
+; CHECK-FP128-1: brasl %r14, bar@PLT
+;
+; CHECK-FP128-2: foo:
+; CHECK-FP128-2: aghi %r15, -256
+; CHECK-FP128-2: lzxr %f0
+; CHECK-FP128-2: std %f0, 240(%r15)
+; CHECK-FP128-2: std %f2, 248(%r15)
+; CHECK-FP128-2: brasl %r14, bar@PLT
+;
+; CHECK-STACK: foo:
+; CHECK-STACK: aghi %r15, -256
+; CHECK-STACK: la [[REGISTER:%r[0-5]+]], {{224|240}}(%r15)
+; CHECK-STACK: stg [[REGISTER]], 216(%r15)
+; CHECK-STACK: llilf [[AT184:%r[0-5]+]], 4294967288
+; CHECK-STACK: stg [[AT184]], 184(%r15)
+; CHECK-STACK: llill [[AT176:%r[0-5]+]], 65529
+; CHECK-STACK: stg [[AT176]], 176(%r15)
+; CHECK-STACK: mvghi 208(%r15), 0
+; CHECK-STACK: mvhi 204(%r15), 0
+; CHECK-STACK: mvghi 192(%r15), -9
+; CHECK-STACK: mvghi 168(%r15), 250
+; CHECK-STACK: mvghi 160(%r15), -5
+; CHECK-STACK: brasl %r14, bar@PLT
+
+ call void @bar (i8 -1, i16 -2, i32 -3, i64 -4, float 0.0, double 0.0,
+ fp128 0xL00000000000000000000000000000000, i64 -5,
+ float -0.0, double -0.0, i8 -6, i16 -7, i32 -8, i64 -9,
+ float 0.0, double 0.0,
+ fp128 0xL00000000000000000000000000000000)
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-04.ll b/test/CodeGen/SystemZ/args-04.ll
new file mode 100644
index 000000000000..8340494ff4dc
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-04.ll
@@ -0,0 +1,126 @@
+; Test incoming GPR, FPR and stack arguments when no extension type is given.
+; This type of argument is used for passing structures, etc.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Do some arithmetic so that we can see the register being used.
+define i8 @f1(i8 %r2) {
+; CHECK: f1:
+; CHECK: ahi %r2, 1
+; CHECK: br %r14
+ %y = add i8 %r2, 1
+ ret i8 %y
+}
+
+define i16 @f2(i8 %r2, i16 %r3) {
+; CHECK: f2:
+; CHECK: {{lr|lgr}} %r2, %r3
+; CHECK: br %r14
+ ret i16 %r3
+}
+
+define i32 @f3(i8 %r2, i16 %r3, i32 %r4) {
+; CHECK: f3:
+; CHECK: {{lr|lgr}} %r2, %r4
+; CHECK: br %r14
+ ret i32 %r4
+}
+
+define i64 @f4(i8 %r2, i16 %r3, i32 %r4, i64 %r5) {
+; CHECK: f4:
+; CHECK: {{lr|lgr}} %r2, %r5
+; CHECK: br %r14
+ ret i64 %r5
+}
+
+; Do some arithmetic so that we can see the register being used.
+define float @f5(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0) {
+; CHECK: f5:
+; CHECK: aebr %f0, %f0
+; CHECK: br %r14
+ %y = fadd float %f0, %f0
+ ret float %y
+}
+
+define double @f6(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2) {
+; CHECK: f6:
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ ret double %f2
+}
+
+; fp128s are passed indirectly. Do some arithmetic so that the value
+; must be interpreted as a float, rather than as a block of memory to
+; be copied.
+define void @f7(fp128 *%r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6) {
+; CHECK: f7:
+; CHECK: ld %f0, 0(%r6)
+; CHECK: ld %f2, 8(%r6)
+; CHECK: axbr %f0, %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %y = fadd fp128 %r6, %r6
+ store fp128 %y, fp128 *%r2
+ ret void
+}
+
+define i64 @f8(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1) {
+; CHECK: f8:
+; CHECK: lg %r2, 160(%r15)
+; CHECK: br %r14
+ ret i64 %s1
+}
+
+define float @f9(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1, float %f4) {
+; CHECK: f9:
+; CHECK: ler %f0, %f4
+; CHECK: br %r14
+ ret float %f4
+}
+
+define double @f10(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1, float %f4, double %f6) {
+; CHECK: f10:
+; CHECK: ldr %f0, %f6
+; CHECK: br %r14
+ ret double %f6
+}
+
+define i64 @f11(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1, float %f4, double %f6, i64 %s2) {
+; CHECK: f11:
+; CHECK: lg %r2, 168(%r15)
+; CHECK: br %r14
+ ret i64 %s2
+}
+
+; Floats are passed right-justified.
+define float @f12(i8 %r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1, float %f4, double %f6, i64 %s2,
+ float %s3) {
+; CHECK: f12:
+; CHECK: le %f0, 180(%r15)
+; CHECK: br %r14
+ ret float %s3
+}
+
+; Test a case where the fp128 address is passed on the stack.
+define void @f13(fp128 *%r2, i16 %r3, i32 %r4, i64 %r5, float %f0, double %f2,
+ fp128 %r6, i64 %s1, float %f4, double %f6, i64 %s2,
+ float %s3, fp128 %s4) {
+; CHECK: f13:
+; CHECK: lg [[REGISTER:%r[1-5]+]], 184(%r15)
+; CHECK: ld %f0, 0([[REGISTER]])
+; CHECK: ld %f2, 8([[REGISTER]])
+; CHECK: axbr %f0, %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %y = fadd fp128 %s4, %s4
+ store fp128 %y, fp128 *%r2
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-05.ll b/test/CodeGen/SystemZ/args-05.ll
new file mode 100644
index 000000000000..9fa193a68e57
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-05.ll
@@ -0,0 +1,47 @@
+; Test that we take advantage of signext and zeroext annotations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
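+; lgfr sign-extends a 32-bit value to 64 bits and llgfr zero-extends it;
+; f1 and f2 need neither because the incoming value is already extended
+; the right way.
+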
+; Zero extension of something that is already zero-extended.
+define void @f1(i32 zeroext %r2, i64 *%r3) {
+; CHECK: f1:
+; CHECK-NOT: %r2
+; CHECK: stg %r2, 0(%r3)
+; CHECK: br %r14
+ %conv = zext i32 %r2 to i64
+ store i64 %conv, i64* %r3
+ ret void
+}
+
+; Sign extension of something that is already sign-extended.
+define void @f2(i32 signext %r2, i64 *%r3) {
+; CHECK: f2:
+; CHECK-NOT: %r2
+; CHECK: stg %r2, 0(%r3)
+; CHECK: br %r14
+ %conv = sext i32 %r2 to i64
+ store i64 %conv, i64* %r3
+ ret void
+}
+
+; Sign extension of something that is already zero-extended.
+define void @f3(i32 zeroext %r2, i64 *%r3) {
+; CHECK: f3:
+; CHECK: lgfr [[REGISTER:%r[0-5]+]], %r2
+; CHECK: stg [[REGISTER]], 0(%r3)
+; CHECK: br %r14
+ %conv = sext i32 %r2 to i64
+ store i64 %conv, i64* %r3
+ ret void
+}
+
+; Zero extension of something that is already sign-extended.
+define void @f4(i32 signext %r2, i64 *%r3) {
+; CHECK: f4:
+; CHECK: llgfr [[REGISTER:%r[0-5]+]], %r2
+; CHECK: stg [[REGISTER]], 0(%r3)
+; CHECK: br %r14
+ %conv = zext i32 %r2 to i64
+ store i64 %conv, i64* %r3
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/args-06.ll b/test/CodeGen/SystemZ/args-06.ll
new file mode 100644
index 000000000000..b2f8bee2c6b5
--- /dev/null
+++ b/test/CodeGen/SystemZ/args-06.ll
@@ -0,0 +1,76 @@
+; Test the padding of unextended integer stack parameters. These are used
+; to pass structures.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
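+; Each stack parameter gets an 8-byte slot; unextended integers sit in the
+; rightmost bytes of the slot, so the seventh argument is read from offset
+; 167 (i8), 166 (i16), 164 (i32) or 160 (i64).
+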
+define i8 @f1(i8 %a, i8 %b, i8 %c, i8 %d, i8 %e, i8 %f, i8 %g) {
+; CHECK: f1:
+; CHECK: ar %r2, %r3
+; CHECK: ar %r2, %r4
+; CHECK: ar %r2, %r5
+; CHECK: ar %r2, %r6
+; CHECK: lb {{%r[0-5]}}, 167(%r15)
+; CHECK: lb {{%r[0-5]}}, 175(%r15)
+; CHECK: br %r14
+ %addb = add i8 %a, %b
+ %addc = add i8 %addb, %c
+ %addd = add i8 %addc, %d
+ %adde = add i8 %addd, %e
+ %addf = add i8 %adde, %f
+ %addg = add i8 %addf, %g
+ ret i8 %addg
+}
+
+define i16 @f2(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16 %f, i16 %g) {
+; CHECK: f2:
+; CHECK: ar %r2, %r3
+; CHECK: ar %r2, %r4
+; CHECK: ar %r2, %r5
+; CHECK: ar %r2, %r6
+; CHECK: lh {{%r[0-5]}}, 166(%r15)
+; CHECK: lh {{%r[0-5]}}, 174(%r15)
+; CHECK: br %r14
+ %addb = add i16 %a, %b
+ %addc = add i16 %addb, %c
+ %addd = add i16 %addc, %d
+ %adde = add i16 %addd, %e
+ %addf = add i16 %adde, %f
+ %addg = add i16 %addf, %g
+ ret i16 %addg
+}
+
+define i32 @f3(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g) {
+; CHECK: f3:
+; CHECK: ar %r2, %r3
+; CHECK: ar %r2, %r4
+; CHECK: ar %r2, %r5
+; CHECK: ar %r2, %r6
+; CHECK: a %r2, 164(%r15)
+; CHECK: a %r2, 172(%r15)
+; CHECK: br %r14
+ %addb = add i32 %a, %b
+ %addc = add i32 %addb, %c
+ %addd = add i32 %addc, %d
+ %adde = add i32 %addd, %e
+ %addf = add i32 %adde, %f
+ %addg = add i32 %addf, %g
+ ret i32 %addg
+}
+
+define i64 @f4(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g) {
+; CHECK: f4:
+; CHECK: agr %r2, %r3
+; CHECK: agr %r2, %r4
+; CHECK: agr %r2, %r5
+; CHECK: agr %r2, %r6
+; CHECK: ag %r2, 160(%r15)
+; CHECK: ag %r2, 168(%r15)
+; CHECK: br %r14
+ %addb = add i64 %a, %b
+ %addc = add i64 %addb, %c
+ %addd = add i64 %addc, %d
+ %adde = add i64 %addd, %e
+ %addf = add i64 %adde, %f
+ %addg = add i64 %addf, %g
+ ret i64 %addg
+}
diff --git a/test/CodeGen/SystemZ/asm-01.ll b/test/CodeGen/SystemZ/asm-01.ll
new file mode 100644
index 000000000000..016d04c614cb
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-01.ll
@@ -0,0 +1,61 @@
+; Test the "Q" asm constraint, which accepts addresses that have a base
+; and a 12-bit displacement.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
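+; Displacements must lie in [0, 4095]; anything outside that range is
+; added to the base register first.
+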
+; Check the low end of the range.
+define void @f1(i64 %base) {
+; CHECK: f1:
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %addr = inttoptr i64 %base to i64 *
+ call void asm "blah $0", "=*Q" (i64 *%addr)
+ ret void
+}
+
+; Check the next lowest byte.
+define void @f2(i64 %base) {
+; CHECK: f2:
+; CHECK: aghi %r2, -1
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, -1
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*Q" (i64 *%addr)
+ ret void
+}
+
+; Check the high end of the range.
+define void @f3(i64 %base) {
+; CHECK: f3:
+; CHECK: blah 4095(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, 4095
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*Q" (i64 *%addr)
+ ret void
+}
+
+; Check the next highest byte.
+define void @f4(i64 %base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, 4096
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*Q" (i64 *%addr)
+ ret void
+}
+
+; Check that indices aren't allowed.
+define void @f5(i64 %base, i64 %index) {
+; CHECK: f5:
+; CHECK: agr %r2, %r3
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*Q" (i64 *%addr)
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/asm-02.ll b/test/CodeGen/SystemZ/asm-02.ll
new file mode 100644
index 000000000000..12d8bec161ce
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-02.ll
@@ -0,0 +1,52 @@
+; Test the "R" asm constraint, which accepts addresses that have a base,
+; an index and a 12-bit displacement.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the range.
+define void @f1(i64 %base) {
+; CHECK: f1:
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %addr = inttoptr i64 %base to i64 *
+ call void asm "blah $0", "=*R" (i64 *%addr)
+ ret void
+}
+
+; Check the next lowest byte.
+define void @f2(i64 %base) {
+; CHECK: f2:
+; CHECK: aghi %r2, -1
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, -1
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*R" (i64 *%addr)
+ ret void
+}
+
+; Check the high end of the range.
+define void @f3(i64 %base) {
+; CHECK: f3:
+; CHECK: blah 4095(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, 4095
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*R" (i64 *%addr)
+ ret void
+}
+
+; Check the next highest byte.
+define void @f4(i64 %base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %base, 4096
+ %addr = inttoptr i64 %add to i64 *
+ call void asm "blah $0", "=*R" (i64 *%addr)
+ ret void
+}
+
+; FIXME: at the moment the precise constraint is not passed down to
+; target code, so we must conservatively treat "R" as "Q".
diff --git a/test/CodeGen/SystemZ/asm-03.ll b/test/CodeGen/SystemZ/asm-03.ll
new file mode 100644
index 000000000000..a6f3f2a5cb60
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-03.ll
@@ -0,0 +1,16 @@
+; Test the "S" asm constraint, which accepts addresses that have a base
+; and a 20-bit displacement.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i64 %base) {
+; CHECK: f1:
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %addr = inttoptr i64 %base to i64 *
+ call void asm "blah $0", "=*S" (i64 *%addr)
+ ret void
+}
+
+; FIXME: at the moment the precise constraint is not passed down to
+; target code, so we must conservatively treat "S" as "Q".
diff --git a/test/CodeGen/SystemZ/asm-04.ll b/test/CodeGen/SystemZ/asm-04.ll
new file mode 100644
index 000000000000..0560949eb069
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-04.ll
@@ -0,0 +1,16 @@
+; Test the "T" asm constraint, which accepts addresses that have a base,
+; an index and a 20-bit displacement.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i64 %base) {
+; CHECK: f1:
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %addr = inttoptr i64 %base to i64 *
+ call void asm "blah $0", "=*T" (i64 *%addr)
+ ret void
+}
+
+; FIXME: at the moment the precise constraint is not passed down to
+; target code, so we must conservatively treat "T" as "Q".
diff --git a/test/CodeGen/SystemZ/asm-05.ll b/test/CodeGen/SystemZ/asm-05.ll
new file mode 100644
index 000000000000..dae90b09eafe
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-05.ll
@@ -0,0 +1,15 @@
+; Test the "m" asm constraint, which is equivalent to "T".
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i64 %base) {
+; CHECK: f1:
+; CHECK: blah 0(%r2)
+; CHECK: br %r14
+ %addr = inttoptr i64 %base to i64 *
+ call void asm "blah $0", "=*m" (i64 *%addr)
+ ret void
+}
+
+; FIXME: at the moment the precise constraint is not passed down to
+; target code, so we must conservatively treat "m" as "Q".
diff --git a/test/CodeGen/SystemZ/asm-06.ll b/test/CodeGen/SystemZ/asm-06.ll
new file mode 100644
index 000000000000..c0e24a366486
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-06.ll
@@ -0,0 +1,39 @@
+; Test the GPR constraint "a", which forbids %r0.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: lhi %r1, 1
+; CHECK: blah %r2 %r1
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,a" (i8 1)
+ ret i64 %val
+}
+
+define i64 @f2() {
+; CHECK: f2:
+; CHECK: lhi %r1, 2
+; CHECK: blah %r2 %r1
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,a" (i16 2)
+ ret i64 %val
+}
+
+define i64 @f3() {
+; CHECK: f3:
+; CHECK: lhi %r1, 3
+; CHECK: blah %r2 %r1
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,a" (i32 3)
+ ret i64 %val
+}
+
+define i64 @f4() {
+; CHECK: f4:
+; CHECK: lghi %r1, 4
+; CHECK: blah %r2 %r1
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,a" (i64 4)
+ ret i64 %val
+}
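
The point of "a" is that %r0 means "no register" when it appears in an address,
so a value that will feed a base or index must be kept out of %r0. A hedged
sketch of the intended use (the function name is hypothetical, "lg" is the
real indexed 64-bit load, with D(X,B) operand order):

define i64 @load_indexed(i64 %index, i64 %base) {
  ; both address components use "a" so neither can be assigned %r0
  %val = call i64 asm "lg $0, 0($1,$2)", "=r,a,a" (i64 %index, i64 %base)
  ret i64 %val
}
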
diff --git a/test/CodeGen/SystemZ/asm-07.ll b/test/CodeGen/SystemZ/asm-07.ll
new file mode 100644
index 000000000000..e07286d9a4d6
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-07.ll
@@ -0,0 +1,39 @@
+; Test the GPR constraint "r".
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: lhi %r0, 1
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,r" (i8 1)
+ ret i64 %val
+}
+
+define i64 @f2() {
+; CHECK: f2:
+; CHECK: lhi %r0, 2
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,r" (i16 2)
+ ret i64 %val
+}
+
+define i64 @f3() {
+; CHECK: f3:
+; CHECK: lhi %r0, 3
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,r" (i32 3)
+ ret i64 %val
+}
+
+define i64 @f4() {
+; CHECK: f4:
+; CHECK: lghi %r0, 4
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=r,r" (i64 4)
+ ret i64 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-08.ll b/test/CodeGen/SystemZ/asm-08.ll
new file mode 100644
index 000000000000..15abc4d0d2ed
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-08.ll
@@ -0,0 +1,39 @@
+; Test the GPR constraint "d", which is equivalent to "r".
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: lhi %r0, 1
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=d,d" (i8 1)
+ ret i64 %val
+}
+
+define i64 @f2() {
+; CHECK: f2:
+; CHECK: lhi %r0, 2
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=d,d" (i16 2)
+ ret i64 %val
+}
+
+define i64 @f3() {
+; CHECK: f3:
+; CHECK: lhi %r0, 3
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=d,d" (i32 3)
+ ret i64 %val
+}
+
+define i64 @f4() {
+; CHECK: f4:
+; CHECK: lghi %r0, 4
+; CHECK: blah %r2 %r0
+; CHECK: br %r14
+ %val = call i64 asm "blah $0 $1", "=d,d" (i64 4)
+ ret i64 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-09.ll b/test/CodeGen/SystemZ/asm-09.ll
new file mode 100644
index 000000000000..1541170924b7
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-09.ll
@@ -0,0 +1,83 @@
+; Test matching operands with the GPR constraint "r".
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i32 *%dst) {
+; CHECK: f1:
+; CHECK: lhi %r0, 100
+; CHECK: blah %r0
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = call i32 asm "blah $0", "=r,0" (i8 100)
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+define void @f2(i32 *%dst) {
+; CHECK: f2:
+; CHECK: lhi %r0, 101
+; CHECK: blah %r0
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = call i32 asm "blah $0", "=r,0" (i16 101)
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+define void @f3(i32 *%dst) {
+; CHECK: f3:
+; CHECK: lhi %r0, 102
+; CHECK: blah %r0
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = call i32 asm "blah $0", "=r,0" (i32 102)
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+; FIXME: this uses "lhi %r0, 103", but should use "lghi %r0, 103".
+define void @f4(i32 *%dst) {
+; CHECK: f4:
+; CHECK: blah %r0
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = call i32 asm "blah $0", "=r,0" (i64 103)
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+define i64 @f5() {
+; CHECK: f5:
+; CHECK: lghi %r2, 104
+; CHECK: blah %r2
+; CHECK: br %r14
+ %val = call i64 asm "blah $0", "=r,0" (i8 104)
+ ret i64 %val
+}
+
+define i64 @f6() {
+; CHECK: f6:
+; CHECK: lghi %r2, 105
+; CHECK: blah %r2
+; CHECK: br %r14
+ %val = call i64 asm "blah $0", "=r,0" (i16 105)
+ ret i64 %val
+}
+
+define i64 @f7() {
+; CHECK: f7:
+; CHECK: lghi %r2, 106
+; CHECK: blah %r2
+; CHECK: br %r14
+ %val = call i64 asm "blah $0", "=r,0" (i32 106)
+ ret i64 %val
+}
+
+define i64 @f8() {
+; CHECK: f8:
+; CHECK: lghi %r2, 107
+; CHECK: blah %r2
+; CHECK: br %r14
+ %val = call i64 asm "blah $0", "=r,0" (i64 107)
+ ret i64 %val
+}
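
The "0" in "=r,0" is a matching constraint: the input is forced into whatever
register was chosen for operand 0, which is why each test above sees a single
register on both sides of the asm. A minimal sketch of the usual
read-modify-write pattern (hypothetical function name, real "ahi" mnemonic):

define i32 @increment(i32 %x) {
  ; input %x and the result share one register because of the "0" tie
  %res = call i32 asm "ahi $0, 1", "=r,0" (i32 %x)
  ret i32 %res
}
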
diff --git a/test/CodeGen/SystemZ/asm-10.ll b/test/CodeGen/SystemZ/asm-10.ll
new file mode 100644
index 000000000000..676c2028b056
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-10.ll
@@ -0,0 +1,30 @@
+; Test the FPR constraint "f".
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define float @f1() {
+; CHECK: f1:
+; CHECK: lzer %f1
+; CHECK: blah %f0 %f1
+; CHECK: br %r14
+ %val = call float asm "blah $0 $1", "=&f,f" (float 0.0)
+ ret float %val
+}
+
+define double @f2() {
+; CHECK: f2:
+; CHECK: lzdr %f1
+; CHECK: blah %f0 %f1
+; CHECK: br %r14
+ %val = call double asm "blah $0 $1", "=&f,f" (double 0.0)
+ ret double %val
+}
+
+define double @f3() {
+; CHECK: f3:
+; CHECK: lzxr %f1
+; CHECK: blah %f0 %f1
+; CHECK: br %r14
+ %val = call double asm "blah $0 $1", "=&f,f" (fp128 0xL00000000000000000000000000000000)
+ ret double %val
+}
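
The "&" in "=&f" is an early-clobber marker: the output may be written before
the inputs are read, so the allocator must give the input a different FPR,
which is why the tests see %f1 against an %f0 result. Without it, a sketch
like the following could legitimately place both operands in %f0 (hypothetical
function name, same fake "blah" mnemonic as the tests):

define float @no_earlyclobber() {
  ; with plain "=f,f" the input may share the output register
  %val = call float asm "blah $0 $1", "=f,f" (float 0.0)
  ret float %val
}
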
diff --git a/test/CodeGen/SystemZ/asm-11.ll b/test/CodeGen/SystemZ/asm-11.ll
new file mode 100644
index 000000000000..9bd8d7c33f01
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-11.ll
@@ -0,0 +1,41 @@
+; Test the "I" constraint (8-bit unsigned constants).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the first valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: lhi [[REG:%r[0-5]]], -1
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rI" (i32 -1)
+ ret i32 %val
+}
+
+; Test the first valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 0
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rI" (i32 0)
+ ret i32 %val
+}
+
+; Test the last valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: blah %r2 255
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rI" (i32 255)
+ ret i32 %val
+}
+
+; Test 1 above the last valid value.
+define i32 @f4() {
+; CHECK: f4:
+; CHECK: lhi [[REG:%r[0-5]]], 256
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rI" (i32 256)
+ ret i32 %val
+}
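
The same pattern recurs for "J", "K" and "L" below: the composite "rI" means
"register or in-range immediate", so an out-of-range constant simply falls
back to the register alternative rather than being rejected. With a
non-constant operand the immediate alternative can never match, as in this
sketch (hypothetical function name):

define i32 @variable(i32 %x) {
  ; a runtime value always takes the "r" alternative of "rI"
  %val = call i32 asm "blah $0 $1", "=&r,rI" (i32 %x)
  ret i32 %val
}
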
diff --git a/test/CodeGen/SystemZ/asm-12.ll b/test/CodeGen/SystemZ/asm-12.ll
new file mode 100644
index 000000000000..dd920f11fdec
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-12.ll
@@ -0,0 +1,41 @@
+; Test the "J" constraint (12-bit unsigned constants).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the first valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: lhi [[REG:%r[0-5]]], -1
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rJ" (i32 -1)
+ ret i32 %val
+}
+
+; Test the first valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 0
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rJ" (i32 0)
+ ret i32 %val
+}
+
+; Test the last valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: blah %r2 4095
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rJ" (i32 4095)
+ ret i32 %val
+}
+
+; Test 1 above the last valid value.
+define i32 @f4() {
+; CHECK: f4:
+; CHECK: lhi [[REG:%r[0-5]]], 4096
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rJ" (i32 4096)
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-13.ll b/test/CodeGen/SystemZ/asm-13.ll
new file mode 100644
index 000000000000..af3fdb361533
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-13.ll
@@ -0,0 +1,41 @@
+; Test the "K" constraint (16-bit signed constants).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the first valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: iilf [[REG:%r[0-5]]], 4294934527
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rK" (i32 -32769)
+ ret i32 %val
+}
+
+; Test the first valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 -32768
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rK" (i32 -32768)
+ ret i32 %val
+}
+
+; Test the last valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: blah %r2 32767
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rK" (i32 32767)
+ ret i32 %val
+}
+
+; Test 1 above the last valid value.
+define i32 @f4() {
+; CHECK: f4:
+; CHECK: llill [[REG:%r[0-5]]], 32768
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rK" (i32 32768)
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-14.ll b/test/CodeGen/SystemZ/asm-14.ll
new file mode 100644
index 000000000000..b6b28d6b32fc
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-14.ll
@@ -0,0 +1,41 @@
+; Test the "L" constraint (20-bit signed constants).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the first valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: iilf [[REG:%r[0-5]]], 4294443007
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rL" (i32 -524289)
+ ret i32 %val
+}
+
+; Test the first valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 -524288
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rL" (i32 -524288)
+ ret i32 %val
+}
+
+; Test the last valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: blah %r2 524287
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rL" (i32 524287)
+ ret i32 %val
+}
+
+; Test 1 above the last valid value.
+define i32 @f4() {
+; CHECK: f4:
+; CHECK: llilh [[REG:%r[0-5]]], 8
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rL" (i32 524288)
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-15.ll b/test/CodeGen/SystemZ/asm-15.ll
new file mode 100644
index 000000000000..4d0e2b4c3be3
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-15.ll
@@ -0,0 +1,32 @@
+; Test the "M" constraint (0x7fffffff)
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: iilf [[REG:%r[0-5]]], 2147483646
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483646)
+ ret i32 %val
+}
+
+; Test the only valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 2147483647
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483647)
+ ret i32 %val
+}
+
+; Test 1 above the valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: llilh [[REG:%r[0-5]]], 32768
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483648)
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/asm-16.ll b/test/CodeGen/SystemZ/asm-16.ll
new file mode 100644
index 000000000000..4d0e2b4c3be3
--- /dev/null
+++ b/test/CodeGen/SystemZ/asm-16.ll
@@ -0,0 +1,32 @@
+; Test the "M" constraint (0x7fffffff)
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 1 below the valid value.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: iilf [[REG:%r[0-5]]], 2147483646
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483646)
+ ret i32 %val
+}
+
+; Test the only valid value.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: blah %r2 2147483647
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483647)
+ ret i32 %val
+}
+
+; Test 1 above the valid value.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: llilh [[REG:%r[0-5]]], 32768
+; CHECK: blah %r2 [[REG]]
+; CHECK: br %r14
+ %val = call i32 asm "blah $0 $1", "=&r,rM" (i32 2147483648)
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/atomic-load-01.ll b/test/CodeGen/SystemZ/atomic-load-01.ll
new file mode 100644
index 000000000000..3e86bcf78ae6
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-load-01.ll
@@ -0,0 +1,13 @@
+; Test 8-bit atomic loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that loads are handled.
+; The CS-based sequence is probably far too conservative.
+define i8 @f1(i8 *%src) {
+; CHECK: f1:
+; CHECK: cs
+; CHECK: br %r14
+ %val = load atomic i8 *%src seq_cst, align 1
+ ret i8 %val
+}
diff --git a/test/CodeGen/SystemZ/atomic-load-02.ll b/test/CodeGen/SystemZ/atomic-load-02.ll
new file mode 100644
index 000000000000..d6168cedb8a8
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-load-02.ll
@@ -0,0 +1,13 @@
+; Test 16-bit atomic loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that loads are handled.
+; The CS-based sequence is probably far too conservative.
+define i16 @f1(i16 *%src) {
+; CHECK: f1:
+; CHECK: cs
+; CHECK: br %r14
+ %val = load atomic i16 *%src seq_cst, align 2
+ ret i16 %val
+}
diff --git a/test/CodeGen/SystemZ/atomic-load-03.ll b/test/CodeGen/SystemZ/atomic-load-03.ll
new file mode 100644
index 000000000000..fcf0cf3d5a90
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-load-03.ll
@@ -0,0 +1,14 @@
+; Test 32-bit atomic loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that loads are handled.
+; Using CS is probably too conservative.
+define i32 @f1(i32 %dummy, i32 *%src) {
+; CHECK: f1:
+; CHECK: lhi %r2, 0
+; CHECK: cs %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load atomic i32 *%src seq_cst, align 4
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/atomic-load-04.ll b/test/CodeGen/SystemZ/atomic-load-04.ll
new file mode 100644
index 000000000000..9593d35fef0c
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-load-04.ll
@@ -0,0 +1,14 @@
+; Test 64-bit atomic loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that loads are handled.
+; Using CSG is probably too conservative.
+define i64 @f1(i64 %dummy, i64 *%src) {
+; CHECK: f1:
+; CHECK: lghi %r2, 0
+; CHECK: csg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load atomic i64 *%src seq_cst, align 8
+ ret i64 %val
+}
diff --git a/test/CodeGen/SystemZ/atomic-store-01.ll b/test/CodeGen/SystemZ/atomic-store-01.ll
new file mode 100644
index 000000000000..b316e5cd6309
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-store-01.ll
@@ -0,0 +1,13 @@
+; Test 8-bit atomic stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that stores are handled.
+; The CS-based sequence is probably far too conservative.
+define void @f1(i8 %val, i8 *%src) {
+; CHECK: f1:
+; CHECK: cs
+; CHECK: br %r14
+ store atomic i8 %val, i8 *%src seq_cst, align 1
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/atomic-store-02.ll b/test/CodeGen/SystemZ/atomic-store-02.ll
new file mode 100644
index 000000000000..c76171431883
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-store-02.ll
@@ -0,0 +1,13 @@
+; Test 16-bit atomic stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that stores are handled.
+; The CS-based sequence is probably far too conservative.
+define void @f1(i16 %val, i16 *%src) {
+; CHECK: f1:
+; CHECK: cs
+; CHECK: br %r14
+ store atomic i16 %val, i16 *%src seq_cst, align 2
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/atomic-store-03.ll b/test/CodeGen/SystemZ/atomic-store-03.ll
new file mode 100644
index 000000000000..6e2996313db6
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-store-03.ll
@@ -0,0 +1,16 @@
+; Test 32-bit atomic stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that stores are handled.
+; Using CS is probably too conservative.
+define void @f1(i32 %val, i32 *%src) {
+; CHECK: f1:
+; CHECK: l %r0, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: cs %r0, %r2, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ store atomic i32 %val, i32 *%src seq_cst, align 4
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/atomic-store-04.ll b/test/CodeGen/SystemZ/atomic-store-04.ll
new file mode 100644
index 000000000000..7a611c8cf081
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomic-store-04.ll
@@ -0,0 +1,16 @@
+; Test 64-bit atomic stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is just a placeholder to make sure that stores are handled.
+; Using CS is probably too conservative.
+define void @f1(i64 %val, i64 *%src) {
+; CHECK: f1:
+; CHECK: lg %r0, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: csg %r0, %r2, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ store atomic i64 %val, i64 *%src seq_cst, align 8
+ ret void
+}
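
The load-then-CS loop these two store tests expect is structurally the code
one would get from an atomic exchange whose result is discarded. A hedged way
to see that equivalence at the IR level (hypothetical function name; the
observation about the shared loop shape is an inference from the CHECK lines,
not a statement about how the backend is implemented):

define void @store_as_xchg(i64 %val, i64 *%src) {
  ; same loop shape: lg, then csg until it succeeds
  %old = atomicrmw xchg i64 *%src, i64 %val seq_cst
  ret void
}
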
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-01.ll b/test/CodeGen/SystemZ/atomicrmw-add-01.ll
new file mode 100644
index 000000000000..2a84857f836d
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-01.ll
@@ -0,0 +1,132 @@
+; Test 8-bit atomic additions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check addition of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: ar [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: ar {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check the minimum signed value. We add 0x80000000 to the rotated word.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: afi [[ROT]], -2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check addition of -1. We add 0xff000000 to the rotated word.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: afi [[ROT]], -16777216
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 -1 seq_cst
+ ret i8 %res
+}
+
+; Check addition of 1. We add 0x01000000 to the rotated word.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: afi [[ROT]], 16777216
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We add 0x7f000000 to the rotated word.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: afi [[ROT]], 2130706432
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check addition of a large unsigned value. We add 0xfe000000 to the
+; rotated word, expressed as a negative AFI operand.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: afi [[ROT]], -33554432
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i8 *%src, i8 254 seq_cst
+ ret i8 %res
+}
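
To make the loop structure above concrete: the expansion is morally the
following manual compare-and-swap loop on the containing aligned word. This is
only a sketch under stated assumptions (it masks the byte in place rather than
rotating it to the top of the word as the real lowering does, and the helper
name is invented); the retry structure and big-endian lane selection are the
parts it shares with the CHECK lines.

define i8 @byte_add_sketch(i8 *%src, i8 %b) {
entry:
  %srcint = ptrtoint i8 *%src to i64
  %alignedint = and i64 %srcint, -4
  %word = inttoptr i64 %alignedint to i32 *
  ; big-endian: byte offset 0 is the most significant byte of the word
  %byteidx = and i64 %srcint, 3
  %invidx = xor i64 %byteidx, 3
  %shiftwide = shl i64 %invidx, 3
  %shift = trunc i64 %shiftwide to i32
  %mask = shl i32 255, %shift
  %notmask = xor i32 %mask, -1
  %bext = zext i8 %b to i32
  %bshifted = shl i32 %bext, %shift
  %init = load i32 *%word
  br label %loop

loop:
  %old = phi i32 [ %init, %entry ], [ %cur, %loop ]
  ; add within the lane; any carry into higher bytes is masked off
  %sum = add i32 %old, %bshifted
  %lane = and i32 %sum, %mask
  %rest = and i32 %old, %notmask
  %new = or i32 %rest, %lane
  %cur = cmpxchg i32 *%word, i32 %old, i32 %new seq_cst
  %ok = icmp eq i32 %cur, %old
  br i1 %ok, label %done, label %loop

done:
  ; atomicrmw returns the old value, so extract the original lane
  %oldlane = lshr i32 %old, %shift
  %res = trunc i32 %oldlane to i8
  ret i8 %res
}
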
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-02.ll b/test/CodeGen/SystemZ/atomicrmw-add-02.ll
new file mode 100644
index 000000000000..3dd482dd323d
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-02.ll
@@ -0,0 +1,132 @@
+; Test 16-bit atomic additions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check addition of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: ar [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: ar {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We add 0x80000000 to the rotated word.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: afi [[ROT]], -2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check addition of -1. We add 0xffff0000 to the rotated word.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: afi [[ROT]], -65536
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 -1 seq_cst
+ ret i16 %res
+}
+
+; Check addition of 1. We add 0x00010000 to the rotated word.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: afi [[ROT]], 65536
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We add 0x7fff0000 to the rotated word.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: afi [[ROT]], 2147418112
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check addition of a large unsigned value. We add 0xfffe0000 to the
+; rotated word, expressed as a negative AFI operand.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: afi [[ROT]], -131072
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw add i16 *%src, i16 65534 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-03.ll b/test/CodeGen/SystemZ/atomicrmw-add-03.ll
new file mode 100644
index 000000000000..01eb8e0d7464
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-03.ll
@@ -0,0 +1,94 @@
+; Test 32-bit atomic additions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check addition of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: ar %r0, %r4
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check addition of 1, which can use AHI.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: ahi %r0, 1
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the AHI range.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: ahi %r0, 32767
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 32767 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which must use AFI.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: afi %r0, 32768
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 32768 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the AFI range.
+define i32 @f5(i32 %dummy, i32 *%src) {
+; CHECK: f5:
+; CHECK: afi %r0, 2147483647
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 2147483647 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which gets treated as a negative operand.
+define i32 @f6(i32 %dummy, i32 *%src) {
+; CHECK: f6:
+; CHECK: afi %r0, -2147483648
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 2147483648 seq_cst
+ ret i32 %res
+}
+
+; Check addition of -1, which can use AHI.
+define i32 @f7(i32 %dummy, i32 *%src) {
+; CHECK: f7:
+; CHECK: ahi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 -1 seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the AHI range.
+define i32 @f8(i32 %dummy, i32 *%src) {
+; CHECK: f8:
+; CHECK: ahi %r0, -32768
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 -32768 seq_cst
+ ret i32 %res
+}
+
+; Check the next value down, which must use AFI instead.
+define i32 @f9(i32 %dummy, i32 *%src) {
+; CHECK: f9:
+; CHECK: afi %r0, -32769
+; CHECK: br %r14
+ %res = atomicrmw add i32 *%src, i32 -32769 seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-add-04.ll b/test/CodeGen/SystemZ/atomicrmw-add-04.ll
new file mode 100644
index 000000000000..6b1d20bd080e
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-add-04.ll
@@ -0,0 +1,112 @@
+; Test 64-bit atomic additions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check addition of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: agr %r0, %r4
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check addition of 1, which can use AGHI.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: aghi %r0, 1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the AGHI range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: aghi %r0, 32767
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 32767 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use AGFI.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r0, 32768
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 32768 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the AGFI range.
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r0, 2147483647
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 2147483647 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register addition.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: agr
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 2147483648 seq_cst
+ ret i64 %res
+}
+
+; Check addition of -1, which can use AGHI.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 -1 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the AGHI range.
+define i64 @f8(i64 %dummy, i64 *%src) {
+; CHECK: f8:
+; CHECK: aghi %r0, -32768
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 -32768 seq_cst
+ ret i64 %res
+}
+
+; Check the next value down, which must use AGFI instead.
+define i64 @f9(i64 %dummy, i64 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r0, -32769
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 -32769 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the AGFI range.
+define i64 @f10(i64 %dummy, i64 *%src) {
+; CHECK: f10:
+; CHECK: agfi %r0, -2147483648
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 -2147483648 seq_cst
+ ret i64 %res
+}
+
+; Check the next value down, which must use a register addition.
+define i64 @f11(i64 %dummy, i64 *%src) {
+; CHECK: f11:
+; CHECK: agr
+; CHECK: br %r14
+ %res = atomicrmw add i64 *%src, i64 -2147483649 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-01.ll b/test/CodeGen/SystemZ/atomicrmw-and-01.ll
new file mode 100644
index 000000000000..ebbce8e7872b
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-01.ll
@@ -0,0 +1,133 @@
+; Test 8-bit atomic ANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check AND of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used, and that the low bits are set to 1. This sequence is
+; independent of the other loop prologue instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: oilf %r3, 16777215
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: nr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check the minimum signed value. We AND the rotated word with 0x80ffffff.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nilh [[ROT]], 33023
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check ANDs of -2 (-1 isn't useful). We AND the rotated word with 0xfeffffff.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: nilh [[ROT]], 65279
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 -2 seq_cst
+ ret i8 %res
+}
+
+; Check ANDs of 1. We AND the rotated word with 0x01ffffff.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: nilh [[ROT]], 511
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We AND the rotated word with 0x7fffffff.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: nilh [[ROT]], 32767
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check ANDs of a large unsigned value. We AND the rotated word with
+; 0xfdffffff.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: nilh [[ROT]], 65023
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i8 *%src, i8 253 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-02.ll b/test/CodeGen/SystemZ/atomicrmw-and-02.ll
new file mode 100644
index 000000000000..b63ca4ab4407
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-02.ll
@@ -0,0 +1,133 @@
+; Test 16-bit atomic ANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check AND of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used, and that the low bits are set to 1. This sequence is
+; independent of the other loop prologue instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: oill %r3, 65535
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: nr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We AND the rotated word with 0x8000ffff.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nilh [[ROT]], 32768
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check ANDs of -2 (-1 isn't useful). We AND the rotated word with 0xfffeffff.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: nilh [[ROT]], 65534
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 -2 seq_cst
+ ret i16 %res
+}
+
+; Check ANDs of 1. We AND the rotated word with 0x0001ffff.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: nilh [[ROT]], 1
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We AND the rotated word with 0x7fffffff.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: nilh [[ROT]], 32767
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check ANDs of a large unsigned value. We AND the rotated word with
+; 0xfffdffff.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: nilh [[ROT]], 65533
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw and i16 *%src, i16 65533 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-03.ll b/test/CodeGen/SystemZ/atomicrmw-and-03.ll
new file mode 100644
index 000000000000..ec69edcf1a47
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-03.ll
@@ -0,0 +1,85 @@
+; Test 32-bit atomic ANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ANDs of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: nr %r0, %r4
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check ANDs of 1.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: nilf %r0, 1
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check ANDs of the low end of the NILH range.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: nilh %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 65535 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which must use NILF.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: nilf %r0, 65536
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 65536 seq_cst
+ ret i32 %res
+}
+
+; Check the largest useful NILL value.
+define i32 @f5(i32 %dummy, i32 *%src) {
+; CHECK: f5:
+; CHECK: nill %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 -2 seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the NILL range.
+define i32 @f6(i32 %dummy, i32 *%src) {
+; CHECK: f6:
+; CHECK: nill %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 -65536 seq_cst
+ ret i32 %res
+}
+
+; Check the largest useful NILH value, which is one less than the above.
+define i32 @f7(i32 %dummy, i32 *%src) {
+; CHECK: f7:
+; CHECK: nilh %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 -65537 seq_cst
+ ret i32 %res
+}
+
+; Check the highest useful NILF value, which is one less than the above.
+define i32 @f8(i32 %dummy, i32 *%src) {
+; CHECK: f8:
+; CHECK: nilf %r0, 4294901758
+; CHECK: br %r14
+ %res = atomicrmw and i32 *%src, i32 -65538 seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-and-04.ll b/test/CodeGen/SystemZ/atomicrmw-and-04.ll
new file mode 100644
index 000000000000..71f29baa0e6f
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-and-04.ll
@@ -0,0 +1,157 @@
+; Test 64-bit atomic ANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ANDs of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: ngr %r0, %r4
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check ANDs of 1, which must be done using a register.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHF range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: nihf %r0, 0
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 4294967295 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHH range.
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: nihh %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 281474976710655 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 281474976710656 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILL value.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: nill %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -2 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILL range.
+define i64 @f8(i64 %dummy, i64 *%src) {
+; CHECK: f8:
+; CHECK: nill %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -65536 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILH value, which is one less than the above.
+define i64 @f9(i64 %dummy, i64 *%src) {
+; CHECK: f9:
+; CHECK: nilh %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -65537 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILF value, which is one less than the above.
+define i64 @f10(i64 %dummy, i64 *%src) {
+; CHECK: f10:
+; CHECK: nilf %r0, 4294901758
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -65538 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILH range.
+define i64 @f11(i64 %dummy, i64 *%src) {
+; CHECK: f11:
+; CHECK: nilh %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -4294901761 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILF range.
+define i64 @f12(i64 %dummy, i64 *%src) {
+; CHECK: f12:
+; CHECK: nilf %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHL value, which is one less than the above.
+define i64 @f13(i64 %dummy, i64 *%src) {
+; CHECK: f13:
+; CHECK: nihl %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -4294967297 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHL range.
+define i64 @f14(i64 %dummy, i64 *%src) {
+; CHECK: f14:
+; CHECK: nihl %r0, 0
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -281470681743361 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHH value, which is 1<<32 less than the above.
+define i64 @f15(i64 %dummy, i64 *%src) {
+; CHECK: f15:
+; CHECK: nihh %r0, 65534
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -281474976710657 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHF value, which is 1<<32 less than the above.
+define i64 @f16(i64 %dummy, i64 *%src) {
+; CHECK: f16:
+; CHECK: nihf %r0, 4294901758
+; CHECK: br %r14
+ %res = atomicrmw and i64 *%src, i64 -281479271677953 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-minmax-01.ll b/test/CodeGen/SystemZ/atomicrmw-minmax-01.ll
new file mode 100644
index 000000000000..c6ec77e91b3d
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-minmax-01.ll
@@ -0,0 +1,228 @@
+; Test 8-bit atomic min/max operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check signed minimum.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+;   before being used. This shift is independent of the other loop prologue
+;   instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: cr [[ROT]], %r3
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 39, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: cr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw min i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check signed maximum.
+define i8 @f2(i8 *%src, i8 %b) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: cr [[ROT]], %r3
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 39, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: cr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw max i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check unsigned minimum.
+define i8 @f3(i8 *%src, i8 %b) {
+; CHECK: f3:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: clr [[ROT]], %r3
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 39, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: clr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umin i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check unsigned maximum.
+define i8 @f4(i8 *%src, i8 %b) {
+; CHECK: f4:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: clr [[ROT]], %r3
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 39, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: clr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umax i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check the lowest useful signed minimum value. We need to load 0x81000000
+; into the source register.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 33024
+; CHECK: cr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 39, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw min i8 *%src, i8 -127 seq_cst
+ ret i8 %res
+}
+
+; Check the highest useful signed maximum value. We need to load 0x7e000000
+; into the source register.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 32256
+; CHECK: cr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 39, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw max i8 *%src, i8 126 seq_cst
+ ret i8 %res
+}
+
+; Check the lowest useful unsigned minimum value. We need to load 0x01000000
+; into the source register.
+define i8 @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 256
+; CHECK: clr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 39, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f7:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f7:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umin i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the highest useful unsigned maximum value. We need to load 0xfe000000
+; into the source register.
+define i8 @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 65024
+; CHECK: clr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 39, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f8:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f8:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umax i8 *%src, i8 254 seq_cst
+ ret i8 %res
+}
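
The cr/jle/risbg triple inside each loop is a branchy form of a one-step
min/max decision, with risbg playing the role of the select by copying %b's
byte into the lane only on the "replace" path. A sketch of what a single
iteration of atomicrmw min computes (hypothetical helper, not the commit's
code):

define i8 @min_step(i8 %old, i8 %b) {
  ; keep %old when it is already <= %b, otherwise take %b
  %cmp = icmp sle i8 %old, %b
  %new = select i1 %cmp, i8 %old, i8 %b
  ret i8 %new
}
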
diff --git a/test/CodeGen/SystemZ/atomicrmw-minmax-02.ll b/test/CodeGen/SystemZ/atomicrmw-minmax-02.ll
new file mode 100644
index 000000000000..9612e99b7387
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-minmax-02.ll
@@ -0,0 +1,228 @@
+; Test 16-bit atomic min/max operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check signed minimum.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+;   before being used. This shift is independent of the other loop prologue
+;   instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: cr [[ROT]], %r3
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 47, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: cr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw min i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check signed maximum.
+define i16 @f2(i16 *%src, i16 %b) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: cr [[ROT]], %r3
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 47, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: cr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw max i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check unsigned minimum.
+define i16 @f3(i16 *%src, i16 %b) {
+; CHECK: f3:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: clr [[ROT]], %r3
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 47, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: clr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umin i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check unsigned maximum.
+define i16 @f4(i16 *%src, i16 %b) {
+; CHECK: f4:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: clr [[ROT]], %r3
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: risbg [[ROT]], %r3, 32, 47, 0
+; CHECK: [[KEEP]]:
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: clr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umax i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the lowest useful signed minimum value. We need to load 0x80010000
+; into the source register.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 32769
+; CHECK: cr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 47, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw min i16 *%src, i16 -32767 seq_cst
+ ret i16 %res
+}
+
+; Check the highest useful signed maximum value. We need to load 0x7ffe0000
+; into the source register.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 32766
+; CHECK: cr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 47, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw max i16 *%src, i16 32766 seq_cst
+ ret i16 %res
+}
+
+; Check the lowest useful unsigned minimum value. We need to load 0x00010000
+; into the source register.
+define i16 @f7(i16 *%src) {
+; CHECK: f7:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 1
+; CHECK: clr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 47, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f7:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f7:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umin i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the highest useful unsigned maximum value. We need to load 0xfffe0000
+; into the source register.
+define i16 @f8(i16 *%src) {
+; CHECK: f8:
+; CHECK: llilh [[SRC2:%r[0-9]+]], 65534
+; CHECK: clr [[ROT:%r[0-9]+]], [[SRC2]]
+; CHECK: risbg [[ROT]], [[SRC2]], 32, 47, 0
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f8:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f8:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw umax i16 *%src, i16 65534 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-minmax-03.ll b/test/CodeGen/SystemZ/atomicrmw-minmax-03.ll
new file mode 100644
index 000000000000..b5809bdc1693
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-minmax-03.ll
@@ -0,0 +1,176 @@
+; Test 32-bit atomic minimum and maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check signed minimum.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cr %r2, %r4
+; CHECK: lr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lr [[NEW]], %r4
+; CHECK: cs %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw min i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
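+
+; The full-word form needs no rotation: it is a plain compare-and-swap loop.
+; A hedged C model of the sequence checked in f1 (the function name is ours):
+;
+;   int atomic_min_i32(int *src, int b) {
+;     int old = __atomic_load_n(src, __ATOMIC_RELAXED);  /* l %r2, 0(%r3) */
+;     int newv;
+;     do {
+;       newv = old;                /* lr [[NEW]], %r2          */
+;       if (!(old <= b))           /* cr; jle skips the update */
+;         newv = b;                /* lr [[NEW]], %r4          */
+;     } while (!__atomic_compare_exchange_n(src, &old, newv, 0,
+;                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));   /* cs + jlh */
+;     return old;                  /* the value seen before the update */
+;   }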
+
+; Check signed maximum.
+define i32 @f2(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cr %r2, %r4
+; CHECK: lr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: lr [[NEW]], %r4
+; CHECK: cs %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw max i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check unsigned minimum.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f3:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: clr %r2, %r4
+; CHECK: lr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lr [[NEW]], %r4
+; CHECK: cs %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw umin i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check unsigned maximum.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f4:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: clr %r2, %r4
+; CHECK: lr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: lr [[NEW]], %r4
+; CHECK: cs %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw umax i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the aligned CS range.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f5:
+; CHECK: l %r2, 4092(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which requires CSY.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f6:
+; CHECK: ly %r2, 4096(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the aligned CSY range.
+define i32 @f7(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f7:
+; CHECK: ly %r2, 524284(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f8(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f8:
+; CHECK: agfi %r3, 524288
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the negative aligned CSY range.
+define i32 @f9(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f9:
+; CHECK: ly %r2, -4(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the CSY range.
+define i32 @f10(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f10:
+; CHECK: ly %r2, -524288(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f11(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f11:
+; CHECK: agfi %r3, -524292
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
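+
+; Taken together, f5-f11 pin down the displacement ranges: CS takes an
+; unsigned 12-bit displacement (0 to 4095), CSY a signed 20-bit one
+; (-524288 to 524287), and anything outside that is first folded into the
+; base register with AGFI.  A hedged C sketch of the range checks:
+;
+;   int fitsCS(long d)  { return d >= 0 && d <= 4095; }
+;   int fitsCSY(long d) { return d >= -524288 && d <= 524287; }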
+
+; Check that indexed addresses are not allowed.
+define i32 @f12(i32 %dummy, i64 %base, i64 %index, i32 %b) {
+; CHECK: f12:
+; CHECK: agr %r3, %r4
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i32 *
+ %res = atomicrmw min i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check that constants are forced into a register.
+define i32 @f13(i32 %dummy, i32 *%ptr) {
+; CHECK: f13:
+; CHECK: lhi [[LIMIT:%r[0-9]+]], 42
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cr %r2, [[LIMIT]]
+; CHECK: lr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lr [[NEW]], [[LIMIT]]
+; CHECK: cs %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw min i32 *%ptr, i32 42 seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-minmax-04.ll b/test/CodeGen/SystemZ/atomicrmw-minmax-04.ll
new file mode 100644
index 000000000000..68978547d3e9
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-minmax-04.ll
@@ -0,0 +1,143 @@
+; Test 64-bit atomic minimum and maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check signed minimum.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cgr %r2, %r4
+; CHECK: lgr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lgr [[NEW]], %r4
+; CHECK: csg %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw min i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check signed maximum.
+define i64 @f2(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f2:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cgr %r2, %r4
+; CHECK: lgr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: lgr [[NEW]], %r4
+; CHECK: csg %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw max i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check unsigned minimum.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f3:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: clgr %r2, %r4
+; CHECK: lgr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lgr [[NEW]], %r4
+; CHECK: csg %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw umin i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check unsigned maximum.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f4:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: clgr %r2, %r4
+; CHECK: lgr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}he [[KEEP:\..*]]
+; CHECK: lgr [[NEW]], %r4
+; CHECK: csg %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw umax i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the aligned CSG range.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f5:
+; CHECK: lg %r2, 524280(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw min i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which requires separate address logic.
+define i64 @f6(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw min i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the CSG range.
+define i64 @f7(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f7:
+; CHECK: lg %r2, -524288(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw min i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which requires separate address logic.
+define i64 @f8(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f8:
+; CHECK: agfi %r3, -524296
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw min i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check that indexed addresses are not allowed.
+define i64 @f9(i64 %dummy, i64 %base, i64 %index, i64 %b) {
+; CHECK: f9:
+; CHECK: agr %r3, %r4
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i64 *
+ %res = atomicrmw min i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check that constants are forced into a register.
+define i64 @f10(i64 %dummy, i64 *%ptr) {
+; CHECK: f10:
+; CHECK: lghi [[LIMIT:%r[0-9]+]], 42
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LOOP:\.[^:]*]]:
+; CHECK: cgr %r2, [[LIMIT]]
+; CHECK: lgr [[NEW:%r[0-9]+]], %r2
+; CHECK: j{{g?}}le [[KEEP:\..*]]
+; CHECK: lgr [[NEW]], [[LIMIT]]
+; CHECK: csg %r2, [[NEW]], 0(%r3)
+; CHECK: j{{g?}}lh [[LOOP]]
+; CHECK: br %r14
+ %res = atomicrmw min i64 *%ptr, i64 42 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-nand-01.ll b/test/CodeGen/SystemZ/atomicrmw-nand-01.ll
new file mode 100644
index 000000000000..1ede3b465be2
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-nand-01.ll
@@ -0,0 +1,139 @@
+; Test 8-bit atomic NANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check NAND of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used, and that the low bits are set to 1. This sequence is
+; independent of the other loop prologue instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nr [[ROT]], %r3
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: oilf %r3, 16777215
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: nr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
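+
+; A hedged C model of the NAND step checked in f1 above: %b is shifted into
+; the word's high byte with the low 24 bits forced to 1 (sll + oilf), so the
+; NR only clears bits inside the target byte, and XILF 0xff000000 then
+; complements just that byte (the function name is ours):
+;
+;   unsigned nand_step(unsigned rot, unsigned char b) {
+;     unsigned mask = ((unsigned)b << 24) | 0x00ffffffu; /* sll %r3, 24; oilf */
+;     rot &= mask;                                       /* nr                */
+;     rot ^= 0xff000000u;                                /* xilf 4278190080   */
+;     return rot;   /* high byte is now ~(old & b); the rest is unchanged */
+;   }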
+
+; Check the minimum signed value. We AND the rotated word with 0x80ffffff.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nilh [[ROT]], 33023
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check NANDs of -2 (-1 isn't useful). We AND the rotated word with 0xfeffffff.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: nilh [[ROT]], 65279
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 -2 seq_cst
+ ret i8 %res
+}
+
+; Check NANDs of 1. We AND the rotated word with 0x01ffffff.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: nilh [[ROT]], 511
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We AND the rotated word with 0x7fffffff.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: nilh [[ROT]], 32767
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check NANDs of a large unsigned value. We AND the rotated word with
+; 0xfdffffff.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: nilh [[ROT]], 65023
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i8 *%src, i8 253 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-nand-02.ll b/test/CodeGen/SystemZ/atomicrmw-nand-02.ll
new file mode 100644
index 000000000000..d5cf864a3f79
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-nand-02.ll
@@ -0,0 +1,139 @@
+; Test 16-bit atomic NANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check NAND of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used, and that the low bits are set to 1. This sequence is
+; independent of the other loop prologue instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nr [[ROT]], %r3
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: oill %r3, 65535
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: nr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We AND the rotated word with 0x8000ffff.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: nilh [[ROT]], 32768
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check NANDs of -2 (-1 isn't useful). We AND the rotated word with 0xfffeffff.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: nilh [[ROT]], 65534
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 -2 seq_cst
+ ret i16 %res
+}
+
+; Check NANDs of 1. We AND the rotated word with 0x0001ffff.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: nilh [[ROT]], 1
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We AND the rotated word with 0x7fffffff.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: nilh [[ROT]], 32767
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check NANDs of a large unsigned value. We AND the rotated word with
+; 0xfffdffff.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: nilh [[ROT]], 65533
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw nand i16 *%src, i16 65533 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-nand-03.ll b/test/CodeGen/SystemZ/atomicrmw-nand-03.ll
new file mode 100644
index 000000000000..cc2a0866b391
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-nand-03.ll
@@ -0,0 +1,93 @@
+; Test 32-bit atomic NANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check NANDs of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: nr %r0, %r4
+; CHECK: xilf %r0, 4294967295
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check NANDs of 1.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: nilf %r0, 1
+; CHECK: xilf %r0, 4294967295
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check NANDs of the low end of the NILH range.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: nilh %r0, 0
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 65535 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which must use NILF.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: nilf %r0, 65536
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 65536 seq_cst
+ ret i32 %res
+}
+
+; Check the largest useful NILL value.
+define i32 @f5(i32 %dummy, i32 *%src) {
+; CHECK: f5:
+; CHECK: nill %r0, 65534
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 -2 seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the NILL range.
+define i32 @f6(i32 %dummy, i32 *%src) {
+; CHECK: f6:
+; CHECK: nill %r0, 0
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 -65536 seq_cst
+ ret i32 %res
+}
+
+; Check the largest useful NILH value, which is one less than the above.
+define i32 @f7(i32 %dummy, i32 *%src) {
+; CHECK: f7:
+; CHECK: nilh %r0, 65534
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 -65537 seq_cst
+ ret i32 %res
+}
+
+; Check the highest useful NILF value, which is one less than the above.
+define i32 @f8(i32 %dummy, i32 *%src) {
+; CHECK: f8:
+; CHECK: nilf %r0, 4294901758
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw nand i32 *%src, i32 -65538 seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-nand-04.ll b/test/CodeGen/SystemZ/atomicrmw-nand-04.ll
new file mode 100644
index 000000000000..0c857d97fe83
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-nand-04.ll
@@ -0,0 +1,183 @@
+; Test 64-bit atomic NANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check NANDs of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: ngr %r0, %r4
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
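+
+; The 64-bit expansion computes the complement via the two's-complement
+; identity ~x == -x - 1, which is what the LCGR + AGHI pair implements.
+; A hedged C equivalent (the function name is ours):
+;
+;   unsigned long nand_i64(unsigned long a, unsigned long b) {
+;     unsigned long x = a & b;   /* ngr             */
+;     return -x - 1;             /* lcgr + aghi -1  */
+;   }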
+
+; Check NANDs of 1, which must be done using a register.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHF range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: nihf %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 4294967295 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHH range.
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: nihh %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 281474976710655 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: ngr
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 281474976710656 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILL value.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: nill %r0, 65534
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -2 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILL range.
+define i64 @f8(i64 %dummy, i64 *%src) {
+; CHECK: f8:
+; CHECK: nill %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -65536 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILH value, which is one less than the above.
+define i64 @f9(i64 %dummy, i64 *%src) {
+; CHECK: f9:
+; CHECK: nilh %r0, 65534
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -65537 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NILF value, which is one less than the above.
+define i64 @f10(i64 %dummy, i64 *%src) {
+; CHECK: f10:
+; CHECK: nilf %r0, 4294901758
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -65538 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILH range.
+define i64 @f11(i64 %dummy, i64 *%src) {
+; CHECK: f11:
+; CHECK: nilh %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -4294901761 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NILF range.
+define i64 @f12(i64 %dummy, i64 *%src) {
+; CHECK: f12:
+; CHECK: nilf %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHL value, which is one less than the above.
+define i64 @f13(i64 %dummy, i64 *%src) {
+; CHECK: f13:
+; CHECK: nihl %r0, 65534
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -4294967297 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the NIHL range.
+define i64 @f14(i64 %dummy, i64 *%src) {
+; CHECK: f14:
+; CHECK: nihl %r0, 0
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -281470681743361 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHH value, which is 1<<32 less than the above.
+define i64 @f15(i64 %dummy, i64 *%src) {
+; CHECK: f15:
+; CHECK: nihh %r0, 65534
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -281474976710657 seq_cst
+ ret i64 %res
+}
+
+; Check the highest useful NIHF value, which is 1<<32 less than the above.
+define i64 @f16(i64 %dummy, i64 *%src) {
+; CHECK: f16:
+; CHECK: nihf %r0, 4294901758
+; CHECK: lcgr %r0, %r0
+; CHECK: aghi %r0, -1
+; CHECK: br %r14
+ %res = atomicrmw nand i64 *%src, i64 -281479271677953 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-01.ll b/test/CodeGen/SystemZ/atomicrmw-or-01.ll
new file mode 100644
index 000000000000..31303b769237
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-01.ll
@@ -0,0 +1,132 @@
+; Test 8-bit atomic ORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check OR of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: or [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: or {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
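+
+; Unlike the NAND tests, no OILF follows the SLL here: OR against the zeros
+; that SLL shifts in leaves the neighbouring bytes alone, so moving %b into
+; the high byte is the whole prologue.  A hedged C model of the OR step
+; (the function name is ours):
+;
+;   unsigned or_step(unsigned rot, unsigned char b) {
+;     return rot | ((unsigned)b << 24);   /* sll %r3, 24; or */
+;   }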
+
+; Check the minimum signed value. We OR the rotated word with 0x80000000.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: oilh [[ROT]], 32768
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check ORs of -2 (-1 isn't useful). We OR the rotated word with 0xfe000000.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: oilh [[ROT]], 65024
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 -2 seq_cst
+ ret i8 %res
+}
+
+; Check ORs of 1. We OR the rotated word with 0x01000000.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: oilh [[ROT]], 256
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We OR the rotated word with 0x7f000000.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: oilh [[ROT]], 32512
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check ORs of a large unsigned value. We OR the rotated word with
+; 0xfd000000.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: oilh [[ROT]], 64768
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i8 *%src, i8 253 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-02.ll b/test/CodeGen/SystemZ/atomicrmw-or-02.ll
new file mode 100644
index 000000000000..9880d0b9859f
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-02.ll
@@ -0,0 +1,132 @@
+; Test 16-bit atomic ORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check OR of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: or [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: or {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We OR the rotated word with 0x80000000.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: oilh [[ROT]], 32768
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check ORs of -2 (-1 isn't useful). We OR the rotated word with 0xfffe0000.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: oilh [[ROT]], 65534
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 -2 seq_cst
+ ret i16 %res
+}
+
+; Check ORs of 1. We OR the rotated word with 0x00010000.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: oilh [[ROT]], 1
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We OR the rotated word with 0x7fff0000.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: oilh [[ROT]], 32767
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check ORs of a large unsigned value. We OR the rotated word with
+; 0xfffd0000.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: oilh [[ROT]], 65533
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw or i16 *%src, i16 65533 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-03.ll b/test/CodeGen/SystemZ/atomicrmw-or-03.ll
new file mode 100644
index 000000000000..33fd21b04c6c
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-03.ll
@@ -0,0 +1,85 @@
+; Test 32-bit atomic ORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ORs of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: or %r0, %r4
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the lowest useful OILL value.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: oill %r0, 1
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the OILL range.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: oill %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 65535 seq_cst
+ ret i32 %res
+}
+
+; Check the lowest useful OILH value, which is the next value up.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: oilh %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 65536 seq_cst
+ ret i32 %res
+}
+
+; Check the lowest useful OILF value, which is the next value up.
+define i32 @f5(i32 %dummy, i32 *%src) {
+; CHECK: f5:
+; CHECK: oilf %r0, 65537
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 65537 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the OILH range.
+define i32 @f6(i32 %dummy, i32 *%src) {
+; CHECK: f6:
+; CHECK: oilh %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 -65536 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which must use OILF.
+define i32 @f7(i32 %dummy, i32 *%src) {
+; CHECK: f7:
+; CHECK: oilf %r0, 4294901761
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 -65535 seq_cst
+ ret i32 %res
+}
+
+; Check the largest useful OILF value.
+define i32 @f8(i32 %dummy, i32 *%src) {
+; CHECK: f8:
+; CHECK: oilf %r0, 4294967294
+; CHECK: br %r14
+ %res = atomicrmw or i32 *%src, i32 -2 seq_cst
+ ret i32 %res
+}
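+
+; f2-f8 collectively pin down how the OR immediate is chosen.  A hedged C
+; sketch of the selection, for a 32-bit mask v (the use_* names are ours):
+;
+;   if ((v & 0xffff0000u) == 0)      use_OILL(v);        /* low half only  */
+;   else if ((v & 0x0000ffffu) == 0) use_OILH(v >> 16);  /* high half only */
+;   else                             use_OILF(v);        /* anything else  */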
diff --git a/test/CodeGen/SystemZ/atomicrmw-or-04.ll b/test/CodeGen/SystemZ/atomicrmw-or-04.ll
new file mode 100644
index 000000000000..a74f6f9dd501
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-or-04.ll
@@ -0,0 +1,158 @@
+; Test 64-bit atomic ORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ORs of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: ogr %r0, %r4
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OILL value.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: oill %r0, 1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OILL range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: oill %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 65535 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OILH value, which is the next value up.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: oilh %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 65536 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OILF value, which is the next value up again.
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: oilf %r0, 65537
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 65537 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OILH range.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: oilh %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 4294901760 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use OILF.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: oilf %r0, 4294901761
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 4294901761 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OILF range.
+define i64 @f8(i64 %dummy, i64 *%src) {
+; CHECK: f8:
+; CHECK: oilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 4294967295 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OIHL value, which is one greater than the above.
+define i64 @f9(i64 %dummy, i64 *%src) {
+; CHECK: f9:
+; CHECK: oihl %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register. (We could use
+; combinations of OIH* and OIL* instead, but that isn't implemented.)
+define i64 @f10(i64 %dummy, i64 *%src) {
+; CHECK: f10:
+; CHECK: ogr
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 4294967297 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OIHL range.
+define i64 @f11(i64 %dummy, i64 *%src) {
+; CHECK: f11:
+; CHECK: oihl %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 281470681743360 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OIHH value, which is 1<<32 greater than the above.
+define i64 @f12(i64 %dummy, i64 *%src) {
+; CHECK: f12:
+; CHECK: oihh %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 281474976710656 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful OIHF value, which is 1<<32 greater again.
+define i64 @f13(i64 %dummy, i64 *%src) {
+; CHECK: f13:
+; CHECK: oihf %r0, 65537
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 281479271677952 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OIHH range.
+define i64 @f14(i64 %dummy, i64 *%src) {
+; CHECK: f14:
+; CHECK: oihh %r0, 65535
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 18446462598732840960 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f15(i64 %dummy, i64 *%src) {
+; CHECK: f15:
+; CHECK: ogr
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 18446462598732840961 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the OIHF range.
+define i64 @f16(i64 %dummy, i64 *%src) {
+; CHECK: f16:
+; CHECK: oihf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw or i64 *%src, i64 -4294967296 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-01.ll b/test/CodeGen/SystemZ/atomicrmw-sub-01.ll
new file mode 100644
index 000000000000..d073dc5ec29f
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-01.ll
@@ -0,0 +1,132 @@
+; Test 8-bit atomic subtractions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check subtraction of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: sr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: sr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
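+
+; For constant operands the subtraction is folded into an addition: the AFI
+; immediates in f2-f6 are the negated i8 constant placed in the word's high
+; byte.  A hedged C sketch of the bit pattern (the function name is ours;
+; afi prints it as a signed 32-bit value):
+;
+;   unsigned afi_bits(unsigned char c) {
+;     return (unsigned)(unsigned char)-c << 24;
+;   }
+;   /* c = 1 -> 0xff000000 (-16777216), c = 127 -> 0x81000000 (-2130706432),
+;      c = 254 -> 0x02000000 (33554432), as in f4-f6 */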
+
+; Check the minimum signed value. We add 0x80000000 to the rotated word.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: afi [[ROT]], -2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check subtraction of -1. We add 0x01000000 to the rotated word.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: afi [[ROT]], 16777216
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 -1 seq_cst
+ ret i8 %res
+}
+
+; Check subtraction of 1. We add 0xff000000 to the rotated word.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: afi [[ROT]], -16777216
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We add 0x81000000 to the rotated word.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: afi [[ROT]], -2130706432
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check subtraction of a large unsigned value. We add 0x02000000 to the
+; rotated word.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: afi [[ROT]], 33554432
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i8 *%src, i8 254 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-02.ll b/test/CodeGen/SystemZ/atomicrmw-sub-02.ll
new file mode 100644
index 000000000000..449d92ff3b30
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-02.ll
@@ -0,0 +1,132 @@
+; Test 16-bit atomic subtractions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check subtraction of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: sr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: sr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We add 0x80000000 to the rotated word.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: afi [[ROT]], -2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check subtraction of -1. We add 0x00010000 to the rotated word.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: afi [[ROT]], 65536
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 -1 seq_cst
+ ret i16 %res
+}
+
+; Check subtraction of 1. We add 0xffff0000 to the rotated word.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: afi [[ROT]], -65536
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We add 0x80010000 to the rotated word.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: afi [[ROT]], -2147418112
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check subtraction of a large unsigned value. We add 0x00020000 to the
+; rotated word.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: afi [[ROT]], 131072
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw sub i16 *%src, i16 65534 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-03.ll b/test/CodeGen/SystemZ/atomicrmw-sub-03.ll
new file mode 100644
index 000000000000..da07fb57ef72
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-03.ll
@@ -0,0 +1,94 @@
+; Test 32-bit atomic subtractions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check subtraction of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: sr %r0, %r4
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check subtraction of 1, which can use AHI.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: ahi %r0, -1
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the AHI range.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: ahi %r0, -32768
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 32768 seq_cst
+ ret i32 %res
+}
+
+; Check the next value down, which must use AFI.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: afi %r0, -32769
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 32769 seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the AFI range.
+define i32 @f5(i32 %dummy, i32 *%src) {
+; CHECK: f5:
+; CHECK: afi %r0, -2147483648
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 2147483648 seq_cst
+ ret i32 %res
+}
+
+; Check the next value up, which gets treated as a positive operand.
+define i32 @f6(i32 %dummy, i32 *%src) {
+; CHECK: f6:
+; CHECK: afi %r0, 2147483647
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 2147483649 seq_cst
+ ret i32 %res
+}
+
+; Check subtraction of -1, which can use AHI.
+define i32 @f7(i32 %dummy, i32 *%src) {
+; CHECK: f7:
+; CHECK: ahi %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 -1 seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the AHI range.
+define i32 @f8(i32 %dummy, i32 *%src) {
+; CHECK: f8:
+; CHECK: ahi %r0, 32767
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 -32767 seq_cst
+ ret i32 %res
+}
+
+; Check the next value down, which must use AFI instead.
+define i32 @f9(i32 %dummy, i32 *%src) {
+; CHECK: f9:
+; CHECK: afi %r0, 32768
+; CHECK: br %r14
+ %res = atomicrmw sub i32 *%src, i32 -32768 seq_cst
+ ret i32 %res
+}
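+
+; As with the logical immediates, f2-f9 pin down the add-immediate choice
+; for "sub c": the negated constant uses AHI when it fits in 16 signed bits
+; and AFI otherwise, with SR as the fallback for a variable.  A hedged C
+; sketch (the use_* names are ours):
+;
+;   int s = (int)-(unsigned)c;          /* negated constant, modulo 2^32 */
+;   if (s >= -32768 && s <= 32767)      use_AHI(s);
+;   else                                use_AFI(s);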
diff --git a/test/CodeGen/SystemZ/atomicrmw-sub-04.ll b/test/CodeGen/SystemZ/atomicrmw-sub-04.ll
new file mode 100644
index 000000000000..26f75afe85f4
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-sub-04.ll
@@ -0,0 +1,112 @@
+; Test 64-bit atomic subtractions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check subtraction of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: sgr %r0, %r4
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check subtraction of 1, which can use AGHI.
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: aghi %r0, -1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the AGHI range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: aghi %r0, -32768
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 32768 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use AGFI.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r0, -32769
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 32769 seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the AGFI range.
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r0, -2147483648
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 2147483648 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register operation.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: sgr
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 2147483649 seq_cst
+ ret i64 %res
+}
+
+; Check subtraction of -1, which can use AGHI.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: aghi %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 -1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the AGHI range.
+define i64 @f8(i64 %dummy, i64 *%src) {
+; CHECK: f8:
+; CHECK: aghi %r0, 32767
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 -32767 seq_cst
+ ret i64 %res
+}
+
+; Check the next value down, which must use AGFI instead.
+define i64 @f9(i64 %dummy, i64 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r0, 32768
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 -32768 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the AGFI range.
+define i64 @f10(i64 %dummy, i64 *%src) {
+; CHECK: f10:
+; CHECK: agfi %r0, 2147483647
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 -2147483647 seq_cst
+ ret i64 %res
+}
+
+; Check the next value down, which must use a register operation.
+define i64 @f11(i64 %dummy, i64 *%src) {
+; CHECK: f11:
+; CHECK: sgr
+; CHECK: br %r14
+ %res = atomicrmw sub i64 *%src, i64 -2147483648 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xchg-01.ll b/test/CodeGen/SystemZ/atomicrmw-xchg-01.ll
new file mode 100644
index 000000000000..e33597b7297d
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xchg-01.ll
@@ -0,0 +1,55 @@
+; Test 8-bit atomic exchange.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT
+
+; Check exchange with a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK. CHECK-SHIFT also checks that %r3 is not modified before
+; being used in the RISBG (in contrast to things like atomic addition,
+; which shift %r3 left so that %b is at the high end of the word).
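+;
+; A worked example of the shift arithmetic (illustrative only): for a byte
+; at address 0x1003, SLLG produces bit offset 0x1003 * 8, whose low six
+; bits give a rotate amount of 24, and NILL aligns the address to 0x1000.
+; Rotating the loaded word left by 24 brings the wanted byte to bits
+; 32-39, where RISBG inserts %b; the final RLL by SHIFT + 8 rotates the
+; old value so that the original byte ends up in the low 8 bits of %r2.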
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: risbg [[ROT]], %r3, 32, 39, 24
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT: f1:
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: rll
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: risbg {{%r[0-9]+}}, %r3, 32, 39, 24
+; CHECK-SHIFT: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT: rll
+; CHECK-SHIFT: br %r14
+ %res = atomicrmw xchg i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check exchange with a constant. We should force the constant into
+; a register and use the sequence above.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: lhi [[VALUE:%r[0-9]+]], 88
+; CHECK: risbg {{%r[0-9]+}}, [[VALUE]], 32, 39, 24
+; CHECK: br %r14
+;
+; CHECK-SHIFT: f2:
+; CHECK-SHIFT: br %r14
+ %res = atomicrmw xchg i8 *%src, i8 88 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xchg-02.ll b/test/CodeGen/SystemZ/atomicrmw-xchg-02.ll
new file mode 100644
index 000000000000..31f802625a32
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xchg-02.ll
@@ -0,0 +1,55 @@
+; Test 16-bit atomic exchange.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT
+
+; Check exchange with a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK. CHECK-SHIFT also checks that %r3 is not modified before
+; being used in the RISBG (in contrast to things like atomic addition,
+; which shift %r3 left so that %b is at the high end of the word).
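+;
+; The expansion matches the 8-bit case except for the field widths:
+; RISBG inserts a halfword (bits 32-47, rotate amount 16) and the final
+; RLL uses SHIFT + 16 to bring the old halfword to the bottom.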
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: risbg [[ROT]], %r3, 32, 47, 16
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT: f1:
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: rll
+; CHECK-SHIFT-NOT: %r3
+; CHECK-SHIFT: risbg {{%r[0-9]+}}, %r3, 32, 47, 16
+; CHECK-SHIFT: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT: rll
+; CHECK-SHIFT: br %r14
+ %res = atomicrmw xchg i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check exchange with a constant. We should force the constant into
+; a register and use the sequence above.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: lhi [[VALUE:%r[0-9]+]], -25536
+; CHECK: risbg {{%r[0-9]+}}, [[VALUE]], 32, 47, 16
+; CHECK: br %r14
+;
+; CHECK-SHIFT: f2:
+; CHECK-SHIFT: br %r14
+ %res = atomicrmw xchg i16 *%src, i16 40000 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xchg-03.ll b/test/CodeGen/SystemZ/atomicrmw-xchg-03.ll
new file mode 100644
index 000000000000..37581ab9d602
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xchg-03.ll
@@ -0,0 +1,122 @@
+; Test 32-bit atomic exchange.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register exchange.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: cs %r2, %r4, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xchg i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the aligned CS range.
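+; CS takes a 12-bit unsigned displacement (0-4095) and CSY a 20-bit
+; signed one (-524288 to 524287), so with 4-byte elements the interesting
+; indices are 1023 (byte offset 4092), 1024 (4096), 131071 (524284) and
+; -131072 (-524288); anything outside needs an AGFI on the base first.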
+define i32 @f2(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f2:
+; CHECK: l %r2, 4092(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which requires CSY.
+define i32 @f3(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f3:
+; CHECK: ly %r2, 4096(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the aligned CSY range.
+define i32 @f4(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f4:
+; CHECK: ly %r2, 524284(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word up, which needs separate address logic.
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f5:
+; CHECK: agfi %r3, 524288
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the high end of the negative aligned CSY range.
+define i32 @f6(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f6:
+; CHECK: ly %r2, -4(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the low end of the CSY range.
+define i32 @f7(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f7:
+; CHECK: ly %r2, -524288(%r3)
+; CHECK: csy %r2, {{%r[0-9]+}}, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the next word down, which needs separate address logic.
+define i32 @f8(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f8:
+; CHECK: agfi %r3, -524292
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check that indexed addresses are not allowed.
+define i32 @f9(i32 %dummy, i64 %base, i64 %index, i32 %b) {
+; CHECK: f9:
+; CHECK: agr %r3, %r4
+; CHECK: l %r2, 0(%r3)
+; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i32 *
+ %res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check exchange of a constant. We should force it into a register and
+; use the sequence above.
+define i32 @f10(i32 %dummy, i32 *%src) {
+; CHECK: f10:
+; CHECK: llill [[VALUE:%r[0-9]+]], 40000
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: cs %r2, [[VALUE]], 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xchg i32 *%src, i32 40000 seq_cst
+ ret i32 %res
+}
+
diff --git a/test/CodeGen/SystemZ/atomicrmw-xchg-04.ll b/test/CodeGen/SystemZ/atomicrmw-xchg-04.ll
new file mode 100644
index 000000000000..a68295ea8b04
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xchg-04.ll
@@ -0,0 +1,88 @@
+; Test 64-bit atomic exchange.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register exchange.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: csg %r2, %r4, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xchg i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the aligned CSG range.
+define i64 @f2(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f2:
+; CHECK: lg %r2, 524280(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword up, which requires separate address logic.
+define i64 @f3(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the low end of the CSG range.
+define i64 @f4(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f4:
+; CHECK: lg %r2, -524288(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the next doubleword down, which requires separate address logic.
+define i64 @f5(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f5:
+; CHECK: agfi %r3, -524296
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check that indexed addresses are not allowed.
+define i64 @f6(i64 %dummy, i64 %base, i64 %index, i64 %b) {
+; CHECK: f6:
+; CHECK: agr %r3, %r4
+; CHECK: lg %r2, 0(%r3)
+; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i64 *
+ %res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check exchange of a constant. We should force it into a register and
+; use the sequence above.
+define i64 @f7(i64 %dummy, i64 *%ptr) {
+; CHECK: f7:
+; CHECK: llilf [[VALUE:%r[0-9]+]], 3000000000
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: csg %r2, [[VALUE]], 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xchg i64 *%ptr, i64 3000000000 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-01.ll b/test/CodeGen/SystemZ/atomicrmw-xor-01.ll
new file mode 100644
index 000000000000..13cdf02f486c
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-01.ll
@@ -0,0 +1,132 @@
+; Test 8-bit atomic XORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check XOR of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
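+; - For the constant operands in f2-f6, the shift of %b is folded at
+;   compile time: the rotated word is XORed with (C & 0xff) << 24, so
+;   -1 becomes 0xff000000 (4278190080) and 253 becomes 0xfd000000
+;   (4244635648).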
+define i8 @f1(i8 *%src, i8 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: xr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 24
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: xr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 %b seq_cst
+ ret i8 %res
+}
+
+; Check the minimum signed value. We XOR the rotated word with 0x80000000.
+define i8 @f2(i8 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: xilf [[ROT]], 2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 -128 seq_cst
+ ret i8 %res
+}
+
+; Check XORs of -1. We XOR the rotated word with 0xff000000.
+define i8 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: xilf [[ROT]], 4278190080
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 -1 seq_cst
+ ret i8 %res
+}
+
+; Check XORs of 1. We XOR the rotated word with 0x01000000.
+define i8 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: xilf [[ROT]], 16777216
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 1 seq_cst
+ ret i8 %res
+}
+
+; Check the maximum signed value. We XOR the rotated word with 0x7f000000.
+define i8 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: xilf [[ROT]], 2130706432
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 127 seq_cst
+ ret i8 %res
+}
+
+; Check XORs of a large unsigned value. We XOR the rotated word with
+; 0xfd000000.
+define i8 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: xilf [[ROT]], 4244635648
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i8 *%src, i8 253 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-02.ll b/test/CodeGen/SystemZ/atomicrmw-xor-02.ll
new file mode 100644
index 000000000000..4faa64f8e837
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-02.ll
@@ -0,0 +1,132 @@
+; Test 16-bit atomic XORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT1
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT2
+
+; Check XOR of a variable.
+; - CHECK is for the main loop.
+; - CHECK-SHIFT1 makes sure that the negated shift count used by the second
+; RLL is set up correctly. The negation is independent of the NILL and L
+; tested in CHECK.
+; - CHECK-SHIFT2 makes sure that %b is shifted into the high part of the word
+; before being used. This shift is independent of the other loop prologue
+; instructions.
+define i16 @f1(i16 *%src, i16 %b) {
+; CHECK: f1:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: xr [[ROT]], %r3
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0({{%r[1-9]+}})
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f1:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f1:
+; CHECK-SHIFT2: sll %r3, 16
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: xr {{%r[0-9]+}}, %r3
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: rll
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 %b seq_cst
+ ret i16 %res
+}
+
+; Check the minimum signed value. We XOR the rotated word with 0x80000000.
+define i16 @f2(i16 *%src) {
+; CHECK: f2:
+; CHECK: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK: nill %r2, 65532
+; CHECK: l [[OLD:%r[0-9]+]], 0(%r2)
+; CHECK: [[LABEL:\.[^:]*]]:
+; CHECK: rll [[ROT:%r[0-9]+]], [[OLD]], 0([[SHIFT]])
+; CHECK: xilf [[ROT]], 2147483648
+; CHECK: rll [[NEW:%r[0-9]+]], [[ROT]], 0([[NEGSHIFT:%r[1-9]+]])
+; CHECK: cs [[OLD]], [[NEW]], 0(%r2)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f2:
+; CHECK-SHIFT1: sllg [[SHIFT:%r[1-9]+]], %r2, 3
+; CHECK-SHIFT1: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: rll {{%r[0-9]+}}, {{%r[0-9]+}}, 0([[NEGSHIFT]])
+; CHECK-SHIFT1: rll
+; CHECK-SHIFT1: br %r14
+;
+; CHECK-SHIFT2: f2:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 -32768 seq_cst
+ ret i16 %res
+}
+
+; Check XORs of -1. We XOR the rotated word with 0xffff0000.
+define i16 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: xilf [[ROT]], 4294901760
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f3:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f3:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 -1 seq_cst
+ ret i16 %res
+}
+
+; Check XORs of 1. We XOR the rotated word with 0x00010000.
+define i16 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: xilf [[ROT]], 65536
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f4:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f4:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 1 seq_cst
+ ret i16 %res
+}
+
+; Check the maximum signed value. We XOR the rotated word with 0x7fff0000.
+define i16 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: xilf [[ROT]], 2147418112
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f5:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f5:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 32767 seq_cst
+ ret i16 %res
+}
+
+; Check XORs of a large unsigned value. We XOR the rotated word with
+; 0xfffd0000.
+define i16 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: xilf [[ROT]], 4294770688
+; CHECK: br %r14
+;
+; CHECK-SHIFT1: f6:
+; CHECK-SHIFT1: br %r14
+; CHECK-SHIFT2: f6:
+; CHECK-SHIFT2: br %r14
+ %res = atomicrmw xor i16 *%src, i16 65533 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-03.ll b/test/CodeGen/SystemZ/atomicrmw-xor-03.ll
new file mode 100644
index 000000000000..23884f888e13
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-03.ll
@@ -0,0 +1,49 @@
+; Test 32-bit atomic XORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check XORs of a variable.
+define i32 @f1(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: xr %r0, %r4
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 %b seq_cst
+ ret i32 %res
+}
+
+; Check the lowest useful constant.
+define i32 @f2(i32 %dummy, i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lr %r0, %r2
+; CHECK: xilf %r0, 1
+; CHECK: cs %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 1 seq_cst
+ ret i32 %res
+}
+
+; Check an arbitrary constant.
+define i32 @f3(i32 %dummy, i32 *%src) {
+; CHECK: f3:
+; CHECK: xilf %r0, 3000000000
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 3000000000 seq_cst
+ ret i32 %res
+}
+
+; Check bitwise negation.
+define i32 @f4(i32 %dummy, i32 *%src) {
+; CHECK: f4:
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw xor i32 *%src, i32 -1 seq_cst
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/atomicrmw-xor-04.ll b/test/CodeGen/SystemZ/atomicrmw-xor-04.ll
new file mode 100644
index 000000000000..21130fb47776
--- /dev/null
+++ b/test/CodeGen/SystemZ/atomicrmw-xor-04.ll
@@ -0,0 +1,77 @@
+; Test 64-bit atomic XORs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check XORs of a variable.
+define i64 @f1(i64 %dummy, i64 *%src, i64 %b) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: xgr %r0, %r4
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 %b seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful XILF value.
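+; XILF flips the low 32 bits of a register and XIHF the high 32 bits,
+; so a 64-bit constant can be folded only when one half is zero; f5 and
+; f7 have nonzero bits in both halves and fall back to XGR.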
+define i64 @f2(i64 %dummy, i64 *%src) {
+; CHECK: f2:
+; CHECK: lg %r2, 0(%r3)
+; CHECK: [[LABEL:\.[^ ]*]]:
+; CHECK: lgr %r0, %r2
+; CHECK: xilf %r0, 1
+; CHECK: csg %r2, %r0, 0(%r3)
+; CHECK: j{{g?}}lh [[LABEL]]
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 1 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the XILF range.
+define i64 @f3(i64 %dummy, i64 *%src) {
+; CHECK: f3:
+; CHECK: xilf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 4294967295 seq_cst
+ ret i64 %res
+}
+
+; Check the lowest useful XIHF value, which is one greater than above.
+define i64 @f4(i64 %dummy, i64 *%src) {
+; CHECK: f4:
+; CHECK: xihf %r0, 1
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register. (We could use
+; combinations of XIH* and XIL* instead, but that isn't implemented.)
+define i64 @f5(i64 %dummy, i64 *%src) {
+; CHECK: f5:
+; CHECK: xgr
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 4294967297 seq_cst
+ ret i64 %res
+}
+
+; Check the high end of the XIHF range.
+define i64 @f6(i64 %dummy, i64 *%src) {
+; CHECK: f6:
+; CHECK: xihf %r0, 4294967295
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 -4294967296 seq_cst
+ ret i64 %res
+}
+
+; Check the next value up, which must use a register.
+define i64 @f7(i64 %dummy, i64 *%src) {
+; CHECK: f7:
+; CHECK: xgr
+; CHECK: br %r14
+ %res = atomicrmw xor i64 *%src, i64 -4294967295 seq_cst
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/branch-01.ll b/test/CodeGen/SystemZ/branch-01.ll
new file mode 100644
index 000000000000..8ff91ac38e80
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-01.ll
@@ -0,0 +1,14 @@
+; Test a simple unconditional jump.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i8 *%dest) {
+; CHECK: f1:
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: mvi 0(%r2), 1
+; CHECK: j{{g?}} .L[[LABEL]]
+ br label %loop
+loop:
+ store volatile i8 1, i8 *%dest
+ br label %loop
+}
diff --git a/test/CodeGen/SystemZ/branch-02.ll b/test/CodeGen/SystemZ/branch-02.ll
new file mode 100644
index 000000000000..cde9b568b38c
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-02.ll
@@ -0,0 +1,94 @@
+; Test all condition-code masks that are relevant for signed integer
+; comparisons.
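+; After C, condition code 0 means the operands are equal, 1 means the
+; first operand is low and 2 that it is high, giving the masks e (eq),
+; lh (ne), le (sle), l (slt), h (sgt) and he (sge) tested below.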
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(i32 *%src, i32 %target) {
+; CHECK: f1:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}e .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp eq i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f2(i32 *%src, i32 %target) {
+; CHECK: f2:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}lh .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp ne i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f3(i32 *%src, i32 %target) {
+; CHECK: f3:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}le .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp sle i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f4(i32 *%src, i32 %target) {
+; CHECK: f4:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}l .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp slt i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f5(i32 *%src, i32 %target) {
+; CHECK: f5:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}h .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp sgt i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f6(i32 *%src, i32 %target) {
+; CHECK: f6:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: c %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}he .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp sge i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/branch-03.ll b/test/CodeGen/SystemZ/branch-03.ll
new file mode 100644
index 000000000000..1e447d034a39
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-03.ll
@@ -0,0 +1,63 @@
+; Test all condition-code masks that are relevant for unsigned integer
+; comparisons.
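+; CL sets the same condition codes as C but compares unsigned values,
+; so the masks below mirror the signed tests: le (ule), l (ult),
+; h (ugt) and he (uge).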
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+define void @f1(i32 *%src, i32 %target) {
+; CHECK: f1:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: cl %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}le .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp ule i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f2(i32 *%src, i32 %target) {
+; CHECK: f2:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: cl %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}l .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp ult i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f3(i32 *%src, i32 %target) {
+; CHECK: f3:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: cl %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}h .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp ugt i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f4(i32 *%src, i32 %target) {
+; CHECK: f4:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: cl %r3, 0(%r2)
+; CHECK-NEXT: j{{g?}}he .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile i32 *%src
+ %cond = icmp uge i32 %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/branch-04.ll b/test/CodeGen/SystemZ/branch-04.ll
new file mode 100644
index 000000000000..3d4175041db2
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-04.ll
@@ -0,0 +1,218 @@
+; Test all condition-code masks that are relevant for floating-point
+; comparisons.
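+; CEB sets condition code 3 when either operand is a NaN.  The ordered
+; predicates therefore use masks that exclude CC 3, while each unordered
+; predicate branches on the complement of its ordered opposite (ueq uses
+; nlh, une uses ne, and so on), which includes the CC 3 case.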
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define void @f1(float *%src, float %target) {
+; CHECK: f1:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}e .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp oeq float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f2(float *%src, float %target) {
+; CHECK: f2:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}lh .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp one float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f3(float *%src, float %target) {
+; CHECK: f3:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}le .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ole float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f4(float *%src, float %target) {
+; CHECK: f4:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}l .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp olt float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f5(float *%src, float %target) {
+; CHECK: f5:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}h .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ogt float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f6(float *%src, float %target) {
+; CHECK: f6:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}he .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp oge float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f7(float *%src, float %target) {
+; CHECK: f7:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}nlh .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ueq float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f8(float *%src, float %target) {
+; CHECK: f8:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}ne .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp une float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f9(float *%src, float %target) {
+; CHECK: f9:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}nh .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ule float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f10(float *%src, float %target) {
+; CHECK: f10:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}nhe .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ult float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f11(float *%src, float %target) {
+; CHECK: f11:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}nle .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ugt float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+define void @f12(float *%src, float %target) {
+; CHECK: f12:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}nl .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp uge float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+; "jno" == "jump if no overflow", which corresponds to "jump if ordered"
+; rather than "jump if not ordered" after a floating-point comparison.
+define void @f13(float *%src, float %target) {
+; CHECK: f13:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}no .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp ord float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
+
+; "jo" == "jump if overflow", which corresponds to "jump if not ordered"
+; rather than "jump if ordered" after a floating-point comparison.
+define void @f14(float *%src, float %target) {
+; CHECK: f14:
+; CHECK: .cfi_startproc
+; CHECK: .L[[LABEL:.*]]:
+; CHECK: ceb %f0, 0(%r2)
+; CHECK-NEXT: j{{g?}}o .L[[LABEL]]
+ br label %loop
+loop:
+ %val = load volatile float *%src
+ %cond = fcmp uno float %target, %val
+ br i1 %cond, label %loop, label %exit
+exit:
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/branch-05.ll b/test/CodeGen/SystemZ/branch-05.ll
new file mode 100644
index 000000000000..d149e0b7013b
--- /dev/null
+++ b/test/CodeGen/SystemZ/branch-05.ll
@@ -0,0 +1,58 @@
+; Test indirect jumps.
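+; The switch in f1 lowers to a jump table: the case index is biased to
+; zero (AHI -1), range-checked against the table size (CLFI 5) with a
+; branch to the default block when out of range, then scaled by 8 to
+; index a LARL-addressed table of 64-bit branch targets.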
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+define i32 @f1(i32 %x, i32 %y, i32 %op) {
+; CHECK: f1:
+; CHECK: ahi %r4, -1
+; CHECK: clfi %r4, 5
+; CHECK-NEXT: j{{g?}}g
+; CHECK: llgfr [[OP64:%r[0-5]]], %r4
+; CHECK: sllg [[INDEX:%r[1-5]]], [[OP64]], 3
+; CHECK: larl [[BASE:%r[1-5]]]
+; CHECK: lg [[TARGET:%r[1-5]]], 0([[BASE]],[[INDEX]])
+; CHECK: br [[TARGET]]
+entry:
+ switch i32 %op, label %exit [
+ i32 1, label %b.add
+ i32 2, label %b.sub
+ i32 3, label %b.and
+ i32 4, label %b.or
+ i32 5, label %b.xor
+ i32 6, label %b.mul
+ ]
+
+b.add:
+ %add = add i32 %x, %y
+ br label %exit
+
+b.sub:
+ %sub = sub i32 %x, %y
+ br label %exit
+
+b.and:
+ %and = and i32 %x, %y
+ br label %exit
+
+b.or:
+ %or = or i32 %x, %y
+ br label %exit
+
+b.xor:
+ %xor = xor i32 %x, %y
+ br label %exit
+
+b.mul:
+ %mul = mul i32 %x, %y
+ br label %exit
+
+exit:
+ %res = phi i32 [ %x, %entry ],
+ [ %add, %b.add ],
+ [ %sub, %b.sub ],
+ [ %and, %b.and ],
+ [ %or, %b.or ],
+ [ %xor, %b.xor ],
+ [ %mul, %b.mul ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/bswap-01.ll b/test/CodeGen/SystemZ/bswap-01.ll
new file mode 100644
index 000000000000..952903df50f9
--- /dev/null
+++ b/test/CodeGen/SystemZ/bswap-01.ll
@@ -0,0 +1,24 @@
+; Test byteswaps between registers.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @llvm.bswap.i32(i32 %a)
+declare i64 @llvm.bswap.i64(i64 %a)
+
+; Check 32-bit register-to-register byteswaps.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: lrvr [[REGISTER:%r[0-5]]], %r2
+; CHECK: br %r14
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check 64-bit register-to-register byteswaps.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: lrvgr %r2, %r2
+; CHECK: br %r14
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
diff --git a/test/CodeGen/SystemZ/bswap-02.ll b/test/CodeGen/SystemZ/bswap-02.ll
new file mode 100644
index 000000000000..e9b7eb5f055b
--- /dev/null
+++ b/test/CodeGen/SystemZ/bswap-02.ll
@@ -0,0 +1,87 @@
+; Test 32-bit byteswaps from memory to registers.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @llvm.bswap.i32(i32 %a)
+
+; Check LRV with no displacement.
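+; LRV is an RXY-format instruction, so it has a 20-bit signed
+; displacement and (unlike CS) also allows an index register,
+; which f7 exercises.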
+define i32 @f1(i32 *%src) {
+; CHECK: f1:
+; CHECK: lrv %r2, 0(%r2)
+; CHECK: br %r14
+ %a = load i32 *%src
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check the high end of the aligned LRV range.
+define i32 @f2(i32 *%src) {
+; CHECK: f2:
+; CHECK: lrv %r2, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f3(i32 *%src) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: lrv %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check the high end of the negative aligned LRV range.
+define i32 @f4(i32 *%src) {
+; CHECK: f4:
+; CHECK: lrv %r2, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check the low end of the LRV range.
+define i32 @f5(i32 *%src) {
+; CHECK: f5:
+; CHECK: lrv %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524292
+; CHECK: lrv %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
+
+; Check that LRV allows an index.
+define i32 @f7(i64 %src, i64 %index) {
+; CHECK: f7:
+; CHECK: lrv %r2, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %a = load i32 *%ptr
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ ret i32 %swapped
+}
diff --git a/test/CodeGen/SystemZ/bswap-03.ll b/test/CodeGen/SystemZ/bswap-03.ll
new file mode 100644
index 000000000000..2e6bcdce2651
--- /dev/null
+++ b/test/CodeGen/SystemZ/bswap-03.ll
@@ -0,0 +1,87 @@
+; Test 64-bit byteswaps from memory to registers.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @llvm.bswap.i64(i64 %a)
+
+; Check LRVG with no displacement.
+define i64 @f1(i64 *%src) {
+; CHECK: f1:
+; CHECK: lrvg %r2, 0(%r2)
+; CHECK: br %r14
+ %a = load i64 *%src
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check the high end of the aligned LRVG range.
+define i64 @f2(i64 *%src) {
+; CHECK: f2:
+; CHECK: lrvg %r2, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 *%src) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: lrvg %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check the high end of the negative aligned LRVG range.
+define i64 @f4(i64 *%src) {
+; CHECK: f4:
+; CHECK: lrvg %r2, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check the low end of the LRVG range.
+define i64 @f5(i64 *%src) {
+; CHECK: f5:
+; CHECK: lrvg %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524296
+; CHECK: lrvg %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
+
+; Check that LRVG allows an index.
+define i64 @f7(i64 %src, i64 %index) {
+; CHECK: f7:
+; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %a = load i64 *%ptr
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ ret i64 %swapped
+}
diff --git a/test/CodeGen/SystemZ/bswap-04.ll b/test/CodeGen/SystemZ/bswap-04.ll
new file mode 100644
index 000000000000..192327bd256c
--- /dev/null
+++ b/test/CodeGen/SystemZ/bswap-04.ll
@@ -0,0 +1,87 @@
+; Test 32-bit byteswaps from registers to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i32 @llvm.bswap.i32(i32 %a)
+
+; Check STRV with no displacement.
+define void @f1(i32 *%src, i32 %a) {
+; CHECK: f1:
+; CHECK: strv %r3, 0(%r2)
+; CHECK: br %r14
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%src
+ ret void
+}
+
+; Check the high end of the aligned STRV range.
+define void @f2(i32 *%src, i32 %a) {
+; CHECK: f2:
+; CHECK: strv %r3, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f3(i32 *%src, i32 %a) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: strv %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STRV range.
+define void @f4(i32 *%src, i32 %a) {
+; CHECK: f4:
+; CHECK: strv %r3, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
+
+; Check the low end of the STRV range.
+define void @f5(i32 *%src, i32 %a) {
+; CHECK: f5:
+; CHECK: strv %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i32 *%src, i32 %a) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524292
+; CHECK: strv %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
+
+; Check that STRV allows an index.
+define void @f7(i64 %src, i64 %index, i32 %a) {
+; CHECK: f7:
+; CHECK: strv %r4, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %swapped = call i32 @llvm.bswap.i32(i32 %a)
+ store i32 %swapped, i32 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/bswap-05.ll b/test/CodeGen/SystemZ/bswap-05.ll
new file mode 100644
index 000000000000..e58cb80c3981
--- /dev/null
+++ b/test/CodeGen/SystemZ/bswap-05.ll
@@ -0,0 +1,87 @@
+; Test 64-bit byteswaps from registers to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @llvm.bswap.i64(i64 %a)
+
+; Check STRVG with no displacement.
+define void @f1(i64 *%src, i64 %a) {
+; CHECK: f1:
+; CHECK: strvg %r3, 0(%r2)
+; CHECK: br %r14
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%src
+ ret void
+}
+
+; Check the high end of the aligned STRVG range.
+define void @f2(i64 *%src, i64 %a) {
+; CHECK: f2:
+; CHECK: strvg %r3, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f3(i64 *%src, i64 %a) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: strvg %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STRVG range.
+define void @f4(i64 *%src, i64 %a) {
+; CHECK: f4:
+; CHECK: strvg %r3, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
+
+; Check the low end of the STRVG range.
+define void @f5(i64 *%src, i64 %a) {
+; CHECK: f5:
+; CHECK: strvg %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i64 *%src, i64 %a) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524296
+; CHECK: strvg %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
+
+; Check that STRVG allows an index.
+define void @f7(i64 %src, i64 %index, i64 %a) {
+; CHECK: f7:
+; CHECK: strvg %r4, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %swapped = call i64 @llvm.bswap.i64(i64 %a)
+ store i64 %swapped, i64 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/call-01.ll b/test/CodeGen/SystemZ/call-01.ll
new file mode 100644
index 000000000000..1b9172bdd819
--- /dev/null
+++ b/test/CodeGen/SystemZ/call-01.ll
@@ -0,0 +1,18 @@
+; Test direct calls.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @bar()
+
+; We must allocate 160 bytes for the callee and save and restore %r14.
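+; The s390x ELF ABI requires each caller to provide 160 bytes at the
+; stack pointer for use by its callees, with the %r14/%r15 save slots at
+; offsets 112 and 120; after the AGHI the epilogue reloads them from
+; 112 + 160 = 272 off the adjusted %r15.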
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: aghi %r15, -160
+; CHECK: brasl %r14, bar@PLT
+; CHECK: lmg %r14, %r15, 272(%r15)
+; CHECK: br %r14
+ %ret = call i64 @bar()
+ %inc = add i64 %ret, 1
+ ret i64 %inc
+}
diff --git a/test/CodeGen/SystemZ/call-02.ll b/test/CodeGen/SystemZ/call-02.ll
new file mode 100644
index 000000000000..07dd67bab1b6
--- /dev/null
+++ b/test/CodeGen/SystemZ/call-02.ll
@@ -0,0 +1,16 @@
+; Test indirect calls.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; We must allocate 160 bytes for the callee and save and restore %r14.
+define i64 @f1(i64() *%bar) {
+; CHECK: f1:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: aghi %r15, -160
+; CHECK: basr %r14, %r2
+; CHECK: lmg %r14, %r15, 272(%r15)
+; CHECK: br %r14
+ %ret = call i64 %bar()
+ %inc = add i64 %ret, 1
+ ret i64 %inc
+}
diff --git a/test/CodeGen/SystemZ/cmpxchg-01.ll b/test/CodeGen/SystemZ/cmpxchg-01.ll
new file mode 100644
index 000000000000..477bcb00e9bd
--- /dev/null
+++ b/test/CodeGen/SystemZ/cmpxchg-01.ll
@@ -0,0 +1,56 @@
+; Test 8-bit compare and swap.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT
+
+; Check compare and swap with a variable.
+; - CHECK-MAIN is for the main loop.
+; - CHECK-SHIFT makes sure that the negated shift count used by the second
+;   RLL is set up correctly.  The negation is independent of the NILL and L
+;   tested in CHECK-MAIN.
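+;
+; In the loop, RISBG splices the upper 24 bits of the rotated old value
+; into %r4 and %r5, so the CR effectively compares, and the CS effectively
+; replaces, only the selected byte; a mismatch with %cmp exits early
+; through [[EXIT]].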
+define i8 @f1(i8 %dummy, i8 *%src, i8 %cmp, i8 %swap) {
+; CHECK-MAIN: f1:
+; CHECK-MAIN: sllg [[SHIFT:%r[1-9]+]], %r3, 3
+; CHECK-MAIN: nill %r3, 65532
+; CHECK-MAIN: l [[OLD:%r[0-9]+]], 0(%r3)
+; CHECK-MAIN: [[LOOP:\.[^ ]*]]:
+; CHECK-MAIN: rll %r2, [[OLD]], 8([[SHIFT]])
+; CHECK-MAIN: risbg %r4, %r2, 32, 55, 0
+; CHECK-MAIN: cr %r2, %r4
+; CHECK-MAIN: j{{g?}}lh [[EXIT:\.[^ ]*]]
+; CHECK-MAIN: risbg %r5, %r2, 32, 55, 0
+; CHECK-MAIN: rll [[NEW:%r[0-9]+]], %r5, -8({{%r[1-9]+}})
+; CHECK-MAIN: cs [[OLD]], [[NEW]], 0(%r3)
+; CHECK-MAIN: j{{g?}}lh [[LOOP]]
+; CHECK-MAIN: [[EXIT]]:
+; CHECK-MAIN-NOT: %r2
+; CHECK-MAIN: br %r14
+;
+; CHECK-SHIFT: f1:
+; CHECK-SHIFT: sllg [[SHIFT:%r[1-9]+]], %r3, 3
+; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT: rll
+; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -8([[NEGSHIFT]])
+ %res = cmpxchg i8 *%src, i8 %cmp, i8 %swap seq_cst
+ ret i8 %res
+}
+
+; Check compare and swap with constants. We should force the constants into
+; registers and use the sequence above.
+define i8 @f2(i8 *%src) {
+; CHECK-MAIN: f2:
+; CHECK-MAIN: lhi [[CMP:%r[0-9]+]], 42
+; CHECK-MAIN: risbg [[CMP]], {{%r[0-9]+}}, 32, 55, 0
+; CHECK-MAIN: risbg
+; CHECK-MAIN: br %r14
+;
+; CHECK-SHIFT: f2:
+; CHECK-SHIFT: lhi [[SWAP:%r[0-9]+]], 88
+; CHECK-SHIFT: risbg
+; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 55, 0
+; CHECK-SHIFT: br %r14
+ %res = cmpxchg i8 *%src, i8 42, i8 88 seq_cst
+ ret i8 %res
+}
diff --git a/test/CodeGen/SystemZ/cmpxchg-02.ll b/test/CodeGen/SystemZ/cmpxchg-02.ll
new file mode 100644
index 000000000000..cc3452320b3d
--- /dev/null
+++ b/test/CodeGen/SystemZ/cmpxchg-02.ll
@@ -0,0 +1,56 @@
+; Test 16-bit compare and swap.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-SHIFT
+
+; Check compare and swap with a variable.
+; - CHECK-MAIN is for the main loop.
+; - CHECK-SHIFT makes sure that the negated shift count used by the second
+;   RLL is set up correctly.  The negation is independent of the NILL and L
+;   tested in CHECK-MAIN.
+define i16 @f1(i16 %dummy, i16 *%src, i16 %cmp, i16 %swap) {
+; CHECK-MAIN: f1:
+; CHECK-MAIN: sllg [[SHIFT:%r[1-9]+]], %r3, 3
+; CHECK-MAIN: nill %r3, 65532
+; CHECK-MAIN: l [[OLD:%r[0-9]+]], 0(%r3)
+; CHECK-MAIN: [[LOOP:\.[^ ]*]]:
+; CHECK-MAIN: rll %r2, [[OLD]], 16([[SHIFT]])
+; CHECK-MAIN: risbg %r4, %r2, 32, 47, 0
+; CHECK-MAIN: cr %r2, %r4
+; CHECK-MAIN: j{{g?}}lh [[EXIT:\.[^ ]*]]
+; CHECK-MAIN: risbg %r5, %r2, 32, 47, 0
+; CHECK-MAIN: rll [[NEW:%r[0-9]+]], %r5, -16({{%r[1-9]+}})
+; CHECK-MAIN: cs [[OLD]], [[NEW]], 0(%r3)
+; CHECK-MAIN: j{{g?}}lh [[LOOP]]
+; CHECK-MAIN: [[EXIT]]:
+; CHECK-MAIN-NOT: %r2
+; CHECK-MAIN: br %r14
+;
+; CHECK-SHIFT: f1:
+; CHECK-SHIFT: sllg [[SHIFT:%r[1-9]+]], %r3, 3
+; CHECK-SHIFT: lcr [[NEGSHIFT:%r[1-9]+]], [[SHIFT]]
+; CHECK-SHIFT: rll
+; CHECK-SHIFT: rll {{%r[0-9]+}}, %r5, -16([[NEGSHIFT]])
+ %res = cmpxchg i16 *%src, i16 %cmp, i16 %swap seq_cst
+ ret i16 %res
+}
+
+; Check compare and swap with constants. We should force the constants into
+; registers and use the sequence above.
+define i16 @f2(i16 *%src) {
+; CHECK-MAIN: f2:
+; CHECK-MAIN: lhi [[CMP:%r[0-9]+]], 42
+; CHECK-MAIN: risbg [[CMP]], {{%r[0-9]+}}, 32, 47, 0
+; CHECK-MAIN: risbg
+; CHECK-MAIN: br %r14
+;
+; CHECK-SHIFT: f2:
+; CHECK-SHIFT: lhi [[SWAP:%r[0-9]+]], 88
+; CHECK-SHIFT: risbg
+; CHECK-SHIFT: risbg [[SWAP]], {{%r[0-9]+}}, 32, 47, 0
+; CHECK-SHIFT: br %r14
+ %res = cmpxchg i16 *%src, i16 42, i16 88 seq_cst
+ ret i16 %res
+}
diff --git a/test/CodeGen/SystemZ/cmpxchg-03.ll b/test/CodeGen/SystemZ/cmpxchg-03.ll
new file mode 100644
index 000000000000..45e224eda84c
--- /dev/null
+++ b/test/CodeGen/SystemZ/cmpxchg-03.ll
@@ -0,0 +1,131 @@
+; Test 32-bit compare and swap.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the CS range.
+define i32 @f1(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f1:
+; CHECK: cs %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i32 *%src, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the high end of the aligned CS range.
+define i32 @f2(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f2:
+; CHECK: cs %r2, %r3, 4092(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the next word up, which should use CSY instead of CS.
+define i32 @f3(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f3:
+; CHECK: csy %r2, %r3, 4096(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the high end of the aligned CSY range.
+define i32 @f4(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f4:
+; CHECK: csy %r2, %r3, 524284(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f5(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r4, 524288
+; CHECK: cs %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the high end of the negative aligned CSY range.
+define i32 @f6(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f6:
+; CHECK: csy %r2, %r3, -4(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the low end of the CSY range.
+define i32 @f7(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f7:
+; CHECK: csy %r2, %r3, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i32 %cmp, i32 %swap, i32 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r4, -524292
+; CHECK: cs %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check that CS does not allow an index.
+define i32 @f9(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: agr %r4, %r5
+; CHECK: cs %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %ptr = inttoptr i64 %add1 to i32 *
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check that CSY does not allow an index.
+define i32 @f10(i32 %cmp, i32 %swap, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: agr %r4, %r5
+; CHECK: csy %r2, %r3, 4096(%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check that a constant %cmp value is loaded into a register first.
+define i32 @f11(i32 %dummy, i32 %swap, i32 *%ptr) {
+; CHECK: f11:
+; CHECK: lhi %r2, 1001
+; CHECK: cs %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i32 *%ptr, i32 1001, i32 %swap seq_cst
+ ret i32 %val
+}
+
+; Check that a constant %swap value is loaded into a register first.
+define i32 @f12(i32 %cmp, i32 *%ptr) {
+; CHECK: f12:
+; CHECK: lhi [[SWAP:%r[0-9]+]], 1002
+; CHECK: cs %r2, [[SWAP]], 0(%r3)
+; CHECK: br %r14
+ %val = cmpxchg i32 *%ptr, i32 %cmp, i32 1002 seq_cst
+ ret i32 %val
+}
diff --git a/test/CodeGen/SystemZ/cmpxchg-04.ll b/test/CodeGen/SystemZ/cmpxchg-04.ll
new file mode 100644
index 000000000000..f8969ee08449
--- /dev/null
+++ b/test/CodeGen/SystemZ/cmpxchg-04.ll
@@ -0,0 +1,98 @@
+; Test 64-bit compare and swap.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check CSG without a displacement.
+define i64 @f1(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f1:
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%src, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the high end of the aligned CSG range.
+define i64 @f2(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f2:
+; CHECK: csg %r2, %r3, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f3:
+; CHECK: agfi %r4, 524288
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the high end of the negative aligned CSG range.
+define i64 @f4(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f4:
+; CHECK: csg %r2, %r3, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the low end of the CSG range.
+define i64 @f5(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f5:
+; CHECK: csg %r2, %r3, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 %cmp, i64 %swap, i64 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r4, -524296
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that CSG does not allow an index.
+define i64 @f7(i64 %cmp, i64 %swap, i64 %src, i64 %index) {
+; CHECK: f7:
+; CHECK: agr %r4, %r5
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %ptr = inttoptr i64 %add1 to i64 *
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that a constant %cmp value is loaded into a register first.
+define i64 @f8(i64 %dummy, i64 %swap, i64 *%ptr) {
+; CHECK: f8:
+; CHECK: lghi %r2, 1001
+; CHECK: csg %r2, %r3, 0(%r4)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%ptr, i64 1001, i64 %swap seq_cst
+ ret i64 %val
+}
+
+; Check that a constant %swap value is loaded into a register first.
+define i64 @f9(i64 %cmp, i64 *%ptr) {
+; CHECK: f9:
+; CHECK: lghi [[SWAP:%r[0-9]+]], 1002
+; CHECK: csg %r2, [[SWAP]], 0(%r3)
+; CHECK: br %r14
+ %val = cmpxchg i64 *%ptr, i64 %cmp, i64 1002 seq_cst
+ ret i64 %val
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-01.ll b/test/CodeGen/SystemZ/fp-abs-01.ll
new file mode 100644
index 000000000000..81b3fb273d14
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-01.ll
@@ -0,0 +1,40 @@
+; Test floating-point absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK: f1:
+; CHECK: lpebr %f0, %f0
+; CHECK: br %r14
+ %res = call float @llvm.fabs.f32(float %f)
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK: f2:
+; CHECK: lpdbr %f0, %f0
+; CHECK: br %r14
+ %res = call double @llvm.fabs.f64(double %f)
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure absolute would probably
+; be better implemented using an NI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
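+; For reference, the memory-based alternative would be a single AND of the
+; sign byte, roughly (a sketch, not what this test expects):
+;   ni 0(%r2), 127   # clear the sign bit in the big-endian high byte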
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK: f3:
+; CHECK: lpxbr
+; CHECK: dxbr
+; CHECK: br %r14
+ %orig = load fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %op2 = load fp128 *%ptr2
+ %res = fdiv fp128 %abs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-abs-02.ll b/test/CodeGen/SystemZ/fp-abs-02.ll
new file mode 100644
index 000000000000..513d49c7acf5
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-abs-02.ll
@@ -0,0 +1,43 @@
+; Test negated floating-point absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32.
+declare float @llvm.fabs.f32(float %f)
+define float @f1(float %f) {
+; CHECK: f1:
+; CHECK: lnebr %f0, %f0
+; CHECK: br %r14
+ %abs = call float @llvm.fabs.f32(float %f)
+ %res = fsub float -0.0, %abs
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.fabs.f64(double %f)
+define double @f2(double %f) {
+; CHECK: f2:
+; CHECK: lndbr %f0, %f0
+; CHECK: br %r14
+ %abs = call double @llvm.fabs.f64(double %f)
+ %res = fsub double -0.0, %abs
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negative-absolute would
+; probably be better implemented using an OI on the upper byte. Do some
+; extra processing so that using FPRs is unequivocally better.
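+; For reference, the memory-based alternative would be a single OR of the
+; sign byte, roughly (a sketch, not what this test expects):
+;   oi 0(%r2), 128   # set the sign bit in the big-endian high byte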
+declare fp128 @llvm.fabs.f128(fp128 %f)
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK: f3:
+; CHECK: lnxbr
+; CHECK: dxbr
+; CHECK: br %r14
+ %orig = load fp128 *%ptr
+ %abs = call fp128 @llvm.fabs.f128(fp128 %orig)
+ %negabs = fsub fp128 0xL00000000000000008000000000000000, %abs
+ %op2 = load fp128 *%ptr2
+ %res = fdiv fp128 %negabs, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-add-01.ll b/test/CodeGen/SystemZ/fp-add-01.ll
new file mode 100644
index 000000000000..7ce0777b8870
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-add-01.ll
@@ -0,0 +1,71 @@
+; Test 32-bit floating-point addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register addition.
+define float @f1(float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: aebr %f0, %f2
+; CHECK: br %r14
+ %res = fadd float %f1, %f2
+ ret float %res
+}
+
+; Check the low end of the AEB range.
+define float @f2(float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: aeb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %res = fadd float %f1, %f2
+ ret float %res
+}
+
+; Check the high end of the aligned AEB range.
+define float @f3(float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: aeb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %res = fadd float %f1, %f2
+ ret float %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f4(float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: aeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %res = fadd float %f1, %f2
+ ret float %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define float @f5(float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: aeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %res = fadd float %f1, %f2
+ ret float %res
+}
+
+; Check that AEB allows indices.
+define float @f6(float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: aeb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %res = fadd float %f1, %f2
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-add-02.ll b/test/CodeGen/SystemZ/fp-add-02.ll
new file mode 100644
index 000000000000..08eb90efbfaa
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-add-02.ll
@@ -0,0 +1,71 @@
+; Test 64-bit floating-point addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register addition.
+define double @f1(double %f1, double %f2) {
+; CHECK: f1:
+; CHECK: adbr %f0, %f2
+; CHECK: br %r14
+ %res = fadd double %f1, %f2
+ ret double %res
+}
+
+; Check the low end of the ADB range.
+define double @f2(double %f1, double *%ptr) {
+; CHECK: f2:
+; CHECK: adb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %res = fadd double %f1, %f2
+ ret double %res
+}
+
+; Check the high end of the aligned ADB range.
+define double @f3(double %f1, double *%base) {
+; CHECK: f3:
+; CHECK: adb %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %res = fadd double %f1, %f2
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %f1, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: adb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %res = fadd double %f1, %f2
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(double %f1, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: adb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %res = fadd double %f1, %f2
+ ret double %res
+}
+
+; Check that ADB allows indices.
+define double @f6(double %f1, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: adb %f0, 800(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %res = fadd double %f1, %f2
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-add-03.ll b/test/CodeGen/SystemZ/fp-add-03.ll
new file mode 100644
index 000000000000..13ffb023b6fb
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-add-03.ll
@@ -0,0 +1,20 @@
+; Test 128-bit floating-point addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; There is no memory form of 128-bit addition.
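+; The fp128 operand occupies an FPR pair (%f1 and %f3 below, high part first).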
+define void @f1(fp128 *%ptr, float %f2) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: ld %f1, 0(%r2)
+; CHECK: ld %f3, 8(%r2)
+; CHECK: axbr %f1, %f0
+; CHECK: std %f1, 0(%r2)
+; CHECK: std %f3, 8(%r2)
+; CHECK: br %r14
+ %f1 = load fp128 *%ptr
+ %f2x = fpext float %f2 to fp128
+ %sum = fadd fp128 %f1, %f2x
+ store fp128 %sum, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-01.ll b/test/CodeGen/SystemZ/fp-cmp-01.ll
new file mode 100644
index 000000000000..b80a71595e87
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-cmp-01.ll
@@ -0,0 +1,89 @@
+; Test 32-bit floating-point comparison.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check comparison with registers.
+define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: cebr %f0, %f2
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the low end of the CEB range.
+define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: ceb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the high end of the aligned CEB range.
+define i64 @f3(i64 %a, i64 %b, float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: ceb %f0, 4092(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 %b, float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r4, 4096
+; CHECK: ceb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define i64 @f5(i64 %a, i64 %b, float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r4, -4
+; CHECK: ceb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check that CEB allows indices.
+define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r5, 2
+; CHECK: ceb %f0, 400(%r1,%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %cond = fcmp oeq float %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-02.ll b/test/CodeGen/SystemZ/fp-cmp-02.ll
new file mode 100644
index 000000000000..8227308ce810
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-cmp-02.ll
@@ -0,0 +1,89 @@
+; Test 64-bit floating-point comparison.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check comparison with registers.
+define i64 @f1(i64 %a, i64 %b, double %f1, double %f2) {
+; CHECK: f1:
+; CHECK: cdbr %f0, %f2
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the low end of the CDB range.
+define i64 @f2(i64 %a, i64 %b, double %f1, double *%ptr) {
+; CHECK: f2:
+; CHECK: cdb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the high end of the aligned CDB range.
+define i64 @f3(i64 %a, i64 %b, double %f1, double *%base) {
+; CHECK: f3:
+; CHECK: cdb %f0, 4088(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 %b, double %f1, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r4, 4096
+; CHECK: cdb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define i64 @f5(i64 %a, i64 %b, double %f1, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r4, -8
+; CHECK: cdb %f0, 0(%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
+
+; Check that CDB allows indices.
+define i64 @f6(i64 %a, i64 %b, double %f1, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r5, 3
+; CHECK: cdb %f0, 800(%r1,%r4)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %cond = fcmp oeq double %f1, %f2
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-03.ll b/test/CodeGen/SystemZ/fp-cmp-03.ll
new file mode 100644
index 000000000000..fd12c93e27a5
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-cmp-03.ll
@@ -0,0 +1,20 @@
+; Test 128-bit floating-point comparison.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; There is no memory form of 128-bit comparison.
+define i64 @f1(i64 %a, i64 %b, fp128 *%ptr, float %f2) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: ld %f1, 0(%r4)
+; CHECK: ld %f3, 8(%r4)
+; CHECK: cxbr %f1, %f0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ %f2x = fpext float %f2 to fp128
+ %f1 = load fp128 *%ptr
+ %cond = fcmp oeq fp128 %f1, %f2x
+ %res = select i1 %cond, i64 %a, i64 %b
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/fp-const-01.ll b/test/CodeGen/SystemZ/fp-const-01.ll
new file mode 100644
index 000000000000..65209d661e97
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-01.ll
@@ -0,0 +1,30 @@
+; Test loads of floating-point zero.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32.
+define float @f1() {
+; CHECK: f1:
+; CHECK: lzer %f0
+; CHECK: br %r14
+ ret float 0.0
+}
+
+; Test f64.
+define double @f2() {
+; CHECK: f2:
+; CHECK: lzdr %f0
+; CHECK: br %r14
+ ret double 0.0
+}
+
+; Test f128.
+define void @f3(fp128 *%x) {
+; CHECK: f3:
+; CHECK: lzxr %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ store fp128 0xL00000000000000000000000000000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-const-02.ll b/test/CodeGen/SystemZ/fp-const-02.ll
new file mode 100644
index 000000000000..2dedf54e6f7d
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-02.ll
@@ -0,0 +1,31 @@
+; Test loads of negative floating-point zero.
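+; There is no direct load of -0.0, so we expect +0.0 to be materialized
+; (LZER/LZDR/LZXR) and then negated (LCEBR/LCDBR/LCXBR).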
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32.
+define float @f1() {
+; CHECK: f1:
+; CHECK: lzer [[REGISTER:%f[0-5]+]]
+; CHECK: lcebr %f0, [[REGISTER]]
+; CHECK: br %r14
+ ret float -0.0
+}
+
+; Test f64.
+define double @f2() {
+; CHECK: f2:
+; CHECK: lzdr [[REGISTER:%f[0-5]+]]
+; CHECK: lcdbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ ret double -0.0
+}
+
+; Test f128.
+define void @f3(fp128 *%x) {
+; CHECK: f3:
+; CHECK: lzxr [[REGISTER:%f[0-5]+]]
+; CHECK: lcxbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ store fp128 0xL00000000000000008000000000000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-const-03.ll b/test/CodeGen/SystemZ/fp-const-03.ll
new file mode 100644
index 000000000000..4c287e4c08a3
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-03.ll
@@ -0,0 +1,14 @@
+; Test loads of 32-bit floating-point constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define float @f1() {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]]], {{.*}}
+; CHECK: le %f0, 0([[REGISTER]])
+; CHECK: br %r14
+;
+; CONST: .long 1065353217
+ ret float 0x3ff0000020000000
+}
diff --git a/test/CodeGen/SystemZ/fp-const-04.ll b/test/CodeGen/SystemZ/fp-const-04.ll
new file mode 100644
index 000000000000..847c380e3b98
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-04.ll
@@ -0,0 +1,15 @@
+; Test loads of 64-bit floating-point constants that can be represented
+; as 32-bit constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define double @f1() {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]]], {{.*}}
+; CHECK: ldeb %f0, 0([[REGISTER]])
+; CHECK: br %r14
+;
+; CONST: .long 1065353217
+ ret double 0x3ff0000020000000
+}
diff --git a/test/CodeGen/SystemZ/fp-const-05.ll b/test/CodeGen/SystemZ/fp-const-05.ll
new file mode 100644
index 000000000000..48f84ce5bee8
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-05.ll
@@ -0,0 +1,18 @@
+; Test loads of 128-bit floating-point constants that can be represented
+; as 32-bit constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define void @f1(fp128 *%x) {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: lxeb %f0, 0([[REGISTER]])
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+;
+; CONST: .long 1065353217
+ store fp128 0xL00000000000000003fff000002000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-const-06.ll b/test/CodeGen/SystemZ/fp-const-06.ll
new file mode 100644
index 000000000000..1da3d5eafaae
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-06.ll
@@ -0,0 +1,14 @@
+; Test loads of 64-bit floating-point constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define double @f1() {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: ld %f0, 0([[REGISTER]])
+; CHECK: br %r14
+;
+; CONST: .quad 4607182419068452864
+ ret double 0x3ff0000010000000
+}
diff --git a/test/CodeGen/SystemZ/fp-const-07.ll b/test/CodeGen/SystemZ/fp-const-07.ll
new file mode 100644
index 000000000000..5a108452a8e0
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-07.ll
@@ -0,0 +1,18 @@
+; Test loads of 128-bit floating-point constants that can be represented
+; as 64-bit constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define void @f1(fp128 *%x) {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: lxdb %f0, 0([[REGISTER]])
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+;
+; CONST: .quad 4607182419068452864
+ store fp128 0xL00000000000000003fff000001000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-const-08.ll b/test/CodeGen/SystemZ/fp-const-08.ll
new file mode 100644
index 000000000000..6a8a1ab3f9b7
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-08.ll
@@ -0,0 +1,21 @@
+; Test loads of 128-bit floating-point constants. This value would actually
+; fit within the x86 80-bit format, so the test makes sure we don't try to
+; extend from an f80.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define void @f1(fp128 *%x) {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: ld %f0, 0([[REGISTER]])
+; CHECK: ld %f2, 8([[REGISTER]])
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+;
+; CONST: .quad 4611404543450677248
+; CONST: .quad 576460752303423488
+ store fp128 0xL08000000000000003fff000000000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-const-09.ll b/test/CodeGen/SystemZ/fp-const-09.ll
new file mode 100644
index 000000000000..435dcbacc19d
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-const-09.ll
@@ -0,0 +1,20 @@
+; Test loads of 128-bit floating-point constants in which the low bit of
+; the significand is set.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CONST
+
+define void @f1(fp128 *%x) {
+; CHECK: f1:
+; CHECK: larl [[REGISTER:%r[1-5]+]], {{.*}}
+; CHECK: ld %f0, 0([[REGISTER]])
+; CHECK: ld %f2, 8([[REGISTER]])
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+;
+; CONST: .quad 4611404543450677248
+; CONST: .quad 1
+ store fp128 0xL00000000000000013fff000000000000, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-01.ll b/test/CodeGen/SystemZ/fp-conv-01.ll
new file mode 100644
index 000000000000..6c8ef4899776
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-01.ll
@@ -0,0 +1,61 @@
+; Test floating-point truncations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f64->f32.
+define float @f1(double %d1, double %d2) {
+; CHECK: f1:
+; CHECK: ledbr %f0, %f2
+; CHECK: br %r14
+ %res = fptrunc double %d2 to float
+ ret float %res
+}
+
+; Test f128->f32.
+define float @f2(fp128 *%ptr) {
+; CHECK: f2:
+; CHECK: lexbr %f0, %f0
+; CHECK: br %r14
+ %val = load fp128 *%ptr
+ %res = fptrunc fp128 %val to float
+ ret float %res
+}
+
+; Make sure that we don't use %f0 as the destination of LEXBR when %f2
+; is still live.
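+; (LEXBR writes a register pair, so a result in %f0 would clobber %f2.)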
+define void @f3(float *%dst, fp128 *%ptr, float %d1, float %d2) {
+; CHECK: f3:
+; CHECK: lexbr %f1, %f1
+; CHECK: aebr %f1, %f2
+; CHECK: ste %f1, 0(%r2)
+; CHECK: br %r14
+ %val = load fp128 *%ptr
+ %conv = fptrunc fp128 %val to float
+ %res = fadd float %conv, %d2
+ store float %res, float *%dst
+ ret void
+}
+
+; Test f128->f64.
+define double @f4(fp128 *%ptr) {
+; CHECK: f4:
+; CHECK: ldxbr %f0, %f0
+; CHECK: br %r14
+ %val = load fp128 *%ptr
+ %res = fptrunc fp128 %val to double
+ ret double %res
+}
+
+; Like f3, but for f128->f64.
+define void @f5(double *%dst, fp128 *%ptr, double %d1, double %d2) {
+; CHECK: f5:
+; CHECK: ldxbr %f1, %f1
+; CHECK: adbr %f1, %f2
+; CHECK: std %f1, 0(%r2)
+; CHECK: br %r14
+ %val = load fp128 *%ptr
+ %conv = fptrunc fp128 %val to double
+ %res = fadd double %conv, %d2
+ store double %res, double *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-02.ll b/test/CodeGen/SystemZ/fp-conv-02.ll
new file mode 100644
index 000000000000..f284e1dc2ae4
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-02.ll
@@ -0,0 +1,71 @@
+; Test extensions of f32 to f64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register extension.
+define double @f1(float %val) {
+; CHECK: f1:
+; CHECK: ldebr %f0, %f0
+; CHECK: br %r14
+ %res = fpext float %val to double
+ ret double %res
+}
+
+; Check the low end of the LDEB range.
+define double @f2(float *%ptr) {
+; CHECK: f2:
+; CHECK: ldeb %f0, 0(%r2)
+; CHECK: br %r14
+ %val = load float *%ptr
+ %res = fpext float %val to double
+ ret double %res
+}
+
+; Check the high end of the aligned LDEB range.
+define double @f3(float *%base) {
+; CHECK: f3:
+; CHECK: ldeb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %val = load float *%ptr
+ %res = fpext float %val to double
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: ldeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %val = load float *%ptr
+ %res = fpext float %val to double
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: ldeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %val = load float *%ptr
+ %res = fpext float %val to double
+ ret double %res
+}
+
+; Check that LDEB allows indices.
+define double @f6(float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: ldeb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %val = load float *%ptr2
+ %res = fpext float %val to double
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-03.ll b/test/CodeGen/SystemZ/fp-conv-03.ll
new file mode 100644
index 000000000000..703a141e3e12
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-03.ll
@@ -0,0 +1,89 @@
+; Test extensions of f32 to f128.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register extension.
+define void @f1(fp128 *%dst, float %val) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the low end of the LXEB range.
+define void @f2(fp128 *%dst, float *%ptr) {
+; CHECK: f2:
+; CHECK: lxeb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %val = load float *%ptr
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the high end of the aligned LXEB range.
+define void @f3(fp128 *%dst, float *%base) {
+; CHECK: f3:
+; CHECK: lxeb %f0, 4092(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %val = load float *%ptr
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f4(fp128 *%dst, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r3, 4096
+; CHECK: lxeb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %val = load float *%ptr
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f5(fp128 *%dst, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r3, -4
+; CHECK: lxeb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %val = load float *%ptr
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check that LXEB allows indices.
+define void @f6(fp128 *%dst, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r4, 2
+; CHECK: lxeb %f0, 400(%r1,%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %val = load float *%ptr2
+ %res = fpext float %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-04.ll b/test/CodeGen/SystemZ/fp-conv-04.ll
new file mode 100644
index 000000000000..b7b516693430
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-04.ll
@@ -0,0 +1,89 @@
+; Test extensions of f64 to f128.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register extension.
+define void @f1(fp128 *%dst, double %val) {
+; CHECK: f1:
+; CHECK: lxdbr %f0, %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the low end of the LXDB range.
+define void @f2(fp128 *%dst, double *%ptr) {
+; CHECK: f2:
+; CHECK: lxdb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %val = load double *%ptr
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the high end of the aligned LXDB range.
+define void @f3(fp128 *%dst, double *%base) {
+; CHECK: f3:
+; CHECK: lxdb %f0, 4088(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %val = load double *%ptr
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f4(fp128 *%dst, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r3, 4096
+; CHECK: lxdb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %val = load double *%ptr
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f5(fp128 *%dst, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r3, -8
+; CHECK: lxdb %f0, 0(%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %val = load double *%ptr
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check that LXDB allows indices.
+define void @f6(fp128 *%dst, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r4, 3
+; CHECK: lxdb %f0, 800(%r1,%r3)
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %val = load double *%ptr2
+ %res = fpext double %val to fp128
+ store fp128 %res, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-05.ll b/test/CodeGen/SystemZ/fp-conv-05.ll
new file mode 100644
index 000000000000..2d887324c3e6
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-05.ll
@@ -0,0 +1,33 @@
+; Test conversions of signed i32s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check i32->f32.
+define float @f1(i32 %i) {
+; CHECK: f1:
+; CHECK: cefbr %f0, %r2
+; CHECK: br %r14
+ %conv = sitofp i32 %i to float
+ ret float %conv
+}
+
+; Check i32->f64.
+define double @f2(i32 %i) {
+; CHECK: f2:
+; CHECK: cdfbr %f0, %r2
+; CHECK: br %r14
+ %conv = sitofp i32 %i to double
+ ret double %conv
+}
+
+; Check i32->f128.
+define void @f3(i32 %i, fp128 *%dst) {
+; CHECK: f3:
+; CHECK: cxfbr %f0, %r2
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = sitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-06.ll b/test/CodeGen/SystemZ/fp-conv-06.ll
new file mode 100644
index 000000000000..1b39b67d49b8
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-06.ll
@@ -0,0 +1,37 @@
+; Test conversions of unsigned i32s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check i32->f32. There is no native instruction, so we must promote
+; to i64 first.
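+; (Zero extension makes every unsigned i32 value exactly representable as a
+; non-negative signed i64, so the signed convert gives the right answer.)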
+define float @f1(i32 %i) {
+; CHECK: f1:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cegbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ %conv = uitofp i32 %i to float
+ ret float %conv
+}
+
+; Check i32->f64.
+define double @f2(i32 %i) {
+; CHECK: f2:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cdgbr %f0, [[REGISTER]]
+; CHECK: br %r14
+ %conv = uitofp i32 %i to double
+ ret double %conv
+}
+
+; Check i32->f128.
+define void @f3(i32 %i, fp128 *%dst) {
+; CHECK: f3:
+; CHECK: llgfr [[REGISTER:%r[0-5]]], %r2
+; CHECK: cxgbr %f0, [[REGISTER]]
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = uitofp i32 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-07.ll b/test/CodeGen/SystemZ/fp-conv-07.ll
new file mode 100644
index 000000000000..0ebbd37d512d
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-07.ll
@@ -0,0 +1,33 @@
+; Test conversions of signed i64s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test i64->f32.
+define float @f1(i64 %i) {
+; CHECK: f1:
+; CHECK: cegbr %f0, %r2
+; CHECK: br %r14
+ %conv = sitofp i64 %i to float
+ ret float %conv
+}
+
+; Test i64->f64.
+define double @f2(i64 %i) {
+; CHECK: f2:
+; CHECK: cdgbr %f0, %r2
+; CHECK: br %r14
+ %conv = sitofp i64 %i to double
+ ret double %conv
+}
+
+; Test i64->f128.
+define void @f3(i64 %i, fp128 *%dst) {
+; CHECK: f3:
+; CHECK: cxgbr %f0, %r2
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %conv = sitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-08.ll b/test/CodeGen/SystemZ/fp-conv-08.ll
new file mode 100644
index 000000000000..20c4e30f0796
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-08.ll
@@ -0,0 +1,35 @@
+; Test conversions of unsigned i64s to floating-point values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test i64->f32. There's no native support for unsigned i64-to-fp conversions,
+; but we should be able to implement them using signed i64-to-fp conversions.
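+; The usual expansion is roughly (an IR-level sketch, not the exact DAG):
+;   %half = lshr i64 %i, 1
+;   %low  = and i64 %i, 1
+;   %odd  = or i64 %half, %low   ; keep the low bit for correct rounding
+;   %f    = sitofp i64 %odd to float
+;   %res  = fadd float %f, %f    ; double it back, hence the AEBR below
+; with the direct signed convert used when the sign bit is clear.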
+define float @f1(i64 %i) {
+; CHECK: f1:
+; CHECK: cegbr
+; CHECK: aebr
+; CHECK: br %r14
+ %conv = uitofp i64 %i to float
+ ret float %conv
+}
+
+; Test i64->f64.
+define double @f2(i64 %i) {
+; CHECK: f2:
+; CHECK: ldgr
+; CHECK: adbr
+; CHECK: br %r14
+ %conv = uitofp i64 %i to double
+ ret double %conv
+}
+
+; Test i64->f128.
+define void @f3(i64 %i, fp128 *%dst) {
+; CHECK: f3:
+; CHECK: cxgbr
+; CHECK: axbr
+; CHECK: br %r14
+ %conv = uitofp i64 %i to fp128
+ store fp128 %conv, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-09.ll b/test/CodeGen/SystemZ/fp-conv-09.ll
new file mode 100644
index 000000000000..e3c0352cf84e
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-09.ll
@@ -0,0 +1,33 @@
+; Test conversion of floating-point values to signed i32s.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32->i32.
+define i32 @f1(float %f) {
+; CHECK: f1:
+; CHECK: cfebr %r2, 5, %f0
+; CHECK: br %r14
+ %conv = fptosi float %f to i32
+ ret i32 %conv
+}
+
+; Test f64->i32.
+define i32 @f2(double %f) {
+; CHECK: f2:
+; CHECK: cfdbr %r2, 5, %f0
+; CHECK: br %r14
+ %conv = fptosi double %f to i32
+ ret i32 %conv
+}
+
+; Test f128->i32.
+define i32 @f3(fp128 *%src) {
+; CHECK: f3:
+; CHECK: ld %f0, 0(%r2)
+; CHECK: ld %f2, 8(%r2)
+; CHECK: cfxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptosi fp128 %f to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-10.ll b/test/CodeGen/SystemZ/fp-conv-10.ll
new file mode 100644
index 000000000000..bb8878bacee8
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-10.ll
@@ -0,0 +1,45 @@
+; Test conversion of floating-point values to unsigned i32s.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; z10 doesn't have native support for unsigned fp-to-i32 conversions;
+; they were added in z196 as the Convert to Logical family of instructions.
+; Promoting to i64 doesn't generate an inexact condition for values that are
+; outside the i32 range but in the i64 range, so use the default expansion.
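+; Roughly: compare the input against 2^31 (CEBR/CDBR/CXBR), subtract 2^31
+; for values at or above it (SEBR/SDBR/SXBR), convert signed
+; (CFEBR/CFDBR/CFXBR), then XOR the sign bit back into the result for the
+; large case (XILF).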
+
+; Test f32->i32.
+define i32 @f1(float %f) {
+; CHECK: f1:
+; CHECK: cebr
+; CHECK: sebr
+; CHECK: cfebr
+; CHECK: xilf
+; CHECK: br %r14
+ %conv = fptoui float %f to i32
+ ret i32 %conv
+}
+
+; Test f64->i32.
+define i32 @f2(double %f) {
+; CHECK: f2:
+; CHECK: cdbr
+; CHECK: sdbr
+; CHECK: cfdbr
+; CHECK: xilf
+; CHECK: br %r14
+ %conv = fptoui double %f to i32
+ ret i32 %conv
+}
+
+; Test f128->i32.
+define i32 @f3(fp128 *%src) {
+; CHECK: f3:
+; CHECK: cxbr
+; CHECK: sxbr
+; CHECK: cfxbr
+; CHECK: xilf
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptoui fp128 %f to i32
+ ret i32 %conv
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-11.ll b/test/CodeGen/SystemZ/fp-conv-11.ll
new file mode 100644
index 000000000000..2a36cb955cb5
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-11.ll
@@ -0,0 +1,33 @@
+; Test conversion of floating-point values to signed i64s.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32->i64.
+define i64 @f1(float %f) {
+; CHECK: f1:
+; CHECK: cgebr %r2, 5, %f0
+; CHECK: br %r14
+ %conv = fptosi float %f to i64
+ ret i64 %conv
+}
+
+; Test f64->i64.
+define i64 @f2(double %f) {
+; CHECK: f2:
+; CHECK: cgdbr %r2, 5, %f0
+; CHECK: br %r14
+ %conv = fptosi double %f to i64
+ ret i64 %conv
+}
+
+; Test f128->i64.
+define i64 @f3(fp128 *%src) {
+; CHECK: f3:
+; CHECK: ld %f0, 0(%r2)
+; CHECK: ld %f2, 8(%r2)
+; CHECK: cgxbr %r2, 5, %f0
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptosi fp128 %f to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-12.ll b/test/CodeGen/SystemZ/fp-conv-12.ll
new file mode 100644
index 000000000000..4445b14ee8ef
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-conv-12.ll
@@ -0,0 +1,44 @@
+; Test conversion of floating-point values to unsigned i64s.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; z10 doesn't have native support for unsigned fp-to-i64 conversions;
+; they were added in z196 as the Convert to Logical family of instructions.
+; Convert via signed i64s instead.
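+; Roughly: compare the input against 2^63, subtract it for values at or
+; above it, convert signed (CGEBR/CGDBR/CGXBR), then XOR the sign bit back
+; into the result for the large case (XIHF).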
+
+; Test f32->i64.
+define i64 @f1(float %f) {
+; CHECK: f1:
+; CHECK: cebr
+; CHECK: sebr
+; CHECK: cgebr
+; CHECK: xihf
+; CHECK: br %r14
+ %conv = fptoui float %f to i64
+ ret i64 %conv
+}
+
+; Test f64->i64.
+define i64 @f2(double %f) {
+; CHECK: f2:
+; CHECK: cdbr
+; CHECK: sdbr
+; CHECK: cgdbr
+; CHECK: xihf
+; CHECK: br %r14
+ %conv = fptoui double %f to i64
+ ret i64 %conv
+}
+
+; Test f128->i64.
+define i64 @f3(fp128 *%src) {
+; CHECK: f3:
+; CHECK: cxbr
+; CHECK: sxbr
+; CHECK: cgxbr
+; CHECK: xihf
+; CHECK: br %r14
+ %f = load fp128 *%src
+ %conv = fptoui fp128 %f to i64
+ ret i64 %conv
+}
diff --git a/test/CodeGen/SystemZ/fp-copysign-01.ll b/test/CodeGen/SystemZ/fp-copysign-01.ll
new file mode 100644
index 000000000000..458d475bdf3c
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-copysign-01.ll
@@ -0,0 +1,128 @@
+; Test copysign operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @copysignf(float, float) readnone
+declare double @copysign(double, double) readnone
+; FIXME: not really the correct prototype for SystemZ.
+declare fp128 @copysignl(fp128, fp128) readnone
+
+; Test f32 copies in which the sign comes from an f32.
+define float @f1(float %a, float %b) {
+; CHECK: f1:
+; CHECK-NOT: %f2
+; CHECK: cpsdr %f0, %f0, %f2
+; CHECK: br %r14
+ %res = call float @copysignf(float %a, float %b) readnone
+ ret float %res
+}
+
+; Test f32 copies in which the sign comes from an f64.
+define float @f2(float %a, double %bd) {
+; CHECK: f2:
+; CHECK-NOT: %f2
+; CHECK: cpsdr %f0, %f0, %f2
+; CHECK: br %r14
+ %b = fptrunc double %bd to float
+ %res = call float @copysignf(float %a, float %b) readnone
+ ret float %res
+}
+
+; Test f32 copies in which the sign comes from an f128.
+define float @f3(float %a, fp128 *%bptr) {
+; CHECK: f3:
+; CHECK: ld [[BHIGH:%f[0-7]]], 0(%r2)
+; CHECK: ld [[BLOW:%f[0-7]]], 8(%r2)
+; CHECK: cpsdr %f0, %f0, [[BHIGH]]
+; CHECK: br %r14
+ %bl = load volatile fp128 *%bptr
+ %b = fptrunc fp128 %bl to float
+ %res = call float @copysignf(float %a, float %b) readnone
+ ret float %res
+}
+
+; Test f64 copies in which the sign comes from an f32.
+define double @f4(double %a, float %bf) {
+; CHECK: f4:
+; CHECK-NOT: %f2
+; CHECK: cpsdr %f0, %f0, %f2
+; CHECK: br %r14
+ %b = fpext float %bf to double
+ %res = call double @copysign(double %a, double %b) readnone
+ ret double %res
+}
+
+; Test f64 copies in which the sign comes from an f64.
+define double @f5(double %a, double %b) {
+; CHECK: f5:
+; CHECK-NOT: %f2
+; CHECK: cpsdr %f0, %f0, %f2
+; CHECK: br %r14
+ %res = call double @copysign(double %a, double %b) readnone
+ ret double %res
+}
+
+; Test f64 copies in which the sign comes from an f128.
+define double @f6(double %a, fp128 *%bptr) {
+; CHECK: f6:
+; CHECK: ld [[BHIGH:%f[0-7]]], 0(%r2)
+; CHECK: ld [[BLOW:%f[0-7]]], 8(%r2)
+; CHECK: cpsdr %f0, %f0, [[BHIGH]]
+; CHECK: br %r14
+ %bl = load volatile fp128 *%bptr
+ %b = fptrunc fp128 %bl to double
+ %res = call double @copysign(double %a, double %b) readnone
+ ret double %res
+}
+
+; Test f128 copies in which the sign comes from an f32. We shouldn't
+; need any register shuffling here; %a should be tied to %c, with CPSDR
+; just changing the high register.
+define void @f7(fp128 *%cptr, fp128 *%aptr, float %bf) {
+; CHECK: f7:
+; CHECK: ld [[AHIGH:%f[0-7]]], 0(%r3)
+; CHECK: ld [[ALOW:%f[0-7]]], 8(%r3)
+; CHECK: cpsdr [[AHIGH]], [[AHIGH]], %f0
+; CHECK: std [[AHIGH]], 0(%r2)
+; CHECK: std [[ALOW]], 8(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128 *%aptr
+ %b = fpext float %bf to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f64.
+define void @f8(fp128 *%cptr, fp128 *%aptr, double %bd) {
+; CHECK: f8:
+; CHECK: ld [[AHIGH:%f[0-7]]], 0(%r3)
+; CHECK: ld [[ALOW:%f[0-7]]], 8(%r3)
+; CHECK: cpsdr [[AHIGH]], [[AHIGH]], %f0
+; CHECK: std [[AHIGH]], 0(%r2)
+; CHECK: std [[ALOW]], 8(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128 *%aptr
+ %b = fpext double %bd to fp128
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
+
+; As above, but the sign comes from an f128. Don't require the low part
+; of %b to be loaded, since it isn't used.
+define void @f9(fp128 *%cptr, fp128 *%aptr, fp128 *%bptr) {
+; CHECK: f9:
+; CHECK: ld [[AHIGH:%f[0-7]]], 0(%r3)
+; CHECK: ld [[ALOW:%f[0-7]]], 8(%r3)
+; CHECK: ld [[BHIGH:%f[0-7]]], 0(%r4)
+; CHECK: cpsdr [[AHIGH]], [[AHIGH]], [[BHIGH]]
+; CHECK: std [[AHIGH]], 0(%r2)
+; CHECK: std [[ALOW]], 8(%r2)
+; CHECK: br %r14
+ %a = load volatile fp128 *%aptr
+ %b = load volatile fp128 *%bptr
+ %c = call fp128 @copysignl(fp128 %a, fp128 %b) readnone
+ store fp128 %c, fp128 *%cptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-div-01.ll b/test/CodeGen/SystemZ/fp-div-01.ll
new file mode 100644
index 000000000000..080d45eb2bfb
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-div-01.ll
@@ -0,0 +1,71 @@
+; Test 32-bit floating-point division.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register division.
+define float @f1(float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: debr %f0, %f2
+; CHECK: br %r14
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
+
+; Check the low end of the DEB range.
+define float @f2(float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: deb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
+
+; Check the high end of the aligned DEB range.
+define float @f3(float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: deb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f4(float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: deb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define float @f5(float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: deb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
+
+; Check that DEB allows indices.
+define float @f6(float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: deb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %res = fdiv float %f1, %f2
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-div-02.ll b/test/CodeGen/SystemZ/fp-div-02.ll
new file mode 100644
index 000000000000..c5cae15a824b
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-div-02.ll
@@ -0,0 +1,71 @@
+; Test 64-bit floating-point division.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register division.
+define double @f1(double %f1, double %f2) {
+; CHECK: f1:
+; CHECK: ddbr %f0, %f2
+; CHECK: br %r14
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
+
+; Check the low end of the DDB range.
+define double @f2(double %f1, double *%ptr) {
+; CHECK: f2:
+; CHECK: ddb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
+
+; Check the high end of the aligned DDB range.
+define double @f3(double %f1, double *%base) {
+; CHECK: f3:
+; CHECK: ddb %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %f1, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: ddb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(double %f1, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: ddb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
+
+; Check that DDB allows indices.
+define double @f6(double %f1, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: ddb %f0, 800(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %res = fdiv double %f1, %f2
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-div-03.ll b/test/CodeGen/SystemZ/fp-div-03.ll
new file mode 100644
index 000000000000..18f2d7449a80
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-div-03.ll
@@ -0,0 +1,20 @@
+; Test 128-bit floating-point division.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; There is no memory form of 128-bit division.
+define void @f1(fp128 *%ptr, float %f2) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: ld %f1, 0(%r2)
+; CHECK: ld %f3, 8(%r2)
+; CHECK: dxbr %f1, %f0
+; CHECK: std %f1, 0(%r2)
+; CHECK: std %f3, 8(%r2)
+; CHECK: br %r14
+ %f1 = load fp128 *%ptr
+ %f2x = fpext float %f2 to fp128
+ %sum = fdiv fp128 %f1, %f2x
+ store fp128 %sum, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-01.ll b/test/CodeGen/SystemZ/fp-move-01.ll
new file mode 100644
index 000000000000..73cd978c5975
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-01.ll
@@ -0,0 +1,30 @@
+; Test moves between FPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32 moves.
+define float @f1(float %a, float %b) {
+; CHECK: f1:
+; CHECK: ler %f0, %f2
+ ret float %b
+}
+
+; Test f64 moves.
+define double @f2(double %a, double %b) {
+; CHECK: f2:
+; CHECK: ldr %f0, %f2
+ ret double %b
+}
+
+; Test f128 moves. Since f128s are passed by reference, we need to force
+; a copy by other means.
+define void @f3(fp128 *%x) {
+; CHECK: f3:
+; CHECK: lxr
+; CHECK: axbr
+ %val = load volatile fp128 *%x
+ %sum = fadd fp128 %val, %val
+ store volatile fp128 %sum, fp128 *%x
+ store volatile fp128 %val, fp128 *%x
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-02.ll b/test/CodeGen/SystemZ/fp-move-02.ll
new file mode 100644
index 000000000000..9d87797c8f92
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-02.ll
@@ -0,0 +1,103 @@
+; Test moves between FPRs and GPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 32-bit moves from GPRs to FPRs. The GPR must be moved into the high
+; 32 bits of the FPR.
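+; LDGR is a full 64-bit GPR-to-FPR move, and an f32 occupies the high word
+; of an FPR, hence the SLLG by 32 first.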
+define float @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: sllg [[REGISTER:%r[0-5]]], %r2, 32
+; CHECK: ldgr %f0, [[REGISTER]]
+ %res = bitcast i32 %a to float
+ ret float %res
+}
+
+; Like f1, but create a situation where the shift can be folded with
+; surrounding code.
+define float @f2(i64 %big) {
+; CHECK: f2:
+; CHECK: sllg [[REGISTER:%r[0-5]]], %r2, 31
+; CHECK: ldgr %f0, [[REGISTER]]
+ %shift = lshr i64 %big, 1
+ %a = trunc i64 %shift to i32
+ %res = bitcast i32 %a to float
+ ret float %res
+}
+
+; Another example of the same thing.
+define float @f3(i64 %big) {
+; CHECK: f3:
+; CHECK: sllg [[REGISTER:%r[0-5]]], %r2, 2
+; CHECK: ldgr %f0, [[REGISTER]]
+ %shift = ashr i64 %big, 30
+ %a = trunc i64 %shift to i32
+ %res = bitcast i32 %a to float
+ ret float %res
+}
+
+; Like f1, but the value to transfer is already in the high 32 bits.
+define float @f4(i64 %big) {
+; CHECK: f4:
+; CHECK-NOT: %r2
+; CHECK: nilf %r2, 0
+; CHECK-NOT: %r2
+; CHECK: ldgr %f0, %r2
+ %shift = ashr i64 %big, 32
+ %a = trunc i64 %shift to i32
+ %res = bitcast i32 %a to float
+ ret float %res
+}
+
+; Test 64-bit moves from GPRs to FPRs.
+define double @f5(i64 %a) {
+; CHECK: f5:
+; CHECK: ldgr %f0, %r2
+ %res = bitcast i64 %a to double
+ ret double %res
+}
+
+; Test 128-bit moves from GPRs to FPRs. i128 isn't a legitimate type,
+; so this goes through memory.
+define void @f6(fp128 *%a, i128 *%b) {
+; CHECK: f6:
+; CHECK: lg
+; CHECK: lg
+; CHECK: stg
+; CHECK: stg
+ %val = load i128 *%b
+ %res = bitcast i128 %val to fp128
+ store fp128 %res, fp128 *%a
+ ret void
+}
+
+; Test 32-bit moves from FPRs to GPRs. The high 32 bits of the FPR should
+; be moved into the low 32 bits of the GPR.
+define i32 @f7(float %a) {
+; CHECK: f7:
+; CHECK: lgdr [[REGISTER:%r[0-5]]], %f0
+; CHECK: srlg %r2, [[REGISTER]], 32
+ %res = bitcast float %a to i32
+ ret i32 %res
+}
+
+; Test 64-bit moves from FPRs to GPRs.
+define i64 @f8(double %a) {
+; CHECK: f8:
+; CHECK: lgdr %r2, %f0
+ %res = bitcast double %a to i64
+ ret i64 %res
+}
+
+; Test 128-bit moves from FPRs to GPRs, with the same restriction as f6.
+define void @f9(fp128 *%a, i128 *%b) {
+; CHECK: f9:
+; CHECK: ld
+; CHECK: ld
+; CHECK: std
+; CHECK: std
+ %val = load fp128 *%a
+ %res = bitcast fp128 %val to i128
+ store i128 %res, i128 *%b
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/fp-move-03.ll b/test/CodeGen/SystemZ/fp-move-03.ll
new file mode 100644
index 000000000000..37dbdfad7b87
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-03.ll
@@ -0,0 +1,110 @@
+; Test 32-bit floating-point loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test the low end of the LE range.
+define float @f1(float *%src) {
+; CHECK: f1:
+; CHECK: le %f0, 0(%r2)
+; CHECK: br %r14
+ %val = load float *%src
+ ret float %val
+}
+
+; Test the high end of the LE range.
+define float @f2(float *%src) {
+; CHECK: f2:
+; CHECK: le %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 1023
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the next word up, which should use LEY instead of LE.
+define float @f3(float *%src) {
+; CHECK: f3:
+; CHECK: ley %f0, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 1024
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the high end of the aligned LEY range.
+define float @f4(float *%src) {
+; CHECK: f4:
+; CHECK: ley %f0, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 131071
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f5(float *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: le %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 131072
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the high end of the negative aligned LEY range.
+define float @f6(float *%src) {
+; CHECK: f6:
+; CHECK: ley %f0, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -1
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the low end of the LEY range.
+define float @f7(float *%src) {
+; CHECK: f7:
+; CHECK: ley %f0, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -131072
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f8(float *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524292
+; CHECK: le %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -131073
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check that LE allows an index.
+define float @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: le %f0, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to float *
+ %val = load float *%ptr
+ ret float %val
+}
+
+; Check that LEY allows an index.
+define float @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: ley %f0, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to float *
+ %val = load float *%ptr
+ ret float %val
+}
diff --git a/test/CodeGen/SystemZ/fp-move-04.ll b/test/CodeGen/SystemZ/fp-move-04.ll
new file mode 100644
index 000000000000..72e90d1fffd1
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-04.ll
@@ -0,0 +1,110 @@
+; Test 64-bit floating-point loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test the low end of the LD range.
+define double @f1(double *%src) {
+; CHECK: f1:
+; CHECK: ld %f0, 0(%r2)
+; CHECK: br %r14
+ %val = load double *%src
+ ret double %val
+}
+
+; Test the high end of the LD range.
+define double @f2(double *%src) {
+; CHECK: f2:
+; CHECK: ld %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 511
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the next doubleword up, which should use LDY instead of LD.
+define double @f3(double *%src) {
+; CHECK: f3:
+; CHECK: ldy %f0, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 512
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the high end of the aligned LDY range.
+define double @f4(double *%src) {
+; CHECK: f4:
+; CHECK: ldy %f0, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 65535
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f5(double *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: ld %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 65536
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the high end of the negative aligned LDY range.
+define double @f6(double *%src) {
+; CHECK: f6:
+; CHECK: ldy %f0, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -1
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the low end of the LDY range.
+define double @f7(double *%src) {
+; CHECK: f7:
+; CHECK: ldy %f0, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -65536
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f8(double *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524296
+; CHECK: ld %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -65537
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check that LD allows an index.
+define double @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: ld %f0, 4095({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to double *
+ %val = load double *%ptr
+ ret double %val
+}
+
+; Check that LDY allows an index.
+define double @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: ldy %f0, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to double *
+ %val = load double *%ptr
+ ret double %val
+}
diff --git a/test/CodeGen/SystemZ/fp-move-05.ll b/test/CodeGen/SystemZ/fp-move-05.ll
new file mode 100644
index 000000000000..66ad048fbed7
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-05.ll
@@ -0,0 +1,156 @@
+; Test 128-bit floating-point loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
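+; An fp128 value occupies a pair of floating-point registers (%f0 and %f2
+; here) and is moved as two 8-byte halves, so the second half's displacement
+; is always the first half's plus 8. That is why the boundary cases below
+; can need a mixture of LD and LDY for the two halves.
+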
+; Check loads with no offset.
+define double @f1(i64 %src) {
+; CHECK: f1:
+; CHECK: ld %f0, 0(%r2)
+; CHECK: ld %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = inttoptr i64 %src to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the highest aligned offset that allows LD for both halves.
+define double @f2(i64 %src) {
+; CHECK: f2:
+; CHECK: ld %f0, 4080(%r2)
+; CHECK: ld %f2, 4088(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4080
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the next doubleword up, which requires a mixture of LD and LDY.
+define double @f3(i64 %src) {
+; CHECK: f3:
+; CHECK: ld %f0, 4088(%r2)
+; CHECK: ldy %f2, 4096(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4088
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the next doubleword after that, which requires LDY for both halves.
+define double @f4(i64 %src) {
+; CHECK: f4:
+; CHECK: ldy %f0, 4096(%r2)
+; CHECK: ldy %f2, 4104(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4096
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the highest aligned offset that allows LDY for both halves.
+define double @f5(i64 %src) {
+; CHECK: f5:
+; CHECK: ldy %f0, 524272(%r2)
+; CHECK: ldy %f2, 524280(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 524272
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the next doubleword up, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define double @f6(i64 %src) {
+; CHECK: f6:
+; CHECK: lay %r1, 524280(%r2)
+; CHECK: ld %f0, 0(%r1)
+; CHECK: ld %f2, 8(%r1)
+; CHECK: br %r14
+ %add = add i64 %src, 524280
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the highest aligned negative offset, which needs a combination of
+; LDY and LD.
+define double @f7(i64 %src) {
+; CHECK: f7:
+; CHECK: ldy %f0, -8(%r2)
+; CHECK: ld %f2, 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -8
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the next doubleword down, which requires LDY for both halves.
+define double @f8(i64 %src) {
+; CHECK: f8:
+; CHECK: ldy %f0, -16(%r2)
+; CHECK: ldy %f2, -8(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -16
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the lowest offset that allows LDY for both halves.
+define double @f9(i64 %src) {
+; CHECK: f9:
+; CHECK: ldy %f0, -524288(%r2)
+; CHECK: ldy %f2, -524280(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -524288
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check the next doubleword down, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define double @f10(i64 %src) {
+; CHECK: f10:
+; CHECK: agfi %r2, -524296
+; CHECK: ld %f0, 0(%r2)
+; CHECK: ld %f2, 8(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -524296
+ %ptr = inttoptr i64 %add to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
+
+; Check that indices are allowed.
+define double @f11(i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: ld %f0, 4088({{%r2,%r3|%r3,%r2}})
+; CHECK: ldy %f2, 4096({{%r2,%r3|%r3,%r2}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4088
+ %ptr = inttoptr i64 %add2 to fp128 *
+ %val = load fp128 *%ptr
+ %trunc = fptrunc fp128 %val to double
+ ret double %trunc
+}
diff --git a/test/CodeGen/SystemZ/fp-move-06.ll b/test/CodeGen/SystemZ/fp-move-06.ll
new file mode 100644
index 000000000000..b660c2ac223d
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-06.ll
@@ -0,0 +1,113 @@
+; Test 32-bit floating-point stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
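+; STE and STEY split the same way as LE and LEY do in fp-move-03.ll:
+; STE takes the short unsigned displacement, STEY the long signed one.
+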
+; Test the low end of the STE range.
+define void @f1(float *%ptr, float %val) {
+; CHECK: f1:
+; CHECK: ste %f0, 0(%r2)
+; CHECK: br %r14
+ store float %val, float *%ptr
+ ret void
+}
+
+; Test the high end of the STE range.
+define void @f2(float *%src, float %val) {
+; CHECK: f2:
+; CHECK: ste %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 1023
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the next word up, which should use STEY instead of STE.
+define void @f3(float *%src, float %val) {
+; CHECK: f3:
+; CHECK: stey %f0, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 1024
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the high end of the aligned STEY range.
+define void @f4(float *%src, float %val) {
+; CHECK: f4:
+; CHECK: stey %f0, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 131071
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f5(float *%src, float %val) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: ste %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 131072
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STEY range.
+define void @f6(float *%src, float %val) {
+; CHECK: f6:
+; CHECK: stey %f0, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -1
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the low end of the STEY range.
+define void @f7(float *%src, float %val) {
+; CHECK: f7:
+; CHECK: stey %f0, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -131072
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(float *%src, float %val) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524292
+; CHECK: ste %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%src, i64 -131073
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check that STE allows an index.
+define void @f9(i64 %src, i64 %index, float %val) {
+; CHECK: f9:
+; CHECK: ste %f0, 4092({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to float *
+ store float %val, float *%ptr
+ ret void
+}
+
+; Check that STEY allows an index.
+define void @f10(i64 %src, i64 %index, float %val) {
+; CHECK: f10:
+; CHECK: stey %f0, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to float *
+ store float %val, float *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-07.ll b/test/CodeGen/SystemZ/fp-move-07.ll
new file mode 100644
index 000000000000..0cb0474157d0
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-07.ll
@@ -0,0 +1,110 @@
+; Test 64-bit floating-point stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test the low end of the STD range.
+define void @f1(double *%src, double %val) {
+; CHECK: f1:
+; CHECK: std %f0, 0(%r2)
+; CHECK: br %r14
+ store double %val, double *%src
+ ret void
+}
+
+; Test the high end of the STD range.
+define void @f2(double *%src, double %val) {
+; CHECK: f2:
+; CHECK: std %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 511
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which should use STDY instead of STD.
+define void @f3(double *%src, double %val) {
+; CHECK: f3:
+; CHECK: stdy %f0, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 512
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the high end of the aligned STDY range.
+define void @f4(double *%src, double %val) {
+; CHECK: f4:
+; CHECK: stdy %f0, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 65535
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f5(double *%src, double %val) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: std %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 65536
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STDY range.
+define void @f6(double *%src, double %val) {
+; CHECK: f6:
+; CHECK: stdy %f0, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -1
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the low end of the STDY range.
+define void @f7(double *%src, double %val) {
+; CHECK: f7:
+; CHECK: stdy %f0, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -65536
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(double *%src, double %val) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524296
+; CHECK: std %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%src, i64 -65537
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check that STD allows an index.
+define void @f9(i64 %src, i64 %index, double %val) {
+; CHECK: f9:
+; CHECK: std %f0, 4095({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to double *
+ store double %val, double *%ptr
+ ret void
+}
+
+; Check that STDY allows an index.
+define void @f10(i64 %src, i64 %index, double %val) {
+; CHECK: f10:
+; CHECK: stdy %f0, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to double *
+ store double %val, double *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-move-08.ll b/test/CodeGen/SystemZ/fp-move-08.ll
new file mode 100644
index 000000000000..448d2ace1762
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-move-08.ll
@@ -0,0 +1,151 @@
+; Test 128-bit floating-point stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check stores with no offset.
+define void @f1(i64 %src, double %val) {
+; CHECK: f1:
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %ptr = inttoptr i64 %src to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the highest aligned offset that allows STD for both halves.
+define void @f2(i64 %src, double %val) {
+; CHECK: f2:
+; CHECK: std %f0, 4080(%r2)
+; CHECK: std %f2, 4088(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4080
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which requires a mixture of STD and STDY.
+define void @f3(i64 %src, double %val) {
+; CHECK: f3:
+; CHECK: std %f0, 4088(%r2)
+; CHECK: stdy %f2, 4096(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4088
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the next doubleword after that, which requires STDY for both halves.
+define void @f4(i64 %src, double %val) {
+; CHECK: f4:
+; CHECK: stdy %f0, 4096(%r2)
+; CHECK: stdy %f2, 4104(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 4096
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the highest aligned offset that allows STDY for both halves.
+define void @f5(i64 %src, double %val) {
+; CHECK: f5:
+; CHECK: stdy %f0, 524272(%r2)
+; CHECK: stdy %f2, 524280(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, 524272
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i64 %src, double %val) {
+; CHECK: f6:
+; CHECK: lay %r1, 524280(%r2)
+; CHECK: std %f0, 0(%r1)
+; CHECK: std %f2, 8(%r1)
+; CHECK: br %r14
+ %add = add i64 %src, 524280
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the highest aligned negative offset, which needs a combination of
+; STDY and STD.
+define void @f7(i64 %src, double %val) {
+; CHECK: f7:
+; CHECK: stdy %f0, -8(%r2)
+; CHECK: std %f2, 0(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -8
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which requires STDY for both halves.
+define void @f8(i64 %src, double %val) {
+; CHECK: f8:
+; CHECK: stdy %f0, -16(%r2)
+; CHECK: stdy %f2, -8(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -16
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the lowest offset that allows STDY for both halves.
+define void @f9(i64 %src, double %val) {
+; CHECK: f9:
+; CHECK: stdy %f0, -524288(%r2)
+; CHECK: stdy %f2, -524280(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -524288
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define void @f10(i64 %src, double %val) {
+; CHECK: f10:
+; CHECK: agfi %r2, -524296
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %add = add i64 %src, -524296
+ %ptr = inttoptr i64 %add to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
+
+; Check that indices are allowed.
+define void @f11(i64 %src, i64 %index, double %val) {
+; CHECK: f11:
+; CHECK: std %f0, 4088({{%r2,%r3|%r3,%r2}})
+; CHECK: stdy %f2, 4096({{%r2,%r3|%r3,%r2}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4088
+ %ptr = inttoptr i64 %add2 to fp128 *
+ %ext = fpext double %val to fp128
+ store fp128 %ext, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-01.ll b/test/CodeGen/SystemZ/fp-mul-01.ll
new file mode 100644
index 000000000000..68c78ee2da6b
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-01.ll
@@ -0,0 +1,71 @@
+; Test multiplication of two f32s, producing an f32 result.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register multiplication.
+define float @f1(float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: meebr %f0, %f2
+; CHECK: br %r14
+ %res = fmul float %f1, %f2
+ ret float %res
+}
+
+; Check the low end of the MEEB range.
+define float @f2(float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: meeb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %res = fmul float %f1, %f2
+ ret float %res
+}
+
+; Check the high end of the aligned MEEB range.
+define float @f3(float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: meeb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %res = fmul float %f1, %f2
+ ret float %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f4(float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: meeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %res = fmul float %f1, %f2
+ ret float %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define float @f5(float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: meeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %res = fmul float %f1, %f2
+ ret float %res
+}
+
+; Check that MEEB allows indices.
+define float @f6(float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: meeb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %res = fmul float %f1, %f2
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-02.ll b/test/CodeGen/SystemZ/fp-mul-02.ll
new file mode 100644
index 000000000000..ec51a4c1d679
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-02.ll
@@ -0,0 +1,86 @@
+; Test multiplication of two f32s, producing an f64 result.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
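+; The fpext+fpext+fmul pattern below should be matched onto the widening
+; forms MDEBR and MDEB, folding both extensions into the multiply itself.
+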
+; Check register multiplication.
+define double @f1(float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: mdebr %f0, %f2
+; CHECK: br %r14
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
+
+; Check the low end of the MDEB range.
+define double @f2(float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: mdeb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
+
+; Check the high end of the aligned MDEB range.
+define double @f3(float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: mdeb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: mdeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: mdeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
+
+; Check that MDEB allows indices.
+define double @f6(float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: mdeb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %f1x = fpext float %f1 to double
+ %f2x = fpext float %f2 to double
+ %res = fmul double %f1x, %f2x
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-03.ll b/test/CodeGen/SystemZ/fp-mul-03.ll
new file mode 100644
index 000000000000..9849247deccb
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-03.ll
@@ -0,0 +1,71 @@
+; Test multiplication of two f64s, producing an f64 result.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register multiplication.
+define double @f1(double %f1, double %f2) {
+; CHECK: f1:
+; CHECK: mdbr %f0, %f2
+; CHECK: br %r14
+ %res = fmul double %f1, %f2
+ ret double %res
+}
+
+; Check the low end of the MDB range.
+define double @f2(double %f1, double *%ptr) {
+; CHECK: f2:
+; CHECK: mdb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %res = fmul double %f1, %f2
+ ret double %res
+}
+
+; Check the high end of the aligned MDB range.
+define double @f3(double %f1, double *%base) {
+; CHECK: f3:
+; CHECK: mdb %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %res = fmul double %f1, %f2
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %f1, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: mdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %res = fmul double %f1, %f2
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(double %f1, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: mdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %res = fmul double %f1, %f2
+ ret double %res
+}
+
+; Check that MDB allows indices.
+define double @f6(double %f1, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: mdb %f0, 800(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %res = fmul double %f1, %f2
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-04.ll b/test/CodeGen/SystemZ/fp-mul-04.ll
new file mode 100644
index 000000000000..712ead85cbd4
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-04.ll
@@ -0,0 +1,103 @@
+; Test multiplication of two f64s, producing an f128 result.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
+; point of view, because %f2 is the low half of the FP128 register pair
+; starting at %f0. Pass the multiplier in %f4 instead.
+define void @f1(double %f1, double %dummy, double %f2, fp128 *%dst) {
+; CHECK: f1:
+; CHECK: mxdbr %f0, %f4
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the low end of the MXDB range.
+define void @f2(double %f1, double *%ptr, fp128 *%dst) {
+; CHECK: f2:
+; CHECK: mxdb %f0, 0(%r2)
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the high end of the aligned MXDB range.
+define void @f3(double %f1, double *%base, fp128 *%dst) {
+; CHECK: f3:
+; CHECK: mxdb %f0, 4088(%r2)
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f4(double %f1, double *%base, fp128 *%dst) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: mxdb %f0, 0(%r2)
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f5(double %f1, double *%base, fp128 *%dst) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: mxdb %f0, 0(%r2)
+; CHECK: std %f0, 0(%r3)
+; CHECK: std %f2, 8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
+
+; Check that MXDB allows indices.
+define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: mxdb %f0, 800(%r1,%r2)
+; CHECK: std %f0, 0(%r4)
+; CHECK: std %f2, 8(%r4)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %f1x = fpext double %f1 to fp128
+ %f2x = fpext double %f2 to fp128
+ %res = fmul fp128 %f1x, %f2x
+ store fp128 %res, fp128 *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-05.ll b/test/CodeGen/SystemZ/fp-mul-05.ll
new file mode 100644
index 000000000000..df5bc4e70755
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-05.ll
@@ -0,0 +1,20 @@
+; Test multiplication of two f128s.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; There is no memory form of 128-bit multiplication.
+define void @f1(fp128 *%ptr, float %f2) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: ld %f1, 0(%r2)
+; CHECK: ld %f3, 8(%r2)
+; CHECK: mxbr %f1, %f0
+; CHECK: std %f1, 0(%r2)
+; CHECK: std %f3, 8(%r2)
+; CHECK: br %r14
+ %f1 = load fp128 *%ptr
+ %f2x = fpext float %f2 to fp128
+ %res = fmul fp128 %f1, %f2x
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-06.ll b/test/CodeGen/SystemZ/fp-mul-06.ll
new file mode 100644
index 000000000000..8124c680371d
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-06.ll
@@ -0,0 +1,108 @@
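+; Test 32-bit floating-point multiply-and-add.
+;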
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
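+; MAEB(R) is a multiply-and-add that accumulates into its first operand,
+; so the result ends up in the register that held %acc and must be copied
+; to the return register %f0 with LER.
+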
+define float @f1(float %f1, float %f2, float %acc) {
+; CHECK: f1:
+; CHECK: maebr %f4, %f0, %f2
+; CHECK: ler %f0, %f4
+; CHECK: br %r14
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f2(float %f1, float *%ptr, float %acc) {
+; CHECK: f2:
+; CHECK: maeb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f3(float %f1, float *%base, float %acc) {
+; CHECK: f3:
+; CHECK: maeb %f2, %f0, 4092(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f4(float %f1, float *%base, float %acc) {
+; The important thing here is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: maeb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f5(float %f1, float *%base, float %acc) {
+; Here too the important thing is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: maeb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f6(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: maeb %f2, %f0, 0(%r1,%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 %index
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f7(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f7:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: maeb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 1023
+ %ptr = getelementptr float *%base, i64 %index2
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
+
+define float @f8(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f8:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
+; CHECK: maeb %f2, %f0, 0(%r1)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 1024
+ %ptr = getelementptr float *%base, i64 %index2
+ %f2 = load float *%ptr
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-07.ll b/test/CodeGen/SystemZ/fp-mul-07.ll
new file mode 100644
index 000000000000..b8e44830f331
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-07.ll
@@ -0,0 +1,104 @@
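+; Test 64-bit floating-point multiply-and-add.
+;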
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare double @llvm.fma.f64(double %f1, double %f2, double %f3)
+
+define double @f1(double %f1, double %f2, double %acc) {
+; CHECK: f1:
+; CHECK: madbr %f4, %f0, %f2
+; CHECK: ldr %f0, %f4
+; CHECK: br %r14
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f2(double %f1, double *%ptr, double %acc) {
+; CHECK: f2:
+; CHECK: madb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f3(double %f1, double *%base, double %acc) {
+; CHECK: f3:
+; CHECK: madb %f2, %f0, 4088(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f4(double %f1, double *%base, double %acc) {
+; The important thing here is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: madb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f5(double %f1, double *%base, double %acc) {
+; Here too the important thing is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: madb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f6(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: madb %f2, %f0, 0(%r1,%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 %index
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f7(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f7:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: madb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 511
+ %ptr = getelementptr double *%base, i64 %index2
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
+
+define double @f8(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f8:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
+; CHECK: madb %f2, %f0, 0(%r1)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 512
+ %ptr = getelementptr double *%base, i64 %index2
+ %f2 = load double *%ptr
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-08.ll b/test/CodeGen/SystemZ/fp-mul-08.ll
new file mode 100644
index 000000000000..5c1474063a16
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-08.ll
@@ -0,0 +1,115 @@
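+; Test 32-bit floating-point multiply-and-subtract.
+;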
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @llvm.fma.f32(float %f1, float %f2, float %f3)
+
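+; The "fsub float -0.0, %acc" below is the canonical IR idiom for negating
+; %acc; fma with a negated addend is what multiply-and-subtract computes.
+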
+define float @f1(float %f1, float %f2, float %acc) {
+; CHECK: f1:
+; CHECK: msebr %f4, %f0, %f2
+; CHECK: ler %f0, %f4
+; CHECK: br %r14
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f2(float %f1, float *%ptr, float %acc) {
+; CHECK: f2:
+; CHECK: mseb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f3(float %f1, float *%base, float %acc) {
+; CHECK: f3:
+; CHECK: mseb %f2, %f0, 4092(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f4(float %f1, float *%base, float %acc) {
+; The important thing here is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: mseb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f5(float %f1, float *%base, float %acc) {
+; Here too the important thing is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: mseb %f2, %f0, 0(%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f6(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: mseb %f2, %f0, 0(%r1,%r2)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 %index
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f7(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f7:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: mseb %f2, %f0, 4092({{%r1,%r2|%r2,%r1}})
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 1023
+ %ptr = getelementptr float *%base, i64 %index2
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
+
+define float @f8(float %f1, float *%base, i64 %index, float %acc) {
+; CHECK: f8:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
+; CHECK: mseb %f2, %f0, 0(%r1)
+; CHECK: ler %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 1024
+ %ptr = getelementptr float *%base, i64 %index2
+ %f2 = load float *%ptr
+ %negacc = fsub float -0.0, %acc
+ %res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-09.ll b/test/CodeGen/SystemZ/fp-mul-09.ll
new file mode 100644
index 000000000000..bcae1e35e6eb
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-mul-09.ll
@@ -0,0 +1,112 @@
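+; Test 64-bit floating-point multiply-and-subtract.
+;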
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare double @llvm.fma.f64(double %f1, double %f2, double %f3)
+
+define double @f1(double %f1, double %f2, double %acc) {
+; CHECK: f1:
+; CHECK: msdbr %f4, %f0, %f2
+; CHECK: ldr %f0, %f4
+; CHECK: br %r14
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f2(double %f1, double *%ptr, double %acc) {
+; CHECK: f2:
+; CHECK: msdb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f3(double %f1, double *%base, double %acc) {
+; CHECK: f3:
+; CHECK: msdb %f2, %f0, 4088(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f4(double %f1, double *%base, double %acc) {
+; The important thing here is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: msdb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f5(double %f1, double *%base, double %acc) {
+; Here too the important thing is that we don't generate an out-of-range
+; displacement. Other sequences besides this one would be OK.
+;
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: msdb %f2, %f0, 0(%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f6(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: msdb %f2, %f0, 0(%r1,%r2)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 %index
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f7(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f7:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: msdb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 511
+ %ptr = getelementptr double *%base, i64 %index2
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
+
+define double @f8(double %f1, double *%base, i64 %index, double %acc) {
+; CHECK: f8:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
+; CHECK: msdb %f2, %f0, 0(%r1)
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %index2 = add i64 %index, 512
+ %ptr = getelementptr double *%base, i64 %index2
+ %f2 = load double *%ptr
+ %negacc = fsub double -0.0, %acc
+ %res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-neg-01.ll b/test/CodeGen/SystemZ/fp-neg-01.ll
new file mode 100644
index 000000000000..09a4a53e41d1
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-neg-01.ll
@@ -0,0 +1,38 @@
+; Test floating-point negation.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test f32.
+define float @f1(float %f) {
+; CHECK: f1:
+; CHECK: lcebr %f0, %f0
+; CHECK: br %r14
+ %res = fsub float -0.0, %f
+ ret float %res
+}
+
+; Test f64.
+define double @f2(double %f) {
+; CHECK: f2:
+; CHECK: lcdbr %f0, %f0
+; CHECK: br %r14
+ %res = fsub double -0.0, %f
+ ret double %res
+}
+
+; Test f128. With the loads and stores, a pure negation would probably
+; be better implemented using an XI on the upper byte. Do some extra
+; processing so that using FPRs is unequivocally better.
+define void @f3(fp128 *%ptr, fp128 *%ptr2) {
+; CHECK: f3:
+; CHECK: lcxbr
+; CHECK: dxbr
+; CHECK: br %r14
+ %orig = load fp128 *%ptr
+ %negzero = fpext float -0.0 to fp128
+ %neg = fsub fp128 0xL00000000000000008000000000000000, %orig
+ %op2 = load fp128 *%ptr2
+ %res = fdiv fp128 %neg, %op2
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-round-01.ll b/test/CodeGen/SystemZ/fp-round-01.ll
new file mode 100644
index 000000000000..20325c336648
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-round-01.ll
@@ -0,0 +1,40 @@
+; Test rint()-like rounding, with non-integer values triggering an
+; inexact condition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
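+; The middle operand of FIEBR/FIDBR/FIXBR is a rounding-mode mask; 0 means
+; "round according to the current FPC rounding mode", which is what rint()
+; requires.
+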
+; Test f32.
+declare float @llvm.rint.f32(float %f)
+define float @f1(float %f) {
+; CHECK: f1:
+; CHECK: fiebr %f0, 0, %f0
+; CHECK: br %r14
+ %res = call float @llvm.rint.f32(float %f)
+ ret float %res
+}
+
+; Test f64.
+declare double @llvm.rint.f64(double %f)
+define double @f2(double %f) {
+; CHECK: f2:
+; CHECK: fidbr %f0, 0, %f0
+; CHECK: br %r14
+ %res = call double @llvm.rint.f64(double %f)
+ ret double %res
+}
+
+; Test f128.
+declare fp128 @llvm.rint.f128(fp128 %f)
+define void @f3(fp128 *%ptr) {
+; CHECK: f3:
+; CHECK: fixbr %f0, 0, %f0
+; CHECK: br %r14
+ %src = load fp128 *%ptr
+ %res = call fp128 @llvm.rint.f128(fp128 %src)
+ store fp128 %res, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-01.ll b/test/CodeGen/SystemZ/fp-sqrt-01.ll
new file mode 100644
index 000000000000..7ed27f56d0d0
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sqrt-01.ll
@@ -0,0 +1,73 @@
+; Test 32-bit square root.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare float @llvm.sqrt.f32(float %f)
+
+; Check register square root.
+define float @f1(float %val) {
+; CHECK: f1:
+; CHECK: sqebr %f0, %f0
+; CHECK: br %r14
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
+
+; Check the low end of the SQEB range.
+define float @f2(float *%ptr) {
+; CHECK: f2:
+; CHECK: sqeb %f0, 0(%r2)
+; CHECK: br %r14
+ %val = load float *%ptr
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
+
+; Check the high end of the aligned SQEB range.
+define float @f3(float *%base) {
+; CHECK: f3:
+; CHECK: sqeb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %val = load float *%ptr
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f4(float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: sqeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %val = load float *%ptr
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define float @f5(float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: sqeb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %val = load float *%ptr
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
+
+; Check that SQEB allows indices.
+define float @f6(float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: sqeb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %val = load float *%ptr2
+ %res = call float @llvm.sqrt.f32(float %val)
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-02.ll b/test/CodeGen/SystemZ/fp-sqrt-02.ll
new file mode 100644
index 000000000000..22a91ad2f4f7
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sqrt-02.ll
@@ -0,0 +1,73 @@
+; Test 64-bit square root.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare double @llvm.sqrt.f64(double %f)
+
+; Check register square root.
+define double @f1(double %val) {
+; CHECK: f1:
+; CHECK: sqdbr %f0, %f0
+; CHECK: br %r14
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
+
+; Check the low end of the SQDB range.
+define double @f2(double *%ptr) {
+; CHECK: f2:
+; CHECK: sqdb %f0, 0(%r2)
+; CHECK: br %r14
+ %val = load double *%ptr
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
+
+; Check the high end of the aligned SQDB range.
+define double @f3(double *%base) {
+; CHECK: f3:
+; CHECK: sqdb %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %val = load double *%ptr
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: sqdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %val = load double *%ptr
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: sqdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %val = load double *%ptr
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
+
+; Check that SQDB allows indices.
+define double @f6(double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: sqdb %f0, 800(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %val = load double *%ptr2
+ %res = call double @llvm.sqrt.f64(double %val)
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-03.ll b/test/CodeGen/SystemZ/fp-sqrt-03.ll
new file mode 100644
index 000000000000..1b49af41254f
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sqrt-03.ll
@@ -0,0 +1,20 @@
+; Test 128-bit square root.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare fp128 @llvm.sqrt.f128(fp128 %f)
+
+; There is no memory form of 128-bit square root (SQXBR).
+define void @f1(fp128 *%ptr) {
+; CHECK: f1:
+; CHECK: ld %f0, 0(%r2)
+; CHECK: ld %f2, 8(%r2)
+; CHECK: sqxbr %f0, %f0
+; CHECK: std %f0, 0(%r2)
+; CHECK: std %f2, 8(%r2)
+; CHECK: br %r14
+ %orig = load fp128 *%ptr
+ %sqrt = call fp128 @llvm.sqrt.f128(fp128 %orig)
+ store fp128 %sqrt, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-01.ll b/test/CodeGen/SystemZ/fp-sub-01.ll
new file mode 100644
index 000000000000..b03f04bd017e
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sub-01.ll
@@ -0,0 +1,71 @@
+; Test 32-bit floating-point subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register subtraction.
+define float @f1(float %f1, float %f2) {
+; CHECK: f1:
+; CHECK: sebr %f0, %f2
+; CHECK: br %r14
+ %res = fsub float %f1, %f2
+ ret float %res
+}
+
+; Check the low end of the SEB range.
+define float @f2(float %f1, float *%ptr) {
+; CHECK: f2:
+; CHECK: seb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load float *%ptr
+ %res = fsub float %f1, %f2
+ ret float %res
+}
+
+; Check the high end of the aligned SEB range.
+define float @f3(float %f1, float *%base) {
+; CHECK: f3:
+; CHECK: seb %f0, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1023
+ %f2 = load float *%ptr
+ %res = fsub float %f1, %f2
+ ret float %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define float @f4(float %f1, float *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: seb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 1024
+ %f2 = load float *%ptr
+ %res = fsub float %f1, %f2
+ ret float %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define float @f5(float %f1, float *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -4
+; CHECK: seb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float *%base, i64 -1
+ %f2 = load float *%ptr
+ %res = fsub float %f1, %f2
+ ret float %res
+}
+
+; Check that SEB allows indices.
+define float @f6(float %f1, float *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 2
+; CHECK: seb %f0, 400(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr float *%base, i64 %index
+ %ptr2 = getelementptr float *%ptr1, i64 100
+ %f2 = load float *%ptr2
+ %res = fsub float %f1, %f2
+ ret float %res
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-02.ll b/test/CodeGen/SystemZ/fp-sub-02.ll
new file mode 100644
index 000000000000..bf9848c2fd51
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sub-02.ll
@@ -0,0 +1,71 @@
+; Test 64-bit floating-point subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register subtraction.
+define double @f1(double %f1, double %f2) {
+; CHECK: f1:
+; CHECK: sdbr %f0, %f2
+; CHECK: br %r14
+ %res = fsub double %f1, %f2
+ ret double %res
+}
+
+; Check the low end of the SDB range.
+define double @f2(double %f1, double *%ptr) {
+; CHECK: f2:
+; CHECK: sdb %f0, 0(%r2)
+; CHECK: br %r14
+ %f2 = load double *%ptr
+ %res = fsub double %f1, %f2
+ ret double %res
+}
+
+; Check the high end of the aligned SDB range.
+define double @f3(double %f1, double *%base) {
+; CHECK: f3:
+; CHECK: sdb %f0, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 511
+ %f2 = load double *%ptr
+ %res = fsub double %f1, %f2
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %f1, double *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: sdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 512
+ %f2 = load double *%ptr
+ %res = fsub double %f1, %f2
+ ret double %res
+}
+
+; Check negative displacements, which also need separate address logic.
+define double @f5(double %f1, double *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -8
+; CHECK: sdb %f0, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr double *%base, i64 -1
+ %f2 = load double *%ptr
+ %res = fsub double %f1, %f2
+ ret double %res
+}
+
+; Check that SDB allows indices.
+define double @f6(double %f1, double *%base, i64 %index) {
+; CHECK: f6:
+; CHECK: sllg %r1, %r3, 3
+; CHECK: sdb %f0, 800(%r1,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr double *%base, i64 %index
+ %ptr2 = getelementptr double *%ptr1, i64 100
+ %f2 = load double *%ptr2
+ %res = fsub double %f1, %f2
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-03.ll b/test/CodeGen/SystemZ/fp-sub-03.ll
new file mode 100644
index 000000000000..82bb94dd28b3
--- /dev/null
+++ b/test/CodeGen/SystemZ/fp-sub-03.ll
@@ -0,0 +1,20 @@
+; Test 128-bit floating-point subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; There is no memory form of 128-bit subtraction.
+define void @f1(fp128 *%ptr, float %f2) {
+; CHECK: f1:
+; CHECK: lxebr %f0, %f0
+; CHECK: ld %f1, 0(%r2)
+; CHECK: ld %f3, 8(%r2)
+; CHECK: sxbr %f1, %f0
+; CHECK: std %f1, 0(%r2)
+; CHECK: std %f3, 8(%r2)
+; CHECK: br %r14
+ %f1 = load fp128 *%ptr
+ %f2x = fpext float %f2 to fp128
+ %diff = fsub fp128 %f1, %f2x
+ store fp128 %diff, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-01.ll b/test/CodeGen/SystemZ/frame-01.ll
new file mode 100644
index 000000000000..0d343128c4cd
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-01.ll
@@ -0,0 +1,115 @@
+; Test the allocation of frames in cases where we do not need to save
+; registers in the prologue.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
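+; Background for the boundary values below: AGHI takes a signed 16-bit
+; immediate and AGFI a signed 32-bit immediate, so 32760 is the largest
+; 8-byte-aligned amount that AGHI can both subtract and add back, and
+; 2147483640 plays the same role for AGFI.
+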
+; The CFA offset is 160 (the caller-allocated part of the frame) + 168.
+define void @f1(i64 %x) {
+; CHECK: f1:
+; CHECK: aghi %r15, -168
+; CHECK: .cfi_def_cfa_offset 328
+; CHECK: stg %r2, 160(%r15)
+; CHECK: aghi %r15, 168
+; CHECK: br %r14
+ %y = alloca i64, align 8
+ store volatile i64 %x, i64* %y
+ ret void
+}
+
+; Check frames of size 32760, which is the largest size that can be both
+; allocated and freed using AGHI. This size is big enough to require an
+; emergency spill slot at 160(%r15) for instructions whose unsigned 12-bit
+; displacements would otherwise be out of range. Fill the remaining
+; 32760 - 168 bytes by allocating (32760 - 168) / 8 = 4074 doublewords.
+define void @f2(i64 %x) {
+; CHECK: f2:
+; CHECK: aghi %r15, -32760
+; CHECK: .cfi_def_cfa_offset 32920
+; CHECK: stg %r2, 168(%r15)
+; CHECK: aghi %r15, 32760
+; CHECK: br %r14
+ %y = alloca [4074 x i64], align 8
+ %ptr = getelementptr inbounds [4074 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; Allocate one more doubleword. This is the one frame size that we can
+; allocate using AGHI but must free using AGFI.
+define void @f3(i64 %x) {
+; CHECK: f3:
+; CHECK: aghi %r15, -32768
+; CHECK: .cfi_def_cfa_offset 32928
+; CHECK: stg %r2, 168(%r15)
+; CHECK: agfi %r15, 32768
+; CHECK: br %r14
+ %y = alloca [4075 x i64], align 8
+ %ptr = getelementptr inbounds [4075 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; Allocate another doubleword on top of that. The allocation and free
+; must both use AGFI.
+define void @f4(i64 %x) {
+; CHECK: f4:
+; CHECK: agfi %r15, -32776
+; CHECK: .cfi_def_cfa_offset 32936
+; CHECK: stg %r2, 168(%r15)
+; CHECK: agfi %r15, 32776
+; CHECK: br %r14
+ %y = alloca [4076 x i64], align 8
+ %ptr = getelementptr inbounds [4076 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; The largest size that can be both allocated and freed using AGFI.
+; At this point the frame is too big to represent properly in the CFI.
+define void @f5(i64 %x) {
+; CHECK: f5:
+; CHECK: agfi %r15, -2147483640
+; CHECK: stg %r2, 168(%r15)
+; CHECK: agfi %r15, 2147483640
+; CHECK: br %r14
+ %y = alloca [268435434 x i64], align 8
+ %ptr = getelementptr inbounds [268435434 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; The only frame size that can be allocated using a single AGFI but which
+; must be freed using two instructions.
+define void @f6(i64 %x) {
+; CHECK: f6:
+; CHECK: agfi %r15, -2147483648
+; CHECK: stg %r2, 168(%r15)
+; CHECK: agfi %r15, 2147483640
+; CHECK: aghi %r15, 8
+; CHECK: br %r14
+ %y = alloca [268435435 x i64], align 8
+ %ptr = getelementptr inbounds [268435435 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; The smallest frame size that needs two instructions to both allocate
+; and free the frame.
+define void @f7(i64 %x) {
+; CHECK: f7:
+; CHECK: agfi %r15, -2147483648
+; CHECK: aghi %r15, -8
+; CHECK: stg %r2, 168(%r15)
+; CHECK: agfi %r15, 2147483640
+; CHECK: aghi %r15, 16
+; CHECK: br %r14
+ %y = alloca [268435436 x i64], align 8
+ %ptr = getelementptr inbounds [268435436 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-02.ll b/test/CodeGen/SystemZ/frame-02.ll
new file mode 100644
index 000000000000..589703ec0e74
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-02.ll
@@ -0,0 +1,260 @@
+; Test saving and restoring of call-saved FPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
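+; In the s390x ELF ABI, %f0-%f7 are call-clobbered and %f8-%f15 are
+; call-saved, which is why at most eight FPRs need frame slots here.
+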
+; This function should require all FPRs, but no other spill slots.
+; We need to save and restore 8 of the 16 FPRs, so the frame size
+; should be exactly 160 + 8 * 8 = 224. The CFA offset is 160
+; (the caller-allocated part of the frame) + 224.
+define void @f1(float *%ptr) {
+; CHECK: f1:
+; CHECK: aghi %r15, -224
+; CHECK: .cfi_def_cfa_offset 384
+; CHECK: std %f8, 216(%r15)
+; CHECK: std %f9, 208(%r15)
+; CHECK: std %f10, 200(%r15)
+; CHECK: std %f11, 192(%r15)
+; CHECK: std %f12, 184(%r15)
+; CHECK: std %f13, 176(%r15)
+; CHECK: std %f14, 168(%r15)
+; CHECK: std %f15, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f13, -208
+; CHECK: .cfi_offset %f14, -216
+; CHECK: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK: ld %f8, 216(%r15)
+; CHECK: ld %f9, 208(%r15)
+; CHECK: ld %f10, 200(%r15)
+; CHECK: ld %f11, 192(%r15)
+; CHECK: ld %f12, 184(%r15)
+; CHECK: ld %f13, 176(%r15)
+; CHECK: ld %f14, 168(%r15)
+; CHECK: ld %f15, 160(%r15)
+; CHECK: aghi %r15, 224
+; CHECK: br %r14
+ %l0 = load volatile float *%ptr
+ %l1 = load volatile float *%ptr
+ %l2 = load volatile float *%ptr
+ %l3 = load volatile float *%ptr
+ %l4 = load volatile float *%ptr
+ %l5 = load volatile float *%ptr
+ %l6 = load volatile float *%ptr
+ %l7 = load volatile float *%ptr
+ %l8 = load volatile float *%ptr
+ %l9 = load volatile float *%ptr
+ %l10 = load volatile float *%ptr
+ %l11 = load volatile float *%ptr
+ %l12 = load volatile float *%ptr
+ %l13 = load volatile float *%ptr
+ %l14 = load volatile float *%ptr
+ %l15 = load volatile float *%ptr
+ %add0 = fadd float %l0, %l0
+ %add1 = fadd float %l1, %add0
+ %add2 = fadd float %l2, %add1
+ %add3 = fadd float %l3, %add2
+ %add4 = fadd float %l4, %add3
+ %add5 = fadd float %l5, %add4
+ %add6 = fadd float %l6, %add5
+ %add7 = fadd float %l7, %add6
+ %add8 = fadd float %l8, %add7
+ %add9 = fadd float %l9, %add8
+ %add10 = fadd float %l10, %add9
+ %add11 = fadd float %l11, %add10
+ %add12 = fadd float %l12, %add11
+ %add13 = fadd float %l13, %add12
+ %add14 = fadd float %l14, %add13
+ %add15 = fadd float %l15, %add14
+ store volatile float %add0, float *%ptr
+ store volatile float %add1, float *%ptr
+ store volatile float %add2, float *%ptr
+ store volatile float %add3, float *%ptr
+ store volatile float %add4, float *%ptr
+ store volatile float %add5, float *%ptr
+ store volatile float %add6, float *%ptr
+ store volatile float %add7, float *%ptr
+ store volatile float %add8, float *%ptr
+ store volatile float %add9, float *%ptr
+ store volatile float %add10, float *%ptr
+ store volatile float %add11, float *%ptr
+ store volatile float %add12, float *%ptr
+ store volatile float %add13, float *%ptr
+ store volatile float %add14, float *%ptr
+ store volatile float %add15, float *%ptr
+ ret void
+}
+
+; Like f1, but requires one fewer FPR. We allocate in numerical order,
+; so %f15 is the one that gets dropped.
+define void @f2(float *%ptr) {
+; CHECK: f2:
+; CHECK: aghi %r15, -216
+; CHECK: .cfi_def_cfa_offset 376
+; CHECK: std %f8, 208(%r15)
+; CHECK: std %f9, 200(%r15)
+; CHECK: std %f10, 192(%r15)
+; CHECK: std %f11, 184(%r15)
+; CHECK: std %f12, 176(%r15)
+; CHECK: std %f13, 168(%r15)
+; CHECK: std %f14, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f13, -208
+; CHECK: .cfi_offset %f14, -216
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 208(%r15)
+; CHECK: ld %f9, 200(%r15)
+; CHECK: ld %f10, 192(%r15)
+; CHECK: ld %f11, 184(%r15)
+; CHECK: ld %f12, 176(%r15)
+; CHECK: ld %f13, 168(%r15)
+; CHECK: ld %f14, 160(%r15)
+; CHECK: aghi %r15, 216
+; CHECK: br %r14
+ %l0 = load volatile float *%ptr
+ %l1 = load volatile float *%ptr
+ %l2 = load volatile float *%ptr
+ %l3 = load volatile float *%ptr
+ %l4 = load volatile float *%ptr
+ %l5 = load volatile float *%ptr
+ %l6 = load volatile float *%ptr
+ %l7 = load volatile float *%ptr
+ %l8 = load volatile float *%ptr
+ %l9 = load volatile float *%ptr
+ %l10 = load volatile float *%ptr
+ %l11 = load volatile float *%ptr
+ %l12 = load volatile float *%ptr
+ %l13 = load volatile float *%ptr
+ %l14 = load volatile float *%ptr
+ %add0 = fadd float %l0, %l0
+ %add1 = fadd float %l1, %add0
+ %add2 = fadd float %l2, %add1
+ %add3 = fadd float %l3, %add2
+ %add4 = fadd float %l4, %add3
+ %add5 = fadd float %l5, %add4
+ %add6 = fadd float %l6, %add5
+ %add7 = fadd float %l7, %add6
+ %add8 = fadd float %l8, %add7
+ %add9 = fadd float %l9, %add8
+ %add10 = fadd float %l10, %add9
+ %add11 = fadd float %l11, %add10
+ %add12 = fadd float %l12, %add11
+ %add13 = fadd float %l13, %add12
+ %add14 = fadd float %l14, %add13
+ store volatile float %add0, float *%ptr
+ store volatile float %add1, float *%ptr
+ store volatile float %add2, float *%ptr
+ store volatile float %add3, float *%ptr
+ store volatile float %add4, float *%ptr
+ store volatile float %add5, float *%ptr
+ store volatile float %add6, float *%ptr
+ store volatile float %add7, float *%ptr
+ store volatile float %add8, float *%ptr
+ store volatile float %add9, float *%ptr
+ store volatile float %add10, float *%ptr
+ store volatile float %add11, float *%ptr
+ store volatile float %add12, float *%ptr
+ store volatile float %add13, float *%ptr
+ store volatile float %add14, float *%ptr
+ ret void
+}
+
+; Like f1, but should require only one call-saved FPR.
+define void @f3(float *%ptr) {
+; CHECK: f3:
+; CHECK: aghi %r15, -168
+; CHECK: .cfi_def_cfa_offset 328
+; CHECK: std %f8, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK-NOT: %f9
+; CHECK-NOT: %f10
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 160(%r15)
+; CHECK: aghi %r15, 168
+; CHECK: br %r14
+ %l0 = load volatile float *%ptr
+ %l1 = load volatile float *%ptr
+ %l2 = load volatile float *%ptr
+ %l3 = load volatile float *%ptr
+ %l4 = load volatile float *%ptr
+ %l5 = load volatile float *%ptr
+ %l6 = load volatile float *%ptr
+ %l7 = load volatile float *%ptr
+ %l8 = load volatile float *%ptr
+ %add0 = fadd float %l0, %l0
+ %add1 = fadd float %l1, %add0
+ %add2 = fadd float %l2, %add1
+ %add3 = fadd float %l3, %add2
+ %add4 = fadd float %l4, %add3
+ %add5 = fadd float %l5, %add4
+ %add6 = fadd float %l6, %add5
+ %add7 = fadd float %l7, %add6
+ %add8 = fadd float %l8, %add7
+ store volatile float %add0, float *%ptr
+ store volatile float %add1, float *%ptr
+ store volatile float %add2, float *%ptr
+ store volatile float %add3, float *%ptr
+ store volatile float %add4, float *%ptr
+ store volatile float %add5, float *%ptr
+ store volatile float %add6, float *%ptr
+ store volatile float %add7, float *%ptr
+ store volatile float %add8, float *%ptr
+ ret void
+}
+
+; This function should use all call-clobbered FPRs but no call-saved ones.
+; It shouldn't need to create a frame.
+define void @f4(float *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: %r15
+; CHECK-NOT: %f8
+; CHECK-NOT: %f9
+; CHECK-NOT: %f10
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; CHECK: br %r14
+ %l0 = load volatile float *%ptr
+ %l1 = load volatile float *%ptr
+ %l2 = load volatile float *%ptr
+ %l3 = load volatile float *%ptr
+ %l4 = load volatile float *%ptr
+ %l5 = load volatile float *%ptr
+ %l6 = load volatile float *%ptr
+ %l7 = load volatile float *%ptr
+ %add0 = fadd float %l0, %l0
+ %add1 = fadd float %l1, %add0
+ %add2 = fadd float %l2, %add1
+ %add3 = fadd float %l3, %add2
+ %add4 = fadd float %l4, %add3
+ %add5 = fadd float %l5, %add4
+ %add6 = fadd float %l6, %add5
+ %add7 = fadd float %l7, %add6
+ store volatile float %add0, float *%ptr
+ store volatile float %add1, float *%ptr
+ store volatile float %add2, float *%ptr
+ store volatile float %add3, float *%ptr
+ store volatile float %add4, float *%ptr
+ store volatile float %add5, float *%ptr
+ store volatile float %add6, float *%ptr
+ store volatile float %add7, float *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-03.ll b/test/CodeGen/SystemZ/frame-03.ll
new file mode 100644
index 000000000000..3c4a49977a12
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-03.ll
@@ -0,0 +1,259 @@
+; Like frame-02.ll, but with doubles rather than floats. Internally this
+; uses a different register class, but the set of saved and restored
+; registers should be the same.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This function should require all FPRs, but no other spill slots.
+; We need to save and restore 8 of the 16 FPRs, so the frame size
+; should be exactly 160 + 8 * 8 = 224. The CFA offset is 160
+; (the caller-allocated part of the frame) + 224.
+define void @f1(double *%ptr) {
+; CHECK: f1:
+; CHECK: aghi %r15, -224
+; CHECK: .cfi_def_cfa_offset 384
+; CHECK: std %f8, 216(%r15)
+; CHECK: std %f9, 208(%r15)
+; CHECK: std %f10, 200(%r15)
+; CHECK: std %f11, 192(%r15)
+; CHECK: std %f12, 184(%r15)
+; CHECK: std %f13, 176(%r15)
+; CHECK: std %f14, 168(%r15)
+; CHECK: std %f15, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f13, -208
+; CHECK: .cfi_offset %f14, -216
+; CHECK: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK: ld %f8, 216(%r15)
+; CHECK: ld %f9, 208(%r15)
+; CHECK: ld %f10, 200(%r15)
+; CHECK: ld %f11, 192(%r15)
+; CHECK: ld %f12, 184(%r15)
+; CHECK: ld %f13, 176(%r15)
+; CHECK: ld %f14, 168(%r15)
+; CHECK: ld %f15, 160(%r15)
+; CHECK: aghi %r15, 224
+; CHECK: br %r14
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %l9 = load volatile double *%ptr
+ %l10 = load volatile double *%ptr
+ %l11 = load volatile double *%ptr
+ %l12 = load volatile double *%ptr
+ %l13 = load volatile double *%ptr
+ %l14 = load volatile double *%ptr
+ %l15 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ %add8 = fadd double %l8, %add7
+ %add9 = fadd double %l9, %add8
+ %add10 = fadd double %l10, %add9
+ %add11 = fadd double %l11, %add10
+ %add12 = fadd double %l12, %add11
+ %add13 = fadd double %l13, %add12
+ %add14 = fadd double %l14, %add13
+ %add15 = fadd double %l15, %add14
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ store volatile double %add8, double *%ptr
+ store volatile double %add9, double *%ptr
+ store volatile double %add10, double *%ptr
+ store volatile double %add11, double *%ptr
+ store volatile double %add12, double *%ptr
+ store volatile double %add13, double *%ptr
+ store volatile double %add14, double *%ptr
+ store volatile double %add15, double *%ptr
+ ret void
+}
+
+; Like f1, but requires one fewer FPR. We allocate in numerical order,
+; so %f15 is the one that gets dropped.
+define void @f2(double *%ptr) {
+; CHECK: f2:
+; CHECK: aghi %r15, -216
+; CHECK: .cfi_def_cfa_offset 376
+; CHECK: std %f8, 208(%r15)
+; CHECK: std %f9, 200(%r15)
+; CHECK: std %f10, 192(%r15)
+; CHECK: std %f11, 184(%r15)
+; CHECK: std %f12, 176(%r15)
+; CHECK: std %f13, 168(%r15)
+; CHECK: std %f14, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f13, -208
+; CHECK: .cfi_offset %f14, -216
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 208(%r15)
+; CHECK: ld %f9, 200(%r15)
+; CHECK: ld %f10, 192(%r15)
+; CHECK: ld %f11, 184(%r15)
+; CHECK: ld %f12, 176(%r15)
+; CHECK: ld %f13, 168(%r15)
+; CHECK: ld %f14, 160(%r15)
+; CHECK: aghi %r15, 216
+; CHECK: br %r14
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %l9 = load volatile double *%ptr
+ %l10 = load volatile double *%ptr
+ %l11 = load volatile double *%ptr
+ %l12 = load volatile double *%ptr
+ %l13 = load volatile double *%ptr
+ %l14 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ %add8 = fadd double %l8, %add7
+ %add9 = fadd double %l9, %add8
+ %add10 = fadd double %l10, %add9
+ %add11 = fadd double %l11, %add10
+ %add12 = fadd double %l12, %add11
+ %add13 = fadd double %l13, %add12
+ %add14 = fadd double %l14, %add13
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ store volatile double %add8, double *%ptr
+ store volatile double %add9, double *%ptr
+ store volatile double %add10, double *%ptr
+ store volatile double %add11, double *%ptr
+ store volatile double %add12, double *%ptr
+ store volatile double %add13, double *%ptr
+ store volatile double %add14, double *%ptr
+ ret void
+}
+
+; Like f1, but should require only one call-saved FPR.
+define void @f3(double *%ptr) {
+; CHECK: f3:
+; CHECK: aghi %r15, -168
+; CHECK: .cfi_def_cfa_offset 328
+; CHECK: std %f8, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK-NOT: %f9
+; CHECK-NOT: %f10
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 160(%r15)
+; CHECK: aghi %r15, 168
+; CHECK: br %r14
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ %add8 = fadd double %l8, %add7
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ store volatile double %add8, double *%ptr
+ ret void
+}
+
+; This function should use all call-clobbered FPRs but no call-saved ones.
+; It shouldn't need to create a frame.
+define void @f4(double *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: %r15
+; CHECK-NOT: %f8
+; CHECK-NOT: %f9
+; CHECK-NOT: %f10
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; CHECK: br %r14
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-04.ll b/test/CodeGen/SystemZ/frame-04.ll
new file mode 100644
index 000000000000..360f85cde322
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-04.ll
@@ -0,0 +1,187 @@
+; Like frame-02.ll, but with long doubles rather than floats. Some of the
+; cases are slightly different because we need to allocate pairs of FPRs.
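+; (Informal note: an fp128 value occupies the FPR pair fN and fN+2,
+; e.g. %f8+%f10 or %f13+%f15.)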
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This function should require all FPRs, but no other spill slots.
+; We need to save and restore 8 of the 16 FPRs, so the frame size
+; should be exactly 160 + 8 * 8 = 224. The CFA offset is 160
+; (the caller-allocated part of the frame) + 224.
+define void @f1(fp128 *%ptr) {
+; CHECK: f1:
+; CHECK: aghi %r15, -224
+; CHECK: .cfi_def_cfa_offset 384
+; CHECK: std %f8, 216(%r15)
+; CHECK: std %f9, 208(%r15)
+; CHECK: std %f10, 200(%r15)
+; CHECK: std %f11, 192(%r15)
+; CHECK: std %f12, 184(%r15)
+; CHECK: std %f13, 176(%r15)
+; CHECK: std %f14, 168(%r15)
+; CHECK: std %f15, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f13, -208
+; CHECK: .cfi_offset %f14, -216
+; CHECK: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK: ld %f8, 216(%r15)
+; CHECK: ld %f9, 208(%r15)
+; CHECK: ld %f10, 200(%r15)
+; CHECK: ld %f11, 192(%r15)
+; CHECK: ld %f12, 184(%r15)
+; CHECK: ld %f13, 176(%r15)
+; CHECK: ld %f14, 168(%r15)
+; CHECK: ld %f15, 160(%r15)
+; CHECK: aghi %r15, 224
+; CHECK: br %r14
+ %l0 = load volatile fp128 *%ptr
+ %l1 = load volatile fp128 *%ptr
+ %l4 = load volatile fp128 *%ptr
+ %l5 = load volatile fp128 *%ptr
+ %l8 = load volatile fp128 *%ptr
+ %l9 = load volatile fp128 *%ptr
+ %l12 = load volatile fp128 *%ptr
+ %l13 = load volatile fp128 *%ptr
+ %add0 = fadd fp128 %l0, %l0
+ %add1 = fadd fp128 %l1, %add0
+ %add4 = fadd fp128 %l4, %add1
+ %add5 = fadd fp128 %l5, %add4
+ %add8 = fadd fp128 %l8, %add5
+ %add9 = fadd fp128 %l9, %add8
+ %add12 = fadd fp128 %l12, %add9
+ %add13 = fadd fp128 %l13, %add12
+ store volatile fp128 %add0, fp128 *%ptr
+ store volatile fp128 %add1, fp128 *%ptr
+ store volatile fp128 %add4, fp128 *%ptr
+ store volatile fp128 %add5, fp128 *%ptr
+ store volatile fp128 %add8, fp128 *%ptr
+ store volatile fp128 %add9, fp128 *%ptr
+ store volatile fp128 %add12, fp128 *%ptr
+ store volatile fp128 %add13, fp128 *%ptr
+ ret void
+}
+
+; Like f1, but requires one fewer FPR pair. We allocate in numerical order,
+; so %f13+%f15 is the pair that gets dropped.
+define void @f2(fp128 *%ptr) {
+; CHECK: f2:
+; CHECK: aghi %r15, -208
+; CHECK: .cfi_def_cfa_offset 368
+; CHECK: std %f8, 200(%r15)
+; CHECK: std %f9, 192(%r15)
+; CHECK: std %f10, 184(%r15)
+; CHECK: std %f11, 176(%r15)
+; CHECK: std %f12, 168(%r15)
+; CHECK: std %f14, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f9, -176
+; CHECK: .cfi_offset %f10, -184
+; CHECK: .cfi_offset %f11, -192
+; CHECK: .cfi_offset %f12, -200
+; CHECK: .cfi_offset %f14, -208
+; CHECK-NOT: %f13
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 200(%r15)
+; CHECK: ld %f9, 192(%r15)
+; CHECK: ld %f10, 184(%r15)
+; CHECK: ld %f11, 176(%r15)
+; CHECK: ld %f12, 168(%r15)
+; CHECK: ld %f14, 160(%r15)
+; CHECK: aghi %r15, 208
+; CHECK: br %r14
+ %l0 = load volatile fp128 *%ptr
+ %l1 = load volatile fp128 *%ptr
+ %l4 = load volatile fp128 *%ptr
+ %l5 = load volatile fp128 *%ptr
+ %l8 = load volatile fp128 *%ptr
+ %l9 = load volatile fp128 *%ptr
+ %l12 = load volatile fp128 *%ptr
+ %add0 = fadd fp128 %l0, %l0
+ %add1 = fadd fp128 %l1, %add0
+ %add4 = fadd fp128 %l4, %add1
+ %add5 = fadd fp128 %l5, %add4
+ %add8 = fadd fp128 %l8, %add5
+ %add9 = fadd fp128 %l9, %add8
+ %add12 = fadd fp128 %l12, %add9
+ store volatile fp128 %add0, fp128 *%ptr
+ store volatile fp128 %add1, fp128 *%ptr
+ store volatile fp128 %add4, fp128 *%ptr
+ store volatile fp128 %add5, fp128 *%ptr
+ store volatile fp128 %add8, fp128 *%ptr
+ store volatile fp128 %add9, fp128 *%ptr
+ store volatile fp128 %add12, fp128 *%ptr
+ ret void
+}
+
+; Like f1, but requires only one call-saved FPR pair. We allocate in
+; numerical order, so the pair should be %f8+%f10.
+define void @f3(fp128 *%ptr) {
+; CHECK: f3:
+; CHECK: aghi %r15, -176
+; CHECK: .cfi_def_cfa_offset 336
+; CHECK: std %f8, 168(%r15)
+; CHECK: std %f10, 160(%r15)
+; CHECK: .cfi_offset %f8, -168
+; CHECK: .cfi_offset %f10, -176
+; CHECK-NOT: %f9
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; ...main function body...
+; CHECK: ld %f8, 168(%r15)
+; CHECK: ld %f10, 160(%r15)
+; CHECK: aghi %r15, 176
+; CHECK: br %r14
+ %l0 = load volatile fp128 *%ptr
+ %l1 = load volatile fp128 *%ptr
+ %l4 = load volatile fp128 *%ptr
+ %l5 = load volatile fp128 *%ptr
+ %l8 = load volatile fp128 *%ptr
+ %add0 = fadd fp128 %l0, %l0
+ %add1 = fadd fp128 %l1, %add0
+ %add4 = fadd fp128 %l4, %add1
+ %add5 = fadd fp128 %l5, %add4
+ %add8 = fadd fp128 %l8, %add5
+ store volatile fp128 %add0, fp128 *%ptr
+ store volatile fp128 %add1, fp128 *%ptr
+ store volatile fp128 %add4, fp128 *%ptr
+ store volatile fp128 %add5, fp128 *%ptr
+ store volatile fp128 %add8, fp128 *%ptr
+ ret void
+}
+
+; This function should use all call-clobbered FPRs but no call-saved ones.
+; It shouldn't need to create a frame.
+define void @f4(fp128 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: %r15
+; CHECK-NOT: %f8
+; CHECK-NOT: %f9
+; CHECK-NOT: %f10
+; CHECK-NOT: %f11
+; CHECK-NOT: %f12
+; CHECK-NOT: %f13
+; CHECK-NOT: %f14
+; CHECK-NOT: %f15
+; CHECK: br %r14
+ %l0 = load volatile fp128 *%ptr
+ %l1 = load volatile fp128 *%ptr
+ %l4 = load volatile fp128 *%ptr
+ %l5 = load volatile fp128 *%ptr
+ %add0 = fadd fp128 %l0, %l0
+ %add1 = fadd fp128 %l1, %add0
+ %add4 = fadd fp128 %l4, %add1
+ %add5 = fadd fp128 %l5, %add4
+ store volatile fp128 %add0, fp128 *%ptr
+ store volatile fp128 %add1, fp128 *%ptr
+ store volatile fp128 %add4, fp128 *%ptr
+ store volatile fp128 %add5, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-05.ll b/test/CodeGen/SystemZ/frame-05.ll
new file mode 100644
index 000000000000..3a159fcd5941
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-05.ll
@@ -0,0 +1,219 @@
+; Test saving and restoring of call-saved GPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This function should require all GPRs, but no other spill slots. The caller
+; allocates room for the GPR save slots, so we shouldn't need to allocate any
+; extra space.
+;
+; The function only modifies the low 32 bits of each register, which in
+; itself would allow STM and LM to be used instead of STMG and LMG.
+; However, the ABI assigns each register a fixed 8-byte save slot, so we
+; always use the 64-bit form.
+;
+; Use a different address for the final store, so that we can check that
+; %r15 isn't referenced again until after that.
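+; (Informal note: %rN's ABI save slot is at 8*N from the incoming %r15
+; and the CFA is the incoming %r15 + 160, so each .cfi_offset is
+; 8*N - 160, e.g. %r6: 48 - 160 = -112 and %r15: 120 - 160 = -40.)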
+define void @f1(i32 *%ptr) {
+; CHECK: f1:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r6, -112
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK: st {{.*}}, 4(%r2)
+; CHECK: lmg %r6, %r15, 48(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l6 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l11 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add3 = add i32 %l3, %add1
+ %add4 = add i32 %l4, %add3
+ %add5 = add i32 %l5, %add4
+ %add6 = add i32 %l6, %add5
+ %add7 = add i32 %l7, %add6
+ %add8 = add i32 %l8, %add7
+ %add9 = add i32 %l9, %add8
+ %add10 = add i32 %l10, %add9
+ %add11 = add i32 %l11, %add10
+ %add12 = add i32 %l12, %add11
+ %add13 = add i32 %l13, %add12
+ %add14 = add i32 %l14, %add13
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add3, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add6, i32 *%ptr
+ store volatile i32 %add7, i32 *%ptr
+ store volatile i32 %add8, i32 *%ptr
+ store volatile i32 %add9, i32 *%ptr
+ store volatile i32 %add10, i32 *%ptr
+ store volatile i32 %add11, i32 *%ptr
+ store volatile i32 %add12, i32 *%ptr
+ store volatile i32 %add13, i32 *%ptr
+ %final = getelementptr i32 *%ptr, i32 1
+ store volatile i32 %add14, i32 *%final
+ ret void
+}
+
+; Like f1, but requires one fewer GPR. We allocate the call-saved GPRs
+; from %r14 down, so that the STMG/LMG sequences aren't any longer than
+; they need to be.
+define void @f2(i32 *%ptr) {
+; CHECK: f2:
+; CHECK: stmg %r7, %r15, 56(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK: st {{.*}}, 4(%r2)
+; CHECK: lmg %r7, %r15, 56(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l11 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add3 = add i32 %l3, %add1
+ %add4 = add i32 %l4, %add3
+ %add5 = add i32 %l5, %add4
+ %add7 = add i32 %l7, %add5
+ %add8 = add i32 %l8, %add7
+ %add9 = add i32 %l9, %add8
+ %add10 = add i32 %l10, %add9
+ %add11 = add i32 %l11, %add10
+ %add12 = add i32 %l12, %add11
+ %add13 = add i32 %l13, %add12
+ %add14 = add i32 %l14, %add13
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add3, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add7, i32 *%ptr
+ store volatile i32 %add8, i32 *%ptr
+ store volatile i32 %add9, i32 *%ptr
+ store volatile i32 %add10, i32 *%ptr
+ store volatile i32 %add11, i32 *%ptr
+ store volatile i32 %add12, i32 *%ptr
+ store volatile i32 %add13, i32 *%ptr
+ %final = getelementptr i32 *%ptr, i32 1
+ store volatile i32 %add14, i32 *%final
+ ret void
+}
+
+; Like f1, but only needs one call-saved GPR, which ought to be %r14.
+define void @f3(i32 *%ptr) {
+; CHECK: f3:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK-NOT: %r7
+; CHECK-NOT: %r8
+; CHECK-NOT: %r9
+; CHECK-NOT: %r10
+; CHECK-NOT: %r11
+; CHECK-NOT: %r12
+; CHECK-NOT: %r13
+; CHECK: st {{.*}}, 4(%r2)
+; CHECK: lmg %r14, %r15, 112(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add3 = add i32 %l3, %add1
+ %add4 = add i32 %l4, %add3
+ %add5 = add i32 %l5, %add4
+ %add14 = add i32 %l14, %add5
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add3, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ %final = getelementptr i32 *%ptr, i32 1
+ store volatile i32 %add14, i32 *%final
+ ret void
+}
+
+; This function should use all call-clobbered GPRs but no call-saved ones.
+; It shouldn't need to touch the stack at all.
+define void @f4(i32 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK-NOT: %r7
+; CHECK-NOT: %r8
+; CHECK-NOT: %r9
+; CHECK-NOT: %r10
+; CHECK-NOT: %r11
+; CHECK-NOT: %r12
+; CHECK-NOT: %r13
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add3 = add i32 %l3, %add1
+ %add4 = add i32 %l4, %add3
+ %add5 = add i32 %l5, %add4
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add3, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ %final = getelementptr i32 *%ptr, i32 1
+ store volatile i32 %add5, i32 *%final
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-06.ll b/test/CodeGen/SystemZ/frame-06.ll
new file mode 100644
index 000000000000..4c361f1e9fc9
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-06.ll
@@ -0,0 +1,216 @@
+; Like frame-05.ll, but with i64s rather than i32s. Internally this
+; uses a different register class, but the set of saved and restored
+; registers should be the same.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This function should require all GPRs, but no other spill slots. The caller
+; allocates room for the GPR save slots, so we shouldn't need to allocate any
+; extra space.
+;
+; Use a different address for the final store, so that we can check that
+; %r15 isn't referenced again until after that.
+define void @f1(i64 *%ptr) {
+; CHECK: f1:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r6, -112
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK: stg {{.*}}, 8(%r2)
+; CHECK: lmg %r6, %r15, 48(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i64 *%ptr
+ %l1 = load volatile i64 *%ptr
+ %l3 = load volatile i64 *%ptr
+ %l4 = load volatile i64 *%ptr
+ %l5 = load volatile i64 *%ptr
+ %l6 = load volatile i64 *%ptr
+ %l7 = load volatile i64 *%ptr
+ %l8 = load volatile i64 *%ptr
+ %l9 = load volatile i64 *%ptr
+ %l10 = load volatile i64 *%ptr
+ %l11 = load volatile i64 *%ptr
+ %l12 = load volatile i64 *%ptr
+ %l13 = load volatile i64 *%ptr
+ %l14 = load volatile i64 *%ptr
+ %add0 = add i64 %l0, %l0
+ %add1 = add i64 %l1, %add0
+ %add3 = add i64 %l3, %add1
+ %add4 = add i64 %l4, %add3
+ %add5 = add i64 %l5, %add4
+ %add6 = add i64 %l6, %add5
+ %add7 = add i64 %l7, %add6
+ %add8 = add i64 %l8, %add7
+ %add9 = add i64 %l9, %add8
+ %add10 = add i64 %l10, %add9
+ %add11 = add i64 %l11, %add10
+ %add12 = add i64 %l12, %add11
+ %add13 = add i64 %l13, %add12
+ %add14 = add i64 %l14, %add13
+ store volatile i64 %add0, i64 *%ptr
+ store volatile i64 %add1, i64 *%ptr
+ store volatile i64 %add3, i64 *%ptr
+ store volatile i64 %add4, i64 *%ptr
+ store volatile i64 %add5, i64 *%ptr
+ store volatile i64 %add6, i64 *%ptr
+ store volatile i64 %add7, i64 *%ptr
+ store volatile i64 %add8, i64 *%ptr
+ store volatile i64 %add9, i64 *%ptr
+ store volatile i64 %add10, i64 *%ptr
+ store volatile i64 %add11, i64 *%ptr
+ store volatile i64 %add12, i64 *%ptr
+ store volatile i64 %add13, i64 *%ptr
+ %final = getelementptr i64 *%ptr, i64 1
+ store volatile i64 %add14, i64 *%final
+ ret void
+}
+
+; Like f1, but requires one fewer GPR. We allocate the call-saved GPRs
+; from %r14 down, so that the STMG/LMG sequences aren't any longer than
+; they need to be.
+define void @f2(i64 *%ptr) {
+; CHECK: f2:
+; CHECK: stmg %r7, %r15, 56(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK: stg {{.*}}, 8(%r2)
+; CHECK: lmg %r7, %r15, 56(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i64 *%ptr
+ %l1 = load volatile i64 *%ptr
+ %l3 = load volatile i64 *%ptr
+ %l4 = load volatile i64 *%ptr
+ %l5 = load volatile i64 *%ptr
+ %l7 = load volatile i64 *%ptr
+ %l8 = load volatile i64 *%ptr
+ %l9 = load volatile i64 *%ptr
+ %l10 = load volatile i64 *%ptr
+ %l11 = load volatile i64 *%ptr
+ %l12 = load volatile i64 *%ptr
+ %l13 = load volatile i64 *%ptr
+ %l14 = load volatile i64 *%ptr
+ %add0 = add i64 %l0, %l0
+ %add1 = add i64 %l1, %add0
+ %add3 = add i64 %l3, %add1
+ %add4 = add i64 %l4, %add3
+ %add5 = add i64 %l5, %add4
+ %add7 = add i64 %l7, %add5
+ %add8 = add i64 %l8, %add7
+ %add9 = add i64 %l9, %add8
+ %add10 = add i64 %l10, %add9
+ %add11 = add i64 %l11, %add10
+ %add12 = add i64 %l12, %add11
+ %add13 = add i64 %l13, %add12
+ %add14 = add i64 %l14, %add13
+ store volatile i64 %add0, i64 *%ptr
+ store volatile i64 %add1, i64 *%ptr
+ store volatile i64 %add3, i64 *%ptr
+ store volatile i64 %add4, i64 *%ptr
+ store volatile i64 %add5, i64 *%ptr
+ store volatile i64 %add7, i64 *%ptr
+ store volatile i64 %add8, i64 *%ptr
+ store volatile i64 %add9, i64 *%ptr
+ store volatile i64 %add10, i64 *%ptr
+ store volatile i64 %add11, i64 *%ptr
+ store volatile i64 %add12, i64 *%ptr
+ store volatile i64 %add13, i64 *%ptr
+ %final = getelementptr i64 *%ptr, i64 1
+ store volatile i64 %add14, i64 *%final
+ ret void
+}
+
+; Like f1, but only needs one call-saved GPR, which ought to be %r14.
+define void @f3(i64 *%ptr) {
+; CHECK: f3:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK-NOT: %r15
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK-NOT: %r7
+; CHECK-NOT: %r8
+; CHECK-NOT: %r9
+; CHECK-NOT: %r10
+; CHECK-NOT: %r11
+; CHECK-NOT: %r12
+; CHECK-NOT: %r13
+; CHECK: stg {{.*}}, 8(%r2)
+; CHECK: lmg %r14, %r15, 112(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i64 *%ptr
+ %l1 = load volatile i64 *%ptr
+ %l3 = load volatile i64 *%ptr
+ %l4 = load volatile i64 *%ptr
+ %l5 = load volatile i64 *%ptr
+ %l14 = load volatile i64 *%ptr
+ %add0 = add i64 %l0, %l0
+ %add1 = add i64 %l1, %add0
+ %add3 = add i64 %l3, %add1
+ %add4 = add i64 %l4, %add3
+ %add5 = add i64 %l5, %add4
+ %add14 = add i64 %l14, %add5
+ store volatile i64 %add0, i64 *%ptr
+ store volatile i64 %add1, i64 *%ptr
+ store volatile i64 %add3, i64 *%ptr
+ store volatile i64 %add4, i64 *%ptr
+ store volatile i64 %add5, i64 *%ptr
+ %final = getelementptr i64 *%ptr, i64 1
+ store volatile i64 %add14, i64 *%final
+ ret void
+}
+
+; This function should use all call-clobbered GPRs but no call-saved ones.
+; It shouldn't need to touch the stack at all.
+define void @f4(i64 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: %r15
+; CHECK-NOT: %r6
+; CHECK-NOT: %r7
+; CHECK-NOT: %r8
+; CHECK-NOT: %r9
+; CHECK-NOT: %r10
+; CHECK-NOT: %r11
+; CHECK-NOT: %r12
+; CHECK-NOT: %r13
+; CHECK: br %r14
+ %l0 = load volatile i64 *%ptr
+ %l1 = load volatile i64 *%ptr
+ %l3 = load volatile i64 *%ptr
+ %l4 = load volatile i64 *%ptr
+ %l5 = load volatile i64 *%ptr
+ %add0 = add i64 %l0, %l0
+ %add1 = add i64 %l1, %add0
+ %add3 = add i64 %l3, %add1
+ %add4 = add i64 %l4, %add3
+ %add5 = add i64 %l5, %add4
+ store volatile i64 %add0, i64 *%ptr
+ store volatile i64 %add1, i64 *%ptr
+ store volatile i64 %add3, i64 *%ptr
+ store volatile i64 %add4, i64 *%ptr
+ %final = getelementptr i64 *%ptr, i64 1
+ store volatile i64 %add5, i64 *%final
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-07.ll b/test/CodeGen/SystemZ/frame-07.ll
new file mode 100644
index 000000000000..cfe9f868c07b
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-07.ll
@@ -0,0 +1,249 @@
+; Test the saving and restoring of FPRs in large frames.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck -check-prefix=CHECK-NOFP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+
+; Test a frame size that requires some FPRs to be saved and loaded using
+; the 20-bit STDY and LDY while others can use the 12-bit STD and LD.
+; The frame is big enough to require an emergency spill slot at 160(%r15),
+; as well as the 8 FPR save slots. Get a frame of size 4128 by allocating
+; (4128 - 168 - 8 * 8) / 8 = 487 extra doublewords.
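+; (Informal note: STD/LD take a 12-bit unsigned displacement, 0..4095,
+; while STDY/LDY take a 20-bit signed one, so the slots at 4096..4120
+; need the long form and those at 4064..4088 do not.)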
+define void @f1(double *%ptr, i64 %x) {
+; CHECK-NOFP: f1:
+; CHECK-NOFP: aghi %r15, -4128
+; CHECK-NOFP: .cfi_def_cfa_offset 4288
+; CHECK-NOFP: stdy %f8, 4120(%r15)
+; CHECK-NOFP: stdy %f9, 4112(%r15)
+; CHECK-NOFP: stdy %f10, 4104(%r15)
+; CHECK-NOFP: stdy %f11, 4096(%r15)
+; CHECK-NOFP: std %f12, 4088(%r15)
+; CHECK-NOFP: std %f13, 4080(%r15)
+; CHECK-NOFP: std %f14, 4072(%r15)
+; CHECK-NOFP: std %f15, 4064(%r15)
+; CHECK-NOFP: .cfi_offset %f8, -168
+; CHECK-NOFP: .cfi_offset %f9, -176
+; CHECK-NOFP: .cfi_offset %f10, -184
+; CHECK-NOFP: .cfi_offset %f11, -192
+; CHECK-NOFP: .cfi_offset %f12, -200
+; CHECK-NOFP: .cfi_offset %f13, -208
+; CHECK-NOFP: .cfi_offset %f14, -216
+; CHECK-NOFP: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK-NOFP: ldy %f8, 4120(%r15)
+; CHECK-NOFP: ldy %f9, 4112(%r15)
+; CHECK-NOFP: ldy %f10, 4104(%r15)
+; CHECK-NOFP: ldy %f11, 4096(%r15)
+; CHECK-NOFP: ld %f12, 4088(%r15)
+; CHECK-NOFP: ld %f13, 4080(%r15)
+; CHECK-NOFP: ld %f14, 4072(%r15)
+; CHECK-NOFP: ld %f15, 4064(%r15)
+; CHECK-NOFP: aghi %r15, 4128
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f1:
+; CHECK-FP: stmg %r11, %r15, 88(%r15)
+; CHECK-FP: aghi %r15, -4128
+; CHECK-FP: .cfi_def_cfa_offset 4288
+; CHECK-FP: lgr %r11, %r15
+; CHECK-FP: .cfi_def_cfa_register %r11
+; CHECK-FP: stdy %f8, 4120(%r11)
+; CHECK-FP: stdy %f9, 4112(%r11)
+; CHECK-FP: stdy %f10, 4104(%r11)
+; CHECK-FP: stdy %f11, 4096(%r11)
+; CHECK-FP: std %f12, 4088(%r11)
+; CHECK-FP: std %f13, 4080(%r11)
+; CHECK-FP: std %f14, 4072(%r11)
+; CHECK-FP: std %f15, 4064(%r11)
+; ...main function body...
+; CHECK-FP: ldy %f8, 4120(%r11)
+; CHECK-FP: ldy %f9, 4112(%r11)
+; CHECK-FP: ldy %f10, 4104(%r11)
+; CHECK-FP: ldy %f11, 4096(%r11)
+; CHECK-FP: ld %f12, 4088(%r11)
+; CHECK-FP: ld %f13, 4080(%r11)
+; CHECK-FP: ld %f14, 4072(%r11)
+; CHECK-FP: ld %f15, 4064(%r11)
+; CHECK-FP: lmg %r11, %r15, 4216(%r11)
+; CHECK-FP: br %r14
+ %y = alloca [487 x i64], align 8
+ %elem = getelementptr inbounds [487 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %elem
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %l9 = load volatile double *%ptr
+ %l10 = load volatile double *%ptr
+ %l11 = load volatile double *%ptr
+ %l12 = load volatile double *%ptr
+ %l13 = load volatile double *%ptr
+ %l14 = load volatile double *%ptr
+ %l15 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ %add8 = fadd double %l8, %add7
+ %add9 = fadd double %l9, %add8
+ %add10 = fadd double %l10, %add9
+ %add11 = fadd double %l11, %add10
+ %add12 = fadd double %l12, %add11
+ %add13 = fadd double %l13, %add12
+ %add14 = fadd double %l14, %add13
+ %add15 = fadd double %l15, %add14
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ store volatile double %add8, double *%ptr
+ store volatile double %add9, double *%ptr
+ store volatile double %add10, double *%ptr
+ store volatile double %add11, double *%ptr
+ store volatile double %add12, double *%ptr
+ store volatile double %add13, double *%ptr
+ store volatile double %add14, double *%ptr
+ store volatile double %add15, double *%ptr
+ ret void
+}
+
+; Test a frame size that requires some FPRs to be saved and loaded using
+; an indexed STD and LD while others can use the 20-bit STDY and LDY.
+; The index can be any call-clobbered GPR except %r0.
+;
+; Don't require the accesses to share the same LLILH; that would be a
+; good optimisation but is really a different test.
+;
+; As above, get a frame of size 524320 by allocating
+; (524320 - 168 - 8 * 8) / 8 = 65511 extra doublewords.
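+; (Informal note: LLILH 8 produces 8 << 16 = 524288, one past the 20-bit
+; signed displacement limit of 524287, so the slots at 524288..524312 use
+; the base+index form while 524256..524280 still fit STDY/LDY.)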
+define void @f2(double *%ptr, i64 %x) {
+; CHECK-NOFP: f2:
+; CHECK-NOFP: agfi %r15, -524320
+; CHECK-NOFP: .cfi_def_cfa_offset 524480
+; CHECK-NOFP: llilh [[INDEX:%r[1-5]]], 8
+; CHECK-NOFP: std %f8, 24([[INDEX]],%r15)
+; CHECK-NOFP: std %f9, 16({{%r[1-5]}},%r15)
+; CHECK-NOFP: std %f10, 8({{%r[1-5]}},%r15)
+; CHECK-NOFP: std %f11, 0({{%r[1-5]}},%r15)
+; CHECK-NOFP: stdy %f12, 524280(%r15)
+; CHECK-NOFP: stdy %f13, 524272(%r15)
+; CHECK-NOFP: stdy %f14, 524264(%r15)
+; CHECK-NOFP: stdy %f15, 524256(%r15)
+; CHECK-NOFP: .cfi_offset %f8, -168
+; CHECK-NOFP: .cfi_offset %f9, -176
+; CHECK-NOFP: .cfi_offset %f10, -184
+; CHECK-NOFP: .cfi_offset %f11, -192
+; CHECK-NOFP: .cfi_offset %f12, -200
+; CHECK-NOFP: .cfi_offset %f13, -208
+; CHECK-NOFP: .cfi_offset %f14, -216
+; CHECK-NOFP: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK-NOFP: ld %f8, 24({{%r[1-5]}},%r15)
+; CHECK-NOFP: ld %f9, 16({{%r[1-5]}},%r15)
+; CHECK-NOFP: ld %f10, 8({{%r[1-5]}},%r15)
+; CHECK-NOFP: ld %f11, 0({{%r[1-5]}},%r15)
+; CHECK-NOFP: ldy %f12, 524280(%r15)
+; CHECK-NOFP: ldy %f13, 524272(%r15)
+; CHECK-NOFP: ldy %f14, 524264(%r15)
+; CHECK-NOFP: ldy %f15, 524256(%r15)
+; CHECK-NOFP: agfi %r15, 524320
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f2:
+; CHECK-FP: stmg %r11, %r15, 88(%r15)
+; CHECK-FP: agfi %r15, -524320
+; CHECK-FP: .cfi_def_cfa_offset 524480
+; CHECK-FP: llilh [[INDEX:%r[1-5]]], 8
+; CHECK-FP: std %f8, 24([[INDEX]],%r11)
+; CHECK-FP: std %f9, 16({{%r[1-5]}},%r11)
+; CHECK-FP: std %f10, 8({{%r[1-5]}},%r11)
+; CHECK-FP: std %f11, 0({{%r[1-5]}},%r11)
+; CHECK-FP: stdy %f12, 524280(%r11)
+; CHECK-FP: stdy %f13, 524272(%r11)
+; CHECK-FP: stdy %f14, 524264(%r11)
+; CHECK-FP: stdy %f15, 524256(%r11)
+; CHECK-FP: .cfi_offset %f8, -168
+; CHECK-FP: .cfi_offset %f9, -176
+; CHECK-FP: .cfi_offset %f10, -184
+; CHECK-FP: .cfi_offset %f11, -192
+; CHECK-FP: .cfi_offset %f12, -200
+; CHECK-FP: .cfi_offset %f13, -208
+; CHECK-FP: .cfi_offset %f14, -216
+; CHECK-FP: .cfi_offset %f15, -224
+; ...main function body...
+; CHECK-FP: ld %f8, 24({{%r[1-5]}},%r11)
+; CHECK-FP: ld %f9, 16({{%r[1-5]}},%r11)
+; CHECK-FP: ld %f10, 8({{%r[1-5]}},%r11)
+; CHECK-FP: ld %f11, 0({{%r[1-5]}},%r11)
+; CHECK-FP: ldy %f12, 524280(%r11)
+; CHECK-FP: ldy %f13, 524272(%r11)
+; CHECK-FP: ldy %f14, 524264(%r11)
+; CHECK-FP: ldy %f15, 524256(%r11)
+; CHECK-FP: aghi %r11, 128
+; CHECK-FP: lmg %r11, %r15, 524280(%r11)
+; CHECK-FP: br %r14
+ %y = alloca [65511 x i64], align 8
+ %elem = getelementptr inbounds [65511 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %elem
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %l9 = load volatile double *%ptr
+ %l10 = load volatile double *%ptr
+ %l11 = load volatile double *%ptr
+ %l12 = load volatile double *%ptr
+ %l13 = load volatile double *%ptr
+ %l14 = load volatile double *%ptr
+ %l15 = load volatile double *%ptr
+ %add0 = fadd double %l0, %l0
+ %add1 = fadd double %l1, %add0
+ %add2 = fadd double %l2, %add1
+ %add3 = fadd double %l3, %add2
+ %add4 = fadd double %l4, %add3
+ %add5 = fadd double %l5, %add4
+ %add6 = fadd double %l6, %add5
+ %add7 = fadd double %l7, %add6
+ %add8 = fadd double %l8, %add7
+ %add9 = fadd double %l9, %add8
+ %add10 = fadd double %l10, %add9
+ %add11 = fadd double %l11, %add10
+ %add12 = fadd double %l12, %add11
+ %add13 = fadd double %l13, %add12
+ %add14 = fadd double %l14, %add13
+ %add15 = fadd double %l15, %add14
+ store volatile double %add0, double *%ptr
+ store volatile double %add1, double *%ptr
+ store volatile double %add2, double *%ptr
+ store volatile double %add3, double *%ptr
+ store volatile double %add4, double *%ptr
+ store volatile double %add5, double *%ptr
+ store volatile double %add6, double *%ptr
+ store volatile double %add7, double *%ptr
+ store volatile double %add8, double *%ptr
+ store volatile double %add9, double *%ptr
+ store volatile double %add10, double *%ptr
+ store volatile double %add11, double *%ptr
+ store volatile double %add12, double *%ptr
+ store volatile double %add13, double *%ptr
+ store volatile double %add14, double *%ptr
+ store volatile double %add15, double *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-08.ll b/test/CodeGen/SystemZ/frame-08.ll
new file mode 100644
index 000000000000..6cf6378268f4
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-08.ll
@@ -0,0 +1,277 @@
+; Test the saving and restoring of GPRs in large frames.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; This is the largest frame size that can use a plain LMG for %r6 and above.
+; It is big enough to require an emergency spill slot at 160(%r15),
+; so get a frame of size 524232 by allocating (524232 - 168) / 8 = 65508
+; extra doublewords.
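+; (Informally: 524232 + 48 = 524280, the largest multiple of 8 within
+; LMG's 20-bit signed displacement limit of 524287.)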
+define void @f1(i32 *%ptr, i64 %x) {
+; CHECK: f1:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK: .cfi_offset %r6, -112
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524232
+; CHECK: .cfi_def_cfa_offset 524392
+; ...main function body...
+; CHECK-NOT: ag
+; CHECK: lmg %r6, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l6 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l11 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add6 = add i32 %l6, %add5
+ %add7 = add i32 %l7, %add6
+ %add8 = add i32 %l8, %add7
+ %add9 = add i32 %l9, %add8
+ %add10 = add i32 %l10, %add9
+ %add11 = add i32 %l11, %add10
+ %add12 = add i32 %l12, %add11
+ %add13 = add i32 %l13, %add12
+ %add14 = add i32 %l14, %add13
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add6, i32 *%ptr
+ store volatile i32 %add7, i32 *%ptr
+ store volatile i32 %add8, i32 *%ptr
+ store volatile i32 %add9, i32 *%ptr
+ store volatile i32 %add10, i32 *%ptr
+ store volatile i32 %add11, i32 *%ptr
+ store volatile i32 %add12, i32 *%ptr
+ store volatile i32 %add13, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [65508 x i64], align 8
+ %entry = getelementptr inbounds [65508 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
+
+; This is the largest frame size that can use a plain LMG for %r14 and above.
+; It is big enough to require an emergency spill slot at 160(%r15),
+; so get a frame of size 524168 by allocating (524168 - 168) / 8 = 65500
+; extra doublewords.
+define void @f2(i32 *%ptr, i64 %x) {
+; CHECK: f2:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524168
+; CHECK: .cfi_def_cfa_offset 524328
+; ...main function body...
+; CHECK-NOT: ag
+; CHECK: lmg %r14, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add14 = add i32 %l14, %add5
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [65500 x i64], align 8
+ %entry = getelementptr inbounds [65500 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
+
+; Like f1 but with a frame that is 8 bytes bigger. This is the smallest
+; frame size that needs two instructions to perform the final LMG for
+; %r6 and above.
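+; (524240 + 48 = 524288 is just out of LMG range, hence the AGHI 8
+; before the LMG at displacement 524280.)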
+define void @f3(i32 *%ptr, i64 %x) {
+; CHECK: f3:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK: .cfi_offset %r6, -112
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524240
+; CHECK: .cfi_def_cfa_offset 524400
+; ...main function body...
+; CHECK: aghi %r15, 8
+; CHECK: lmg %r6, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l6 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l11 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add6 = add i32 %l6, %add5
+ %add7 = add i32 %l7, %add6
+ %add8 = add i32 %l8, %add7
+ %add9 = add i32 %l9, %add8
+ %add10 = add i32 %l10, %add9
+ %add11 = add i32 %l11, %add10
+ %add12 = add i32 %l12, %add11
+ %add13 = add i32 %l13, %add12
+ %add14 = add i32 %l14, %add13
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add6, i32 *%ptr
+ store volatile i32 %add7, i32 *%ptr
+ store volatile i32 %add8, i32 *%ptr
+ store volatile i32 %add9, i32 *%ptr
+ store volatile i32 %add10, i32 *%ptr
+ store volatile i32 %add11, i32 *%ptr
+ store volatile i32 %add12, i32 *%ptr
+ store volatile i32 %add13, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [65509 x i64], align 8
+ %entry = getelementptr inbounds [65509 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
+
+; Like f2 but with a frame that is 8 bytes bigger. This is the smallest
+; frame size that needs two instructions to perform the final LMG for
+; %r14 and %r15.
+define void @f4(i32 *%ptr, i64 %x) {
+; CHECK: f4:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524176
+; CHECK: .cfi_def_cfa_offset 524336
+; ...main function body...
+; CHECK: aghi %r15, 8
+; CHECK: lmg %r14, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add14 = add i32 %l14, %add5
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [65501 x i64], align 8
+ %entry = getelementptr inbounds [65501 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
+
+; This is the largest frame size for which the preparatory increment for
+; "lmg %r14, %r15, ..." can be done using AGHI.
+define void @f5(i32 *%ptr, i64 %x) {
+; CHECK: f5:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -556928
+; CHECK: .cfi_def_cfa_offset 557088
+; ...main function body...
+; CHECK: aghi %r15, 32760
+; CHECK: lmg %r14, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add14 = add i32 %l14, %add5
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [69595 x i64], align 8
+ %entry = getelementptr inbounds [69595 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
+
+; This is the smallest frame size for which the preparatory increment for
+; "lmg %r14, %r15, ..." needs to be done using AGFI.
+define void @f6(i32 *%ptr, i64 %x) {
+; CHECK: f6:
+; CHECK: stmg %r14, %r15, 112(%r15)
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -556936
+; CHECK: .cfi_def_cfa_offset 557096
+; ...main function body...
+; CHECK: agfi %r15, 32768
+; CHECK: lmg %r14, %r15, 524280(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add4 = add i32 %l4, %add1
+ %add5 = add i32 %l5, %add4
+ %add14 = add i32 %l14, %add5
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add14, i32 *%ptr
+ %y = alloca [69596 x i64], align 8
+ %entry = getelementptr inbounds [69596 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %entry
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-09.ll b/test/CodeGen/SystemZ/frame-09.ll
new file mode 100644
index 000000000000..eac633623c5f
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-09.ll
@@ -0,0 +1,153 @@
+; Test the handling of the frame pointer (%r11).
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck %s
+
+; We should always initialise %r11 when FP elimination is disabled.
+; We don't need to allocate any more than the caller-provided 160-byte
+; area though.
+define i32 @f1(i32 %x) {
+; CHECK: f1:
+; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r15, -40
+; CHECK-NOT: ag
+; CHECK: lgr %r11, %r15
+; CHECK: .cfi_def_cfa_register %r11
+; CHECK: lmg %r11, %r15, 88(%r11)
+; CHECK: br %r14
+ %y = add i32 %x, 1
+ ret i32 %y
+}
+
+; Make sure that frame accesses after the initial allocation are relative
+; to %r11 rather than %r15.
+define void @f2(i64 %x) {
+; CHECK: f2:
+; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r15, -40
+; CHECK: aghi %r15, -168
+; CHECK: .cfi_def_cfa_offset 328
+; CHECK: lgr %r11, %r15
+; CHECK: .cfi_def_cfa_register %r11
+; CHECK: stg %r2, 160(%r11)
+; CHECK: lmg %r11, %r15, 256(%r11)
+; CHECK: br %r14
+ %y = alloca i64, align 8
+ store volatile i64 %x, i64* %y
+ ret void
+}
+
+; This function should require all GPRs but no other spill slots.
+; It shouldn't need to allocate its own frame.
+define void @f3(i32 *%ptr) {
+; CHECK: f3:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK-NOT: %r15
+; CHECK-NOT: %r11
+; CHECK: .cfi_offset %r6, -112
+; CHECK: .cfi_offset %r7, -104
+; CHECK: .cfi_offset %r8, -96
+; CHECK: .cfi_offset %r9, -88
+; CHECK: .cfi_offset %r10, -80
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r12, -64
+; CHECK: .cfi_offset %r13, -56
+; CHECK: .cfi_offset %r14, -48
+; CHECK: .cfi_offset %r15, -40
+; CHECK-NOT: ag
+; CHECK: lgr %r11, %r15
+; CHECK: .cfi_def_cfa_register %r11
+; ...main function body...
+; CHECK-NOT: %r15
+; CHECK-NOT: %r11
+; CHECK: st {{.*}}, 4(%r2)
+; CHECK: lmg %r6, %r15, 48(%r11)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l6 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %add0 = add i32 %l0, %l0
+ %add1 = add i32 %l1, %add0
+ %add3 = add i32 %l3, %add1
+ %add4 = add i32 %l4, %add3
+ %add5 = add i32 %l5, %add4
+ %add6 = add i32 %l6, %add5
+ %add7 = add i32 %l7, %add6
+ %add8 = add i32 %l8, %add7
+ %add9 = add i32 %l9, %add8
+ %add10 = add i32 %l10, %add9
+ %add12 = add i32 %l12, %add10
+ %add13 = add i32 %l13, %add12
+ %add14 = add i32 %l14, %add13
+ store volatile i32 %add0, i32 *%ptr
+ store volatile i32 %add1, i32 *%ptr
+ store volatile i32 %add3, i32 *%ptr
+ store volatile i32 %add4, i32 *%ptr
+ store volatile i32 %add5, i32 *%ptr
+ store volatile i32 %add6, i32 *%ptr
+ store volatile i32 %add7, i32 *%ptr
+ store volatile i32 %add8, i32 *%ptr
+ store volatile i32 %add9, i32 *%ptr
+ store volatile i32 %add10, i32 *%ptr
+ store volatile i32 %add12, i32 *%ptr
+ store volatile i32 %add13, i32 *%ptr
+ %final = getelementptr i32 *%ptr, i32 1
+ store volatile i32 %add14, i32 *%final
+ ret void
+}
+
+; The largest frame for which the LMG is in range. This frame has an
+; emergency spill slot at 160(%r11), so create a frame of size 524192
+; by allocating (524192 - 168) / 8 = 65503 doublewords.
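+; The saved registers then sit at 88 + 524192 = 524280(%r11), just inside
+; LMG's 20-bit displacement limit of 524287.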
+define void @f4(i64 %x) {
+; CHECK: f4:
+; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524192
+; CHECK: .cfi_def_cfa_offset 524352
+; CHECK: lgr %r11, %r15
+; CHECK: .cfi_def_cfa_register %r11
+; CHECK: stg %r2, 168(%r11)
+; CHECK-NOT: ag
+; CHECK: lmg %r11, %r15, 524280(%r11)
+; CHECK: br %r14
+ %y = alloca [65503 x i64], align 8
+ %ptr = getelementptr inbounds [65503 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; The next frame size larger than f4.
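+; The save slots would now be at 88 + 524200 = 524288, one past the LMG
+; limit, so %r11 is first bumped by 8 to bring them back to 524280(%r11).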
+define void @f5(i64 %x) {
+; CHECK: f5:
+; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: .cfi_offset %r11, -72
+; CHECK: .cfi_offset %r15, -40
+; CHECK: agfi %r15, -524200
+; CHECK: .cfi_def_cfa_offset 524360
+; CHECK: lgr %r11, %r15
+; CHECK: .cfi_def_cfa_register %r11
+; CHECK: stg %r2, 168(%r11)
+; CHECK: aghi %r11, 8
+; CHECK: lmg %r11, %r15, 524280(%r11)
+; CHECK: br %r14
+ %y = alloca [65504 x i64], align 8
+ %ptr = getelementptr inbounds [65504 x i64]* %y, i64 0, i64 0
+ store volatile i64 %x, i64* %ptr
+ ret void
+}
+
+; The tests above establish that %r11 is handled like %r15 for LMG.
+; Rely on the %r15-based tests in frame-08.ll for other cases.
diff --git a/test/CodeGen/SystemZ/frame-10.ll b/test/CodeGen/SystemZ/frame-10.ll
new file mode 100644
index 000000000000..399a4125933d
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-10.ll
@@ -0,0 +1,14 @@
+; Test the stacksave builtin.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i8 *@llvm.stacksave()
+
+define void @f1(i8 **%dest) {
+; CHECK: f1:
+; CHECK: stg %r15, 0(%r2)
+; CHECK: br %r14
+ %addr = call i8 *@llvm.stacksave()
+ store volatile i8 *%addr, i8 **%dest
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-11.ll b/test/CodeGen/SystemZ/frame-11.ll
new file mode 100644
index 000000000000..84222056e6d0
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-11.ll
@@ -0,0 +1,18 @@
+; Test the stackrestore builtin.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare void @llvm.stackrestore(i8 *)
+
+; We should use a frame pointer and tear down the frame based on %r11
+; rather than %r15.
+define void @f1(i8 *%src) {
+; CHECK: f1:
+; CHECK: stmg %r11, %r15, 88(%r15)
+; CHECK: lgr %r11, %r15
+; CHECK: lgr %r15, %r2
+; CHECK: lmg %r11, %r15, 88(%r11)
+; CHECK: br %r14
+ call void @llvm.stackrestore(i8 *%src)
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-13.ll b/test/CodeGen/SystemZ/frame-13.ll
new file mode 100644
index 000000000000..fa6b845ea6f2
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-13.ll
@@ -0,0 +1,299 @@
+; Test the handling of base + 12-bit displacement addresses for large frames,
+; in cases where no 20-bit form exists.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck -check-prefix=CHECK-NOFP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+
+; This file tests what happens when a displacement is converted from
+; being relative to the start of a frame object to being relative to
+; the frame itself. In some cases the test is only possible if two
+; objects are allocated.
+;
+; Rather than rely on a particular order for those objects, the tests
+; instead allocate two objects of the same size and apply the test to
+; both of them. For consistency, all tests follow this model, even if
+; one object would actually be enough.
+
+; First check the highest in-range offset after conversion, which is 4092
+; for word-addressing instructions like MVHI.
+;
+; The last in-range doubleword offset is 4088. Since the frame has an
+; emergency spill slot at 160(%r15), the amount that we need to allocate
+; in order to put another object at offset 4088 is (4088 - 168) / 4 = 980
+; words.
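+; In other words, for whichever region ends up higher in the frame:
+;   region start = 160 + 8 + 980 * 4 = 4088
+;   element 1    = 4088 + 4          = 4092, the MVHI limit.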
+define void @f1() {
+; CHECK-NOFP: f1:
+; CHECK-NOFP: mvhi 4092(%r15), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f1:
+; CHECK-FP: mvhi 4092(%r11), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x i32], align 8
+ %region2 = alloca [980 x i32], align 8
+ %ptr1 = getelementptr inbounds [980 x i32]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [980 x i32]* %region2, i64 0, i64 1
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Test the first out-of-range offset. We cannot use an index register here.
+define void @f2() {
+; CHECK-NOFP: f2:
+; CHECK-NOFP: lay %r1, 4096(%r15)
+; CHECK-NOFP: mvhi 0(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f2:
+; CHECK-FP: lay %r1, 4096(%r11)
+; CHECK-FP: mvhi 0(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x i32], align 8
+ %region2 = alloca [980 x i32], align 8
+ %ptr1 = getelementptr inbounds [980 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [980 x i32]* %region2, i64 0, i64 2
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Test the next offset after that.
+define void @f3() {
+; CHECK-NOFP: f3:
+; CHECK-NOFP: lay %r1, 4096(%r15)
+; CHECK-NOFP: mvhi 4(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f3:
+; CHECK-FP: lay %r1, 4096(%r11)
+; CHECK-FP: mvhi 4(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x i32], align 8
+ %region2 = alloca [980 x i32], align 8
+ %ptr1 = getelementptr inbounds [980 x i32]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [980 x i32]* %region2, i64 0, i64 3
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Add 4096 bytes (1024 words) to the size of each object and repeat.
+define void @f4() {
+; CHECK-NOFP: f4:
+; CHECK-NOFP: lay %r1, 4096(%r15)
+; CHECK-NOFP: mvhi 4092(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f4:
+; CHECK-FP: lay %r1, 4096(%r11)
+; CHECK-FP: mvhi 4092(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x i32], align 8
+ %region2 = alloca [2004 x i32], align 8
+ %ptr1 = getelementptr inbounds [2004 x i32]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [2004 x i32]* %region2, i64 0, i64 1
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; ...as above.
+define void @f5() {
+; CHECK-NOFP: f5:
+; CHECK-NOFP: lay %r1, 8192(%r15)
+; CHECK-NOFP: mvhi 0(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f5:
+; CHECK-FP: lay %r1, 8192(%r11)
+; CHECK-FP: mvhi 0(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x i32], align 8
+ %region2 = alloca [2004 x i32], align 8
+ %ptr1 = getelementptr inbounds [2004 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [2004 x i32]* %region2, i64 0, i64 2
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; ...as above.
+define void @f6() {
+; CHECK-NOFP: f6:
+; CHECK-NOFP: lay %r1, 8192(%r15)
+; CHECK-NOFP: mvhi 4(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f6:
+; CHECK-FP: lay %r1, 8192(%r11)
+; CHECK-FP: mvhi 4(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x i32], align 8
+ %region2 = alloca [2004 x i32], align 8
+ %ptr1 = getelementptr inbounds [2004 x i32]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [2004 x i32]* %region2, i64 0, i64 3
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Now try an offset of 4092 from the start of the object, with the object
+; being at offset 8192. This time we need objects of (8192 - 168) / 4 = 2006
+; words.
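+; That is: the higher region starts at 168 + 2006 * 4 = 8192, and element
+; 1023 lies at 8192 + 4092 = 12284, reachable as 4092 off the 8192 anchor.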
+define void @f7() {
+; CHECK-NOFP: f7:
+; CHECK-NOFP: lay %r1, 8192(%r15)
+; CHECK-NOFP: mvhi 4092(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f7:
+; CHECK-FP: lay %r1, 8192(%r11)
+; CHECK-FP: mvhi 4092(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2006 x i32], align 8
+ %region2 = alloca [2006 x i32], align 8
+ %ptr1 = getelementptr inbounds [2006 x i32]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2006 x i32]* %region2, i64 0, i64 1023
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Keep the object-relative offset the same but bump the size of the
+; objects by one doubleword.
+define void @f8() {
+; CHECK-NOFP: f8:
+; CHECK-NOFP: lay %r1, 12288(%r15)
+; CHECK-NOFP: mvhi 4(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f8:
+; CHECK-FP: lay %r1, 12288(%r11)
+; CHECK-FP: mvhi 4(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2008 x i32], align 8
+ %region2 = alloca [2008 x i32], align 8
+ %ptr1 = getelementptr inbounds [2008 x i32]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2008 x i32]* %region2, i64 0, i64 1023
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Check a case where the original displacement is out of range. The backend
+; should force an LAY from the outset. We don't yet do any kind of anchor
+; optimization, so there should be no offset on the MVHI itself.
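+; Here the target is 168 + 2008 * 4 + 4 * 1024 = 12296: within LAY's
+; range, so the LAY computes the full address and MVHI uses offset 0.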
+define void @f9() {
+; CHECK-NOFP: f9:
+; CHECK-NOFP: lay %r1, 12296(%r15)
+; CHECK-NOFP: mvhi 0(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f9:
+; CHECK-FP: lay %r1, 12296(%r11)
+; CHECK-FP: mvhi 0(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [2008 x i32], align 8
+ %region2 = alloca [2008 x i32], align 8
+ %ptr1 = getelementptr inbounds [2008 x i32]* %region1, i64 0, i64 1024
+ %ptr2 = getelementptr inbounds [2008 x i32]* %region2, i64 0, i64 1024
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ ret void
+}
+
+; Repeat f2 in a case that needs the emergency spill slot (because all
+; call-clobbered registers are live and no call-saved ones have been
+; allocated).
+define void @f10(i32 *%vptr) {
+; CHECK-NOFP: f10:
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: lay [[REGISTER]], 4096(%r15)
+; CHECK-NOFP: mvhi 0([[REGISTER]]), 42
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f10:
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: lay [[REGISTER]], 4096(%r11)
+; CHECK-FP: mvhi 0([[REGISTER]]), 42
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i3 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %region1 = alloca [980 x i32], align 8
+ %region2 = alloca [980 x i32], align 8
+ %ptr1 = getelementptr inbounds [980 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [980 x i32]* %region2, i64 0, i64 2
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i3, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ ret void
+}
+
+; And again with maximum register pressure. The only spill slot that the
+; NOFP case needs is the emergency one, so the offsets are the same as for f2.
+; However, the FP case uses %r11 as the frame pointer and must therefore
+; spill a second register. This leads to an extra displacement of 8.
+define void @f11(i32 *%vptr) {
+; CHECK-NOFP: f11:
+; CHECK-NOFP: stmg %r6, %r15,
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: lay [[REGISTER]], 4096(%r15)
+; CHECK-NOFP: mvhi 0([[REGISTER]]), 42
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: lmg %r6, %r15,
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f11:
+; CHECK-FP: stmg %r6, %r15,
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: lay [[REGISTER]], 4096(%r11)
+; CHECK-FP: mvhi 8([[REGISTER]]), 42
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: lmg %r6, %r15,
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i3 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %i6 = load volatile i32 *%vptr
+ %i7 = load volatile i32 *%vptr
+ %i8 = load volatile i32 *%vptr
+ %i9 = load volatile i32 *%vptr
+ %i10 = load volatile i32 *%vptr
+ %i11 = load volatile i32 *%vptr
+ %i12 = load volatile i32 *%vptr
+ %i13 = load volatile i32 *%vptr
+ %i14 = load volatile i32 *%vptr
+ %region1 = alloca [980 x i32], align 8
+ %region2 = alloca [980 x i32], align 8
+ %ptr1 = getelementptr inbounds [980 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [980 x i32]* %region2, i64 0, i64 2
+ store volatile i32 42, i32 *%ptr1
+ store volatile i32 42, i32 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i3, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ store volatile i32 %i6, i32 *%vptr
+ store volatile i32 %i7, i32 *%vptr
+ store volatile i32 %i8, i32 *%vptr
+ store volatile i32 %i9, i32 *%vptr
+ store volatile i32 %i10, i32 *%vptr
+ store volatile i32 %i11, i32 *%vptr
+ store volatile i32 %i12, i32 *%vptr
+ store volatile i32 %i13, i32 *%vptr
+ store volatile i32 %i14, i32 *%vptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-14.ll b/test/CodeGen/SystemZ/frame-14.ll
new file mode 100644
index 000000000000..d8ff0a54a761
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-14.ll
@@ -0,0 +1,322 @@
+; Test the handling of base + displacement addresses for large frames,
+; in cases where both 12-bit and 20-bit displacements are allowed.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck -check-prefix=CHECK-NOFP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+
+; This file tests what happens when a displacement is converted from
+; being relative to the start of a frame object to being relative to
+; the frame itself. In some cases the test is only possible if two
+; objects are allocated.
+;
+; Rather than rely on a particular order for those objects, the tests
+; instead allocate two objects of the same size and apply the test to
+; both of them. For consistency, all tests follow this model, even if
+; one object would actually be enough.
+
+; First check the highest offset that is in range of the 12-bit form.
+;
+; The last in-range doubleword offset is 4088. Since the frame has an
+; emergency spill slot at 160(%r15), the amount that we need to allocate
+; in order to put another object at offset 4088 is 4088 - 168 = 3920 bytes.
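+; Byte 7 of the higher region is then at 168 + 3920 + 7 = 4095, the last
+; offset in MVI's 12-bit range.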
+define void @f1() {
+; CHECK-NOFP: f1:
+; CHECK-NOFP: mvi 4095(%r15), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f1:
+; CHECK-FP: mvi 4095(%r11), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [3920 x i8], align 8
+ %region2 = alloca [3920 x i8], align 8
+ %ptr1 = getelementptr inbounds [3920 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [3920 x i8]* %region2, i64 0, i64 7
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Test the first offset that is out-of-range of the 12-bit form.
+define void @f2() {
+; CHECK-NOFP: f2:
+; CHECK-NOFP: mviy 4096(%r15), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f2:
+; CHECK-FP: mviy 4096(%r11), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [3920 x i8], align 8
+ %region2 = alloca [3920 x i8], align 8
+ %ptr1 = getelementptr inbounds [3920 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [3920 x i8]* %region2, i64 0, i64 8
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Test the last offset that is in range of the 20-bit form.
+;
+; The last in-range doubleword offset is 524280, so by the same reasoning
+; as above, we need to allocate objects of 524280 - 168 = 524112 bytes.
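+; Byte 7 of the higher region is then at 168 + 524112 + 7 = 524287, the
+; last offset in MVIY's 20-bit range.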
+define void @f3() {
+; CHECK-NOFP: f3:
+; CHECK-NOFP: mviy 524287(%r15), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f3:
+; CHECK-FP: mviy 524287(%r11), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 7
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Test the first out-of-range offset. We can't use an index register here,
+; and the offset is also out of LAY's range, so expect a constant load
+; followed by an addition.
+define void @f4() {
+; CHECK-NOFP: f4:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: mvi 0(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f4:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: mvi 0(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Add 4095 to the previous offset, to test the other end of the MVI range.
+; The instruction will actually be STCY before frame lowering.
+define void @f5() {
+; CHECK-NOFP: f5:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: mvi 4095(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f5:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: mvi 4095(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 4103
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 4103
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Test the next offset after that, which uses MVIY instead of MVI.
+define void @f6() {
+; CHECK-NOFP: f6:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: mviy 4096(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f6:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: mviy 4096(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 4104
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 4104
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Now try an offset of 524287 from the start of the object, with the
+; object being at offset 1048576 (1 << 20). The backend prefers to create
+; anchors 0x10000 bytes apart, so that the high part can be loaded using
+; LLILH while still using MVI in more cases than 0x40000 anchors would.
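+; Concretely: the byte sits at 168 + 1048408 + 524287 = 1572863 (0x17ffff),
+; which splits into an LLILH anchor of 23 << 16 and an MVIY displacement
+; of 65535.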
+define void @f7() {
+; CHECK-NOFP: f7:
+; CHECK-NOFP: llilh %r1, 23
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: mviy 65535(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f7:
+; CHECK-FP: llilh %r1, 23
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: mviy 65535(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [1048408 x i8], align 8
+ %region2 = alloca [1048408 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524287
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Keep the object-relative offset the same but bump the size of the
+; objects by one doubleword.
+define void @f8() {
+; CHECK-NOFP: f8:
+; CHECK-NOFP: llilh %r1, 24
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: mvi 7(%r1), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f8:
+; CHECK-FP: llilh %r1, 24
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: mvi 7(%r1), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [1048416 x i8], align 8
+ %region2 = alloca [1048416 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048416 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048416 x i8]* %region2, i64 0, i64 524287
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Check a case where the original displacement is out of range. The backend
+; should force separate address logic from the outset. We don't yet do any
+; kind of anchor optimization, so there should be no offset on the MVI itself.
+;
+; Before frame lowering this is an LA followed by the AGFI seen below.
+; The LA then gets lowered into the LLILH/LA form. The exact sequence
+; isn't that important though.
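+; The byte sits at 168 + 1048416 + 524288 = 1572872 (0x180008): LLILH/LA
+; form 16 << 16 + 8 = 1048584 and the AGFI adds the remaining 524288.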
+define void @f9() {
+; CHECK-NOFP: f9:
+; CHECK-NOFP: llilh [[R1:%r[1-5]]], 16
+; CHECK-NOFP: la [[R2:%r[1-5]]], 8([[R1]],%r15)
+; CHECK-NOFP: agfi [[R2]], 524288
+; CHECK-NOFP: mvi 0([[R2]]), 42
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f9:
+; CHECK-FP: llilh [[R1:%r[1-5]]], 16
+; CHECK-FP: la [[R2:%r[1-5]]], 8([[R1]],%r11)
+; CHECK-FP: agfi [[R2]], 524288
+; CHECK-FP: mvi 0([[R2]]), 42
+; CHECK-FP: br %r14
+ %region1 = alloca [1048416 x i8], align 8
+ %region2 = alloca [1048416 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048416 x i8]* %region1, i64 0, i64 524288
+ %ptr2 = getelementptr inbounds [1048416 x i8]* %region2, i64 0, i64 524288
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ ret void
+}
+
+; Repeat f4 in a case that needs the emergency spill slot (because all
+; call-clobbered registers are live and no call-saved ones have been
+; allocated).
+define void @f10(i32 *%vptr) {
+; CHECK-NOFP: f10:
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: llilh [[REGISTER]], 8
+; CHECK-NOFP: agr [[REGISTER]], %r15
+; CHECK-NOFP: mvi 0([[REGISTER]]), 42
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f10:
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: llilh [[REGISTER]], 8
+; CHECK-FP: agr [[REGISTER]], %r11
+; CHECK-FP: mvi 0([[REGISTER]]), 42
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i3 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i3, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ ret void
+}
+
+; And again with maximum register pressure. The only spill slot that the
+; NOFP case needs is the emergency one, so the offsets are the same as for f4.
+; However, the FP case uses %r11 as the frame pointer and must therefore
+; spill a second register. This leads to an extra displacement of 8.
+define void @f11(i32 *%vptr) {
+; CHECK-NOFP: f11:
+; CHECK-NOFP: stmg %r6, %r15,
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: llilh [[REGISTER]], 8
+; CHECK-NOFP: agr [[REGISTER]], %r15
+; CHECK-NOFP: mvi 0([[REGISTER]]), 42
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: lmg %r6, %r15,
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f11:
+; CHECK-FP: stmg %r6, %r15,
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: llilh [[REGISTER]], 8
+; CHECK-FP: agr [[REGISTER]], %r11
+; CHECK-FP: mvi 8([[REGISTER]]), 42
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: lmg %r6, %r15,
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i3 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %i6 = load volatile i32 *%vptr
+ %i7 = load volatile i32 *%vptr
+ %i8 = load volatile i32 *%vptr
+ %i9 = load volatile i32 *%vptr
+ %i10 = load volatile i32 *%vptr
+ %i11 = load volatile i32 *%vptr
+ %i12 = load volatile i32 *%vptr
+ %i13 = load volatile i32 *%vptr
+ %i14 = load volatile i32 *%vptr
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 42, i8 *%ptr1
+ store volatile i8 42, i8 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i3, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ store volatile i32 %i6, i32 *%vptr
+ store volatile i32 %i7, i32 *%vptr
+ store volatile i32 %i8, i32 *%vptr
+ store volatile i32 %i9, i32 *%vptr
+ store volatile i32 %i10, i32 *%vptr
+ store volatile i32 %i11, i32 *%vptr
+ store volatile i32 %i12, i32 *%vptr
+ store volatile i32 %i13, i32 *%vptr
+ store volatile i32 %i14, i32 *%vptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-15.ll b/test/CodeGen/SystemZ/frame-15.ll
new file mode 100644
index 000000000000..bc87e174d0b6
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-15.ll
@@ -0,0 +1,352 @@
+; Test the handling of base + index + 12-bit displacement addresses for
+; large frames, in cases where no 20-bit form exists.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck -check-prefix=CHECK-NOFP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+
+declare void @foo(float *%ptr1, float *%ptr2)
+
+; This file tests what happens when a displacement is converted from
+; being relative to the start of a frame object to being relative to
+; the frame itself. In some cases the test is only possible if two
+; objects are allocated.
+;
+; Rather than rely on a particular order for those objects, the tests
+; instead allocate two objects of the same size and apply the test to
+; both of them. For consistency, all tests follow this model, even if
+; one object would actually be enough.
+
+; First check the highest in-range offset after conversion, which is 4092
+; for word-addressing instructions like LDEB.
+;
+; The last in-range doubleword offset is 4088. Since the frame has an
+; emergency spill slot at 160(%r15), the amount that we need to allocate
+; in order to put another object at offset 4088 is (4088 - 168) / 4 = 980
+; words.
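+; Element 1 of the higher region is then at 168 + 980 * 4 + 4 = 4092,
+; the last displacement that LDEB can encode directly.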
+define void @f1(double *%dst) {
+; CHECK-NOFP: f1:
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4092(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f1:
+; CHECK-FP: ldeb {{%f[0-7]}}, 4092(%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x float], align 8
+ %region2 = alloca [980 x float], align 8
+ %start1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 1
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Test the first out-of-range offset.
+define void @f2(double *%dst) {
+; CHECK-NOFP: f2:
+; CHECK-NOFP: lghi %r1, 4096
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 0(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f2:
+; CHECK-FP: lghi %r1, 4096
+; CHECK-FP: ldeb {{%f[0-7]}}, 0(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x float], align 8
+ %region2 = alloca [980 x float], align 8
+ %start1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 2
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Test the next offset after that.
+define void @f3(double *%dst) {
+; CHECK-NOFP: f3:
+; CHECK-NOFP: lghi %r1, 4096
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f3:
+; CHECK-FP: lghi %r1, 4096
+; CHECK-FP: ldeb {{%f[0-7]}}, 4(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x float], align 8
+ %region2 = alloca [980 x float], align 8
+ %start1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 3
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Add 4096 bytes (1024 words) to the size of each object and repeat.
+define void @f4(double *%dst) {
+; CHECK-NOFP: f4:
+; CHECK-NOFP: lghi %r1, 4096
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4092(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f4:
+; CHECK-FP: lghi %r1, 4096
+; CHECK-FP: ldeb {{%f[0-7]}}, 4092(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x float], align 8
+ %region2 = alloca [2004 x float], align 8
+ %start1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 1
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; ...as above.
+define void @f5(double *%dst) {
+; CHECK-NOFP: f5:
+; CHECK-NOFP: lghi %r1, 8192
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 0(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f5:
+; CHECK-FP: lghi %r1, 8192
+; CHECK-FP: ldeb {{%f[0-7]}}, 0(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x float], align 8
+ %region2 = alloca [2004 x float], align 8
+ %start1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 2
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; ...as above.
+define void @f6(double *%dst) {
+; CHECK-NOFP: f6:
+; CHECK-NOFP: lghi %r1, 8192
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f6:
+; CHECK-FP: lghi %r1, 8192
+; CHECK-FP: ldeb {{%f[0-7]}}, 4(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [2004 x float], align 8
+ %region2 = alloca [2004 x float], align 8
+ %start1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 3
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Now try an offset of 4092 from the start of the object, with the object
+; being at offset 8192. This time we need objects of (8192 - 168) / 4 = 2006
+; words.
+define void @f7(double *%dst) {
+; CHECK-NOFP: f7:
+; CHECK-NOFP: lghi %r1, 8192
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4092(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f7:
+; CHECK-FP: lghi %r1, 8192
+; CHECK-FP: ldeb {{%f[0-7]}}, 4092(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [2006 x float], align 8
+ %region2 = alloca [2006 x float], align 8
+ %start1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 1023
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Keep the object-relative offset the same but bump the size of the
+; objects by one doubleword.
+define void @f8(double *%dst) {
+; CHECK-NOFP: f8:
+; CHECK-NOFP: lghi %r1, 12288
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 4(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f8:
+; CHECK-FP: lghi %r1, 12288
+; CHECK-FP: ldeb {{%f[0-7]}}, 4(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [2008 x float], align 8
+ %region2 = alloca [2008 x float], align 8
+ %start1 = getelementptr inbounds [2008 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2008 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2008 x float]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2008 x float]* %region2, i64 0, i64 1023
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Check a case where the original displacement is out of range. The backend
+; should force an LAY from the outset. We don't yet do any kind of anchor
+; optimization, so there should be no offset on the LDEB itself.
+define void @f9(double *%dst) {
+; CHECK-NOFP: f9:
+; CHECK-NOFP: lay %r1, 12296(%r15)
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 0(%r1)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f9:
+; CHECK-FP: lay %r1, 12296(%r11)
+; CHECK-FP: ldeb {{%f[0-7]}}, 0(%r1)
+; CHECK-FP: br %r14
+ %region1 = alloca [2008 x float], align 8
+ %region2 = alloca [2008 x float], align 8
+ %start1 = getelementptr inbounds [2008 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2008 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [2008 x float]* %region1, i64 0, i64 1024
+ %ptr2 = getelementptr inbounds [2008 x float]* %region2, i64 0, i64 1024
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
+
+; Repeat f2 in a case that needs the emergency spill slot, because all
+; call-clobbered and allocated call-saved registers are live. Note that
+; %vptr and %dst are copied to call-saved registers, freeing up %r2 and
+; %r3 during the main test.
+define void @f10(i32 *%vptr, double *%dst) {
+; CHECK-NOFP: f10:
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: lghi [[REGISTER]], 4096
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r15)
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f10:
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: lghi [[REGISTER]], 4096
+; CHECK-FP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r11)
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x float], align 8
+ %region2 = alloca [980 x float], align 8
+ %start1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %ptr1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 2
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i2 = load volatile i32 *%vptr
+ %i3 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %i14 = load volatile i32 *%vptr
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i2, i32 *%vptr
+ store volatile i32 %i3, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ store volatile i32 %i14, i32 *%vptr
+ ret void
+}
+
+; Repeat f2 in a case where the index register is already occupied.
+define void @f11(double *%dst, i64 %index) {
+; CHECK-NOFP: f11:
+; CHECK-NOFP: lgr [[REGISTER:%r[1-9][0-5]?]], %r3
+; CHECK-NOFP: lay %r1, 4096(%r15)
+; CHECK-NOFP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r1)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f11:
+; CHECK-FP: lgr [[REGISTER:%r[1-9][0-5]?]], %r3
+; CHECK-FP: lay %r1, 4096(%r11)
+; CHECK-FP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r1)
+; CHECK-FP: br %r14
+ %region1 = alloca [980 x float], align 8
+ %region2 = alloca [980 x float], align 8
+ %start1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 0
+ call void @foo(float *%start1, float *%start2)
+ %elem1 = getelementptr inbounds [980 x float]* %region1, i64 0, i64 2
+ %elem2 = getelementptr inbounds [980 x float]* %region2, i64 0, i64 2
+ %base1 = ptrtoint float *%elem1 to i64
+ %base2 = ptrtoint float *%elem2 to i64
+ %addr1 = add i64 %base1, %index
+ %addr2 = add i64 %base2, %index
+ %ptr1 = inttoptr i64 %addr1 to float *
+ %ptr2 = inttoptr i64 %addr2 to float *
+ %float1 = load float *%ptr1
+ %float2 = load float *%ptr2
+ %double1 = fpext float %float1 to double
+ %double2 = fpext float %float2 to double
+ store volatile double %double1, double *%dst
+ store volatile double %double2, double *%dst
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-16.ll b/test/CodeGen/SystemZ/frame-16.ll
new file mode 100644
index 000000000000..cc5529f920ca
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-16.ll
@@ -0,0 +1,327 @@
+; Test the handling of base + index + displacement addresses for large frames,
+; in cases where both 12-bit and 20-bit displacements are allowed.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck -check-prefix=CHECK-NOFP %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -disable-fp-elim | FileCheck -check-prefix=CHECK-FP %s
+
+; This file tests what happens when a displacement is converted from
+; being relative to the start of a frame object to being relative to
+; the frame itself. In some cases the test is only possible if two
+; objects are allocated.
+;
+; Rather than rely on a particular order for those objects, the tests
+; instead allocate two objects of the same size and apply the test to
+; both of them. For consistency, all tests follow this model, even if
+; one object would actually be enough.
+
+; First check the highest offset that is in range of the 12-bit form.
+;
+; The last in-range doubleword offset is 4088. Since the frame has an
+; emergency spill slot at 160(%r15), the amount that we need to allocate
+; in order to put another object at offset 4088 is 4088 - 168 = 3920 bytes.
+define void @f1(i8 %byte) {
+; CHECK-NOFP: f1:
+; CHECK-NOFP: stc %r2, 4095(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f1:
+; CHECK-FP: stc %r2, 4095(%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [3920 x i8], align 8
+ %region2 = alloca [3920 x i8], align 8
+ %ptr1 = getelementptr inbounds [3920 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [3920 x i8]* %region2, i64 0, i64 7
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Test the first offset that is out-of-range of the 12-bit form.
+define void @f2(i8 %byte) {
+; CHECK-NOFP: f2:
+; CHECK-NOFP: stcy %r2, 4096(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f2:
+; CHECK-FP: stcy %r2, 4096(%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [3920 x i8], align 8
+ %region2 = alloca [3920 x i8], align 8
+ %ptr1 = getelementptr inbounds [3920 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [3920 x i8]* %region2, i64 0, i64 8
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Test the last offset that is in range of the 20-bit form.
+;
+; The last in-range doubleword offset is 524280, so by the same reasoning
+; as above, we need to allocate objects of 524280 - 168 = 524112 bytes.
+define void @f3(i8 %byte) {
+; CHECK-NOFP: f3:
+; CHECK-NOFP: stcy %r2, 524287(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f3:
+; CHECK-FP: stcy %r2, 524287(%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 7
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Test the first out-of-range offset. The offset is out of LAY's range,
+; but STC takes an index register, so expect the high part to be loaded
+; with LLILH and used as an index.
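+; The byte sits at 168 + 524112 + 8 = 524288 = 8 << 16, hence the
+; LLILH %r1, 8 and a zero displacement on the STC.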
+define void @f4(i8 %byte) {
+; CHECK-NOFP: f4:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: stc %r2, 0(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f4:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: stc %r2, 0(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Add 4095 to the previous offset, to test the other end of the STC range.
+; The instruction will actually be STCY before frame lowering.
+define void @f5(i8 %byte) {
+; CHECK-NOFP: f5:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: stc %r2, 4095(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f5:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: stc %r2, 4095(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 4103
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 4103
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Test the next offset after that, which uses STCY instead of STC.
+define void @f6(i8 %byte) {
+; CHECK-NOFP: f6:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: stcy %r2, 4096(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f6:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: stcy %r2, 4096(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 4104
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 4104
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Now try an offset of 524287 from the start of the object, with the
+; object being at offset 1048576 (1 << 20). The backend prefers to create
+; anchors 0x10000 bytes apart, so that the high part can be loaded using
+; LLILH while still using STC in more cases than 0x40000 anchors would.
+define void @f7(i8 %byte) {
+; CHECK-NOFP: f7:
+; CHECK-NOFP: llilh %r1, 23
+; CHECK-NOFP: stcy %r2, 65535(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f7:
+; CHECK-FP: llilh %r1, 23
+; CHECK-FP: stcy %r2, 65535(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [1048408 x i8], align 8
+ %region2 = alloca [1048408 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524287
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Keep the object-relative offset the same but bump the size of the
+; objects by one doubleword.
+define void @f8(i8 %byte) {
+; CHECK-NOFP: f8:
+; CHECK-NOFP: llilh %r1, 24
+; CHECK-NOFP: stc %r2, 7(%r1,%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f8:
+; CHECK-FP: llilh %r1, 24
+; CHECK-FP: stc %r2, 7(%r1,%r11)
+; CHECK-FP: br %r14
+ %region1 = alloca [1048416 x i8], align 8
+ %region2 = alloca [1048416 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048416 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048416 x i8]* %region2, i64 0, i64 524287
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Check a case where the original displacement is out of range. The backend
+; should force separate address logic from the outset. We don't yet do any
+; kind of anchor optimization, so there should be no offset on the STC itself.
+;
+; Before frame lowering this is an LA followed by the AGFI seen below.
+; The LA then gets lowered into the LLILH/LA form. The exact sequence
+; isn't that important though.
+define void @f9(i8 %byte) {
+; CHECK-NOFP: f9:
+; CHECK-NOFP: llilh [[R1:%r[1-5]]], 16
+; CHECK-NOFP: la [[R2:%r[1-5]]], 8([[R1]],%r15)
+; CHECK-NOFP: agfi [[R2]], 524288
+; CHECK-NOFP: stc %r2, 0([[R2]])
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f9:
+; CHECK-FP: llilh [[R1:%r[1-5]]], 16
+; CHECK-FP: la [[R2:%r[1-5]]], 8([[R1]],%r11)
+; CHECK-FP: agfi [[R2]], 524288
+; CHECK-FP: stc %r2, 0([[R2]])
+; CHECK-FP: br %r14
+ %region1 = alloca [1048416 x i8], align 8
+ %region2 = alloca [1048416 x i8], align 8
+ %ptr1 = getelementptr inbounds [1048416 x i8]* %region1, i64 0, i64 524288
+ %ptr2 = getelementptr inbounds [1048416 x i8]* %region2, i64 0, i64 524288
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
+
+; Repeat f4 in a case that needs the emergency spill slot (because all
+; call-clobbered registers are live and no call-saved ones have been
+; allocated).
+define void @f10(i32 *%vptr, i8 %byte) {
+; CHECK-NOFP: f10:
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: llilh [[REGISTER]], 8
+; CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15)
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f10:
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: llilh [[REGISTER]], 8
+; CHECK-FP: stc %r3, 0([[REGISTER]],%r11)
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ ret void
+}
+
+; And again with maximum register pressure. The only spill slot that the
+; NOFP case needs is the emergency one, so the offsets are the same as for f4.
+; However, the FP case uses %r11 as the frame pointer and must therefore
+; spill a second register. This leads to an extra displacement of 8.
+define void @f11(i32 *%vptr, i8 %byte) {
+; CHECK-NOFP: f11:
+; CHECK-NOFP: stmg %r6, %r15,
+; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r15)
+; CHECK-NOFP: llilh [[REGISTER]], 8
+; CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15)
+; CHECK-NOFP: lg [[REGISTER]], 160(%r15)
+; CHECK-NOFP: lmg %r6, %r15,
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f11:
+; CHECK-FP: stmg %r6, %r15,
+; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], 160(%r11)
+; CHECK-FP: llilh [[REGISTER]], 8
+; CHECK-FP: stc %r3, 8([[REGISTER]],%r11)
+; CHECK-FP: lg [[REGISTER]], 160(%r11)
+; CHECK-FP: lmg %r6, %r15,
+; CHECK-FP: br %r14
+ %i0 = load volatile i32 *%vptr
+ %i1 = load volatile i32 *%vptr
+ %i4 = load volatile i32 *%vptr
+ %i5 = load volatile i32 *%vptr
+ %i6 = load volatile i32 *%vptr
+ %i7 = load volatile i32 *%vptr
+ %i8 = load volatile i32 *%vptr
+ %i9 = load volatile i32 *%vptr
+ %i10 = load volatile i32 *%vptr
+ %i11 = load volatile i32 *%vptr
+ %i12 = load volatile i32 *%vptr
+ %i13 = load volatile i32 *%vptr
+ %i14 = load volatile i32 *%vptr
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 8
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ store volatile i32 %i0, i32 *%vptr
+ store volatile i32 %i1, i32 *%vptr
+ store volatile i32 %i4, i32 *%vptr
+ store volatile i32 %i5, i32 *%vptr
+ store volatile i32 %i6, i32 *%vptr
+ store volatile i32 %i7, i32 *%vptr
+ store volatile i32 %i8, i32 *%vptr
+ store volatile i32 %i9, i32 *%vptr
+ store volatile i32 %i10, i32 *%vptr
+ store volatile i32 %i11, i32 *%vptr
+ store volatile i32 %i12, i32 *%vptr
+ store volatile i32 %i13, i32 *%vptr
+ store volatile i32 %i14, i32 *%vptr
+ ret void
+}
+
+; Repeat f4 in a case where the index register is already occupied.
+define void @f12(i8 %byte, i64 %index) {
+; CHECK-NOFP: f12:
+; CHECK-NOFP: llilh %r1, 8
+; CHECK-NOFP: agr %r1, %r15
+; CHECK-NOFP: stc %r2, 0(%r3,%r1)
+; CHECK-NOFP: br %r14
+;
+; CHECK-FP: f12:
+; CHECK-FP: llilh %r1, 8
+; CHECK-FP: agr %r1, %r11
+; CHECK-FP: stc %r2, 0(%r3,%r1)
+; CHECK-FP: br %r14
+ %region1 = alloca [524112 x i8], align 8
+ %region2 = alloca [524112 x i8], align 8
+ %index1 = add i64 %index, 8
+ %ptr1 = getelementptr inbounds [524112 x i8]* %region1, i64 0, i64 %index1
+ %ptr2 = getelementptr inbounds [524112 x i8]* %region2, i64 0, i64 %index1
+ store volatile i8 %byte, i8 *%ptr1
+ store volatile i8 %byte, i8 *%ptr2
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-17.ll b/test/CodeGen/SystemZ/frame-17.ll
new file mode 100644
index 000000000000..613d9f879558
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-17.ll
@@ -0,0 +1,177 @@
+; Test spilling of FPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; We need to save and restore 8 of the 16 FPRs and allocate an additional
+; 4-byte spill slot, rounded to 8 bytes. The frame size should be exactly
+; 160 + 8 * 8 + 8 = 232.
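+; Reading the offsets off the stores below, the layout above the ABI
+; area is:
+;   160..167   the rounded 8-byte slot, with the float stored at 164
+;   168..231   %f15 at the bottom up to %f8 at the top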
+define void @f1(float *%ptr) {
+; CHECK: f1:
+; CHECK: aghi %r15, -232
+; CHECK: std %f8, 224(%r15)
+; CHECK: std %f9, 216(%r15)
+; CHECK: std %f10, 208(%r15)
+; CHECK: std %f11, 200(%r15)
+; CHECK: std %f12, 192(%r15)
+; CHECK: std %f13, 184(%r15)
+; CHECK: std %f14, 176(%r15)
+; CHECK: std %f15, 168(%r15)
+; CHECK-NOT: 160(%r15)
+; CHECK: ste [[REGISTER:%f[0-9]+]], 164(%r15)
+; CHECK-NOT: 160(%r15)
+; CHECK: le [[REGISTER]], 164(%r15)
+; CHECK-NOT: 160(%r15)
+; CHECK: ld %f8, 224(%r15)
+; CHECK: ld %f9, 216(%r15)
+; CHECK: ld %f10, 208(%r15)
+; CHECK: ld %f11, 200(%r15)
+; CHECK: ld %f12, 192(%r15)
+; CHECK: ld %f13, 184(%r15)
+; CHECK: ld %f14, 176(%r15)
+; CHECK: ld %f15, 168(%r15)
+; CHECK: aghi %r15, 232
+; CHECK: br %r14
+ %l0 = load volatile float *%ptr
+ %l1 = load volatile float *%ptr
+ %l2 = load volatile float *%ptr
+ %l3 = load volatile float *%ptr
+ %l4 = load volatile float *%ptr
+ %l5 = load volatile float *%ptr
+ %l6 = load volatile float *%ptr
+ %l7 = load volatile float *%ptr
+ %l8 = load volatile float *%ptr
+ %l9 = load volatile float *%ptr
+ %l10 = load volatile float *%ptr
+ %l11 = load volatile float *%ptr
+ %l12 = load volatile float *%ptr
+ %l13 = load volatile float *%ptr
+ %l14 = load volatile float *%ptr
+ %l15 = load volatile float *%ptr
+ %lx = load volatile float *%ptr
+ store volatile float %lx, float *%ptr
+ store volatile float %l15, float *%ptr
+ store volatile float %l14, float *%ptr
+ store volatile float %l13, float *%ptr
+ store volatile float %l12, float *%ptr
+ store volatile float %l11, float *%ptr
+ store volatile float %l10, float *%ptr
+ store volatile float %l9, float *%ptr
+ store volatile float %l8, float *%ptr
+ store volatile float %l7, float *%ptr
+ store volatile float %l6, float *%ptr
+ store volatile float %l5, float *%ptr
+ store volatile float %l4, float *%ptr
+ store volatile float %l3, float *%ptr
+ store volatile float %l2, float *%ptr
+ store volatile float %l1, float *%ptr
+ store volatile float %l0, float *%ptr
+ ret void
+}
+
+; Same for doubles, except that the full spill slot is used.
+define void @f2(double *%ptr) {
+; CHECK: f2:
+; CHECK: aghi %r15, -232
+; CHECK: std %f8, 224(%r15)
+; CHECK: std %f9, 216(%r15)
+; CHECK: std %f10, 208(%r15)
+; CHECK: std %f11, 200(%r15)
+; CHECK: std %f12, 192(%r15)
+; CHECK: std %f13, 184(%r15)
+; CHECK: std %f14, 176(%r15)
+; CHECK: std %f15, 168(%r15)
+; CHECK: std [[REGISTER:%f[0-9]+]], 160(%r15)
+; CHECK: ld [[REGISTER]], 160(%r15)
+; CHECK: ld %f8, 224(%r15)
+; CHECK: ld %f9, 216(%r15)
+; CHECK: ld %f10, 208(%r15)
+; CHECK: ld %f11, 200(%r15)
+; CHECK: ld %f12, 192(%r15)
+; CHECK: ld %f13, 184(%r15)
+; CHECK: ld %f14, 176(%r15)
+; CHECK: ld %f15, 168(%r15)
+; CHECK: aghi %r15, 232
+; CHECK: br %r14
+ %l0 = load volatile double *%ptr
+ %l1 = load volatile double *%ptr
+ %l2 = load volatile double *%ptr
+ %l3 = load volatile double *%ptr
+ %l4 = load volatile double *%ptr
+ %l5 = load volatile double *%ptr
+ %l6 = load volatile double *%ptr
+ %l7 = load volatile double *%ptr
+ %l8 = load volatile double *%ptr
+ %l9 = load volatile double *%ptr
+ %l10 = load volatile double *%ptr
+ %l11 = load volatile double *%ptr
+ %l12 = load volatile double *%ptr
+ %l13 = load volatile double *%ptr
+ %l14 = load volatile double *%ptr
+ %l15 = load volatile double *%ptr
+ %lx = load volatile double *%ptr
+ store volatile double %lx, double *%ptr
+ store volatile double %l15, double *%ptr
+ store volatile double %l14, double *%ptr
+ store volatile double %l13, double *%ptr
+ store volatile double %l12, double *%ptr
+ store volatile double %l11, double *%ptr
+ store volatile double %l10, double *%ptr
+ store volatile double %l9, double *%ptr
+ store volatile double %l8, double *%ptr
+ store volatile double %l7, double *%ptr
+ store volatile double %l6, double *%ptr
+ store volatile double %l5, double *%ptr
+ store volatile double %l4, double *%ptr
+ store volatile double %l3, double *%ptr
+ store volatile double %l2, double *%ptr
+ store volatile double %l1, double *%ptr
+ store volatile double %l0, double *%ptr
+ ret void
+}
+
+; The long double case needs a 16-byte spill slot.
+define void @f3(fp128 *%ptr) {
+; CHECK: f3:
+; CHECK: aghi %r15, -240
+; CHECK: std %f8, 232(%r15)
+; CHECK: std %f9, 224(%r15)
+; CHECK: std %f10, 216(%r15)
+; CHECK: std %f11, 208(%r15)
+; CHECK: std %f12, 200(%r15)
+; CHECK: std %f13, 192(%r15)
+; CHECK: std %f14, 184(%r15)
+; CHECK: std %f15, 176(%r15)
+; CHECK: std [[REGISTER1:%f[0-9]+]], 160(%r15)
+; CHECK: std [[REGISTER2:%f[0-9]+]], 168(%r15)
+; CHECK: ld [[REGISTER1]], 160(%r15)
+; CHECK: ld [[REGISTER2]], 168(%r15)
+; CHECK: ld %f8, 232(%r15)
+; CHECK: ld %f9, 224(%r15)
+; CHECK: ld %f10, 216(%r15)
+; CHECK: ld %f11, 208(%r15)
+; CHECK: ld %f12, 200(%r15)
+; CHECK: ld %f13, 192(%r15)
+; CHECK: ld %f14, 184(%r15)
+; CHECK: ld %f15, 176(%r15)
+; CHECK: aghi %r15, 240
+; CHECK: br %r14
+ %l0 = load volatile fp128 *%ptr
+ %l1 = load volatile fp128 *%ptr
+ %l4 = load volatile fp128 *%ptr
+ %l5 = load volatile fp128 *%ptr
+ %l8 = load volatile fp128 *%ptr
+ %l9 = load volatile fp128 *%ptr
+ %l12 = load volatile fp128 *%ptr
+ %l13 = load volatile fp128 *%ptr
+ %lx = load volatile fp128 *%ptr
+ store volatile fp128 %lx, fp128 *%ptr
+ store volatile fp128 %l13, fp128 *%ptr
+ store volatile fp128 %l12, fp128 *%ptr
+ store volatile fp128 %l9, fp128 *%ptr
+ store volatile fp128 %l8, fp128 *%ptr
+ store volatile fp128 %l5, fp128 *%ptr
+ store volatile fp128 %l4, fp128 *%ptr
+ store volatile fp128 %l1, fp128 *%ptr
+ store volatile fp128 %l0, fp128 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/frame-18.ll b/test/CodeGen/SystemZ/frame-18.ll
new file mode 100644
index 000000000000..a9977ed04b42
--- /dev/null
+++ b/test/CodeGen/SystemZ/frame-18.ll
@@ -0,0 +1,91 @@
+; Test spilling of GPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; We need to allocate a 4-byte spill slot, rounded to 8 bytes. The frame
+; size should be exactly 160 + 8 = 168.
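+; For reference: the low 160 bytes of an s390x stack frame are reserved
+; (backchain and register save area for callees), so locally allocated
+; spill slots start at offset 160 from the new %r15. The 4-byte slot here
+; sits within the doubleword at 160, at 164(%r15); the CHECK-NOTs verify
+; that the rest of that doubleword (160(%r15)) is never touched.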
+define void @f1(i32 *%ptr) {
+; CHECK: f1:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK: aghi %r15, -168
+; CHECK-NOT: 160(%r15)
+; CHECK: st [[REGISTER:%r[0-9]+]], 164(%r15)
+; CHECK-NOT: 160(%r15)
+; CHECK: l [[REGISTER]], 164(%r15)
+; CHECK-NOT: 160(%r15)
+; CHECK: lmg %r6, %r15, 216(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i32 *%ptr
+ %l1 = load volatile i32 *%ptr
+ %l3 = load volatile i32 *%ptr
+ %l4 = load volatile i32 *%ptr
+ %l5 = load volatile i32 *%ptr
+ %l6 = load volatile i32 *%ptr
+ %l7 = load volatile i32 *%ptr
+ %l8 = load volatile i32 *%ptr
+ %l9 = load volatile i32 *%ptr
+ %l10 = load volatile i32 *%ptr
+ %l11 = load volatile i32 *%ptr
+ %l12 = load volatile i32 *%ptr
+ %l13 = load volatile i32 *%ptr
+ %l14 = load volatile i32 *%ptr
+ %lx = load volatile i32 *%ptr
+ store volatile i32 %lx, i32 *%ptr
+ store volatile i32 %l14, i32 *%ptr
+ store volatile i32 %l13, i32 *%ptr
+ store volatile i32 %l12, i32 *%ptr
+ store volatile i32 %l11, i32 *%ptr
+ store volatile i32 %l10, i32 *%ptr
+ store volatile i32 %l9, i32 *%ptr
+ store volatile i32 %l8, i32 *%ptr
+ store volatile i32 %l7, i32 *%ptr
+ store volatile i32 %l6, i32 *%ptr
+ store volatile i32 %l5, i32 *%ptr
+ store volatile i32 %l4, i32 *%ptr
+ store volatile i32 %l3, i32 *%ptr
+ store volatile i32 %l1, i32 *%ptr
+ store volatile i32 %l0, i32 *%ptr
+ ret void
+}
+
+; Same for i64, except that the full spill slot is used.
+define void @f2(i64 *%ptr) {
+; CHECK: f2:
+; CHECK: stmg %r6, %r15, 48(%r15)
+; CHECK: aghi %r15, -168
+; CHECK: stg [[REGISTER:%r[0-9]+]], 160(%r15)
+; CHECK: lg [[REGISTER]], 160(%r15)
+; CHECK: lmg %r6, %r15, 216(%r15)
+; CHECK: br %r14
+ %l0 = load volatile i64 *%ptr
+ %l1 = load volatile i64 *%ptr
+ %l3 = load volatile i64 *%ptr
+ %l4 = load volatile i64 *%ptr
+ %l5 = load volatile i64 *%ptr
+ %l6 = load volatile i64 *%ptr
+ %l7 = load volatile i64 *%ptr
+ %l8 = load volatile i64 *%ptr
+ %l9 = load volatile i64 *%ptr
+ %l10 = load volatile i64 *%ptr
+ %l11 = load volatile i64 *%ptr
+ %l12 = load volatile i64 *%ptr
+ %l13 = load volatile i64 *%ptr
+ %l14 = load volatile i64 *%ptr
+ %lx = load volatile i64 *%ptr
+ store volatile i64 %lx, i64 *%ptr
+ store volatile i64 %l14, i64 *%ptr
+ store volatile i64 %l13, i64 *%ptr
+ store volatile i64 %l12, i64 *%ptr
+ store volatile i64 %l11, i64 *%ptr
+ store volatile i64 %l10, i64 *%ptr
+ store volatile i64 %l9, i64 *%ptr
+ store volatile i64 %l8, i64 *%ptr
+ store volatile i64 %l7, i64 *%ptr
+ store volatile i64 %l6, i64 *%ptr
+ store volatile i64 %l5, i64 *%ptr
+ store volatile i64 %l4, i64 *%ptr
+ store volatile i64 %l3, i64 *%ptr
+ store volatile i64 %l1, i64 *%ptr
+ store volatile i64 %l0, i64 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/insert-01.ll b/test/CodeGen/SystemZ/insert-01.ll
new file mode 100644
index 000000000000..98ddf56959bf
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-01.ll
@@ -0,0 +1,230 @@
+; Test insertions of memory into the low byte of an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check a plain insertion with (or (and ... -0x100) (zext (load ...))).
+; The whole sequence can be performed by IC.
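+; As a worked example: with %orig = 0x12345678 and a loaded byte of 0xab,
+; the and/zext/or sequence computes 0x123456ab, which is exactly the
+; effect of IC on the low byte of %r2.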
+define i32 @f1(i32 %orig, i8 *%ptr) {
+; CHECK: f1:
+; CHECK-NOT: ni
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = and i32 %orig, -256
+ %or = or i32 %ptr1, %ptr2
+ ret i32 %or
+}
+
+; Like f1, but with the operands reversed.
+define i32 @f2(i32 %orig, i8 *%ptr) {
+; CHECK: f2:
+; CHECK-NOT: ni
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = and i32 %orig, -256
+ %or = or i32 %ptr2, %ptr1
+ ret i32 %or
+}
+
+; Check a case where more than the low 8 bits are masked out of the
+; register value. We can still use IC but must keep the original mask.
+define i32 @f3(i32 %orig, i8 *%ptr) {
+; CHECK: f3:
+; CHECK: nill %r2, 65024
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = and i32 %orig, -512
+ %or = or i32 %ptr1, %ptr2
+ ret i32 %or
+}
+
+; Like f3, but with the operands reversed.
+define i32 @f4(i32 %orig, i8 *%ptr) {
+; CHECK: f4:
+; CHECK: nill %r2, 65024
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = and i32 %orig, -512
+ %or = or i32 %ptr2, %ptr1
+ ret i32 %or
+}
+
+; Check a case where the low 8 bits are cleared by a shift left.
+define i32 @f5(i32 %orig, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: sll %r2, 8
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = shl i32 %orig, 8
+ %or = or i32 %ptr1, %ptr2
+ ret i32 %or
+}
+
+; Like f5, but with the operands reversed.
+define i32 @f6(i32 %orig, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: sll %r2, 8
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %ptr1 = shl i32 %orig, 8
+ %or = or i32 %ptr2, %ptr1
+ ret i32 %or
+}
+
+; Check insertions into a constant.
+define i32 @f7(i32 %orig, i8 *%ptr) {
+; CHECK: f7:
+; CHECK: lhi %r2, 256
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %or = or i32 %ptr2, 256
+ ret i32 %or
+}
+
+; Like f7, but with the operands reversed.
+define i32 @f8(i32 %orig, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: lhi %r2, 256
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i32
+ %or = or i32 256, %ptr2
+ ret i32 %or
+}
+
+; Check the high end of the IC range.
+define i32 @f9(i32 %orig, i8 *%src) {
+; CHECK: f9:
+; CHECK: ic %r2, 4095(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the next byte up, which should use ICY instead of IC.
+define i32 @f10(i32 %orig, i8 *%src) {
+; CHECK: f10:
+; CHECK: icy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the high end of the ICY range.
+define i32 @f11(i32 %orig, i8 *%src) {
+; CHECK: f11:
+; CHECK: icy %r2, 524287(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f12(i32 %orig, i8 *%src) {
+; CHECK: f12:
+; CHECK: agfi %r3, 524288
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the high end of the negative ICY range.
+define i32 @f13(i32 %orig, i8 *%src) {
+; CHECK: f13:
+; CHECK: icy %r2, -1(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the low end of the ICY range.
+define i32 @f14(i32 %orig, i8 *%src) {
+; CHECK: f14:
+; CHECK: icy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f15(i32 %orig, i8 *%src) {
+; CHECK: f15:
+; CHECK: agfi %r3, -524289
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check that IC allows an index.
+define i32 @f16(i32 %orig, i8 *%src, i64 %index) {
+; CHECK: f16:
+; CHECK: ic %r2, 4095({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %ptr1 = getelementptr i8 *%src, i64 %index
+ %ptr2 = getelementptr i8 *%ptr1, i64 4095
+ %val = load i8 *%ptr2
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
+
+; Check that ICY allows an index.
+define i32 @f17(i32 %orig, i8 *%src, i64 %index) {
+; CHECK: f17:
+; CHECK: icy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %ptr1 = getelementptr i8 *%src, i64 %index
+ %ptr2 = getelementptr i8 *%ptr1, i64 4096
+ %val = load i8 *%ptr2
+ %src2 = zext i8 %val to i32
+ %src1 = and i32 %orig, -256
+ %or = or i32 %src2, %src1
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/insert-02.ll b/test/CodeGen/SystemZ/insert-02.ll
new file mode 100644
index 000000000000..471889dede6a
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-02.ll
@@ -0,0 +1,230 @@
+; Test insertions of memory into the low byte of an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check a plain insertion with (or (and ... -0x100) (zext (load ...))).
+; The whole sequence can be performed by IC.
+define i64 @f1(i64 %orig, i8 *%ptr) {
+; CHECK: f1:
+; CHECK-NOT: ni
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = and i64 %orig, -256
+ %or = or i64 %ptr1, %ptr2
+ ret i64 %or
+}
+
+; Like f1, but with the operands reversed.
+define i64 @f2(i64 %orig, i8 *%ptr) {
+; CHECK: f2:
+; CHECK-NOT: ni
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = and i64 %orig, -256
+ %or = or i64 %ptr2, %ptr1
+ ret i64 %or
+}
+
+; Check a case where more than the low 8 bits are masked out of the
+; register value. We can still use IC but must keep the original mask.
+define i64 @f3(i64 %orig, i8 *%ptr) {
+; CHECK: f3:
+; CHECK: nill %r2, 65024
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = and i64 %orig, -512
+ %or = or i64 %ptr1, %ptr2
+ ret i64 %or
+}
+
+; Like f3, but with the operands reversed.
+define i64 @f4(i64 %orig, i8 *%ptr) {
+; CHECK: f4:
+; CHECK: nill %r2, 65024
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = and i64 %orig, -512
+ %or = or i64 %ptr2, %ptr1
+ ret i64 %or
+}
+
+; Check a case where the low 8 bits are cleared by a shift left.
+define i64 @f5(i64 %orig, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: sllg %r2, %r2, 8
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = shl i64 %orig, 8
+ %or = or i64 %ptr1, %ptr2
+ ret i64 %or
+}
+
+; Like f5, but with the operands reversed.
+define i64 @f6(i64 %orig, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: sllg %r2, %r2, 8
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %ptr1 = shl i64 %orig, 8
+ %or = or i64 %ptr2, %ptr1
+ ret i64 %or
+}
+
+; Check insertions into a constant.
+define i64 @f7(i64 %orig, i8 *%ptr) {
+; CHECK: f7:
+; CHECK: lghi %r2, 256
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %or = or i64 %ptr2, 256
+ ret i64 %or
+}
+
+; Like f7, but with the operands reversed.
+define i64 @f8(i64 %orig, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: lghi %r2, 256
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ptr2 = zext i8 %val to i64
+ %or = or i64 256, %ptr2
+ ret i64 %or
+}
+
+; Check the high end of the IC range.
+define i64 @f9(i64 %orig, i8 *%src) {
+; CHECK: f9:
+; CHECK: ic %r2, 4095(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the next byte up, which should use ICY instead of IC.
+define i64 @f10(i64 %orig, i8 *%src) {
+; CHECK: f10:
+; CHECK: icy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the high end of the ICY range.
+define i64 @f11(i64 %orig, i8 *%src) {
+; CHECK: f11:
+; CHECK: icy %r2, 524287(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f12(i64 %orig, i8 *%src) {
+; CHECK: f12:
+; CHECK: agfi %r3, 524288
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the high end of the negative ICY range.
+define i64 @f13(i64 %orig, i8 *%src) {
+; CHECK: f13:
+; CHECK: icy %r2, -1(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the low end of the ICY range.
+define i64 @f14(i64 %orig, i8 *%src) {
+; CHECK: f14:
+; CHECK: icy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f15(i64 %orig, i8 *%src) {
+; CHECK: f15:
+; CHECK: agfi %r3, -524289
+; CHECK: ic %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check that IC allows an index.
+define i64 @f16(i64 %orig, i8 *%src, i64 %index) {
+; CHECK: f16:
+; CHECK: ic %r2, 4095({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %ptr1 = getelementptr i8 *%src, i64 %index
+ %ptr2 = getelementptr i8 *%ptr1, i64 4095
+ %val = load i8 *%ptr2
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
+
+; Check that ICY allows an index.
+define i64 @f17(i64 %orig, i8 *%src, i64 %index) {
+; CHECK: f17:
+; CHECK: icy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %ptr1 = getelementptr i8 *%src, i64 %index
+ %ptr2 = getelementptr i8 *%ptr1, i64 4096
+ %val = load i8 *%ptr2
+ %src2 = zext i8 %val to i64
+ %src1 = and i64 %orig, -256
+ %or = or i64 %src2, %src1
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/insert-03.ll b/test/CodeGen/SystemZ/insert-03.ll
new file mode 100644
index 000000000000..261eabd1be7d
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-03.ll
@@ -0,0 +1,71 @@
+; Test insertions of 16-bit constants into one half of an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
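+; For reference: IILL replaces bits 48-63 of the 64-bit register (the low
+; halfword of the i32) and IILH replaces bits 32-47 (the high halfword),
+; leaving everything else unchanged. For example, with %r2 = 0x12345678,
+; "iill %r2, 0xabcd" would give 0x1234abcd and "iilh %r2, 0xabcd" would
+; give 0xabcd5678.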
+; Check the lowest useful IILL value. (We use NILL rather than IILL
+; to clear 16 bits.)
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 1
+; CHECK: br %r14
+ %and = and i32 %a, 4294901760
+ %or = or i32 %and, 1
+ ret i32 %or
+}
+
+; Check a middle value.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 32769
+; CHECK: br %r14
+ %and = and i32 %a, -65536
+ %or = or i32 %and, 32769
+ ret i32 %or
+}
+
+; Check the highest useful IILL value. (We use OILL rather than IILL
+; to set 16 bits.)
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 65534
+; CHECK: br %r14
+ %and = and i32 %a, 4294901760
+ %or = or i32 %and, 65534
+ ret i32 %or
+}
+
+; Check the lowest useful IILH value.
+define i32 @f4(i32 %a) {
+; CHECK: f4:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 1
+; CHECK: br %r14
+ %and = and i32 %a, 65535
+ %or = or i32 %and, 65536
+ ret i32 %or
+}
+
+; Check a middle value.
+define i32 @f5(i32 %a) {
+; CHECK: f5:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 32767
+; CHECK: br %r14
+ %and = and i32 %a, 65535
+ %or = or i32 %and, 2147418112
+ ret i32 %or
+}
+
+; Check the highest useful IILH value.
+define i32 @f6(i32 %a) {
+; CHECK: f6:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 65534
+; CHECK: br %r14
+ %and = and i32 %a, 65535
+ %or = or i32 %and, -131072
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/insert-04.ll b/test/CodeGen/SystemZ/insert-04.ll
new file mode 100644
index 000000000000..07f88b9859eb
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-04.ll
@@ -0,0 +1,137 @@
+; Test insertions of 16-bit constants into an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
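+; For reference, the four insert-immediate variants tile the i64:
+; IIHH writes bits 0-15, IIHL bits 16-31, IILH bits 32-47 and IILL
+; bits 48-63 (bit 0 being the most significant).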
+; Check the lowest useful IILL value. (We use NILL rather than IILL
+; to clear 16 bits.)
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 18446744073709486080
+ %or = or i64 %and, 1
+ ret i64 %or
+}
+
+; Check a middle value.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 32769
+; CHECK: br %r14
+ %and = and i64 %a, -65536
+ %or = or i64 %and, 32769
+ ret i64 %or
+}
+
+; Check the highest useful IILL value. (We use OILL rather than IILL
+; to set 16 bits.)
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: ni
+; CHECK: iill %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, 18446744073709486080
+ %or = or i64 %and, 65534
+ ret i64 %or
+}
+
+; Check the lowest useful IILH value.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414649855
+ %or = or i64 %and, 65536
+ ret i64 %or
+}
+
+; Check a middle value.
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 32767
+; CHECK: br %r14
+ %and = and i64 %a, -4294901761
+ %or = or i64 %and, 2147418112
+ ret i64 %or
+}
+
+; Check the highest useful IILH value.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK-NOT: ni
+; CHECK: iilh %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414649855
+ %or = or i64 %and, 4294836224
+ ret i64 %or
+}
+
+; Check the lowest useful IIHL value.
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK-NOT: ni
+; CHECK: iihl %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 18446462603027808255
+ %or = or i64 %and, 4294967296
+ ret i64 %or
+}
+
+; Check a middle value.
+define i64 @f8(i64 %a) {
+; CHECK: f8:
+; CHECK-NOT: ni
+; CHECK: iihl %r2, 32767
+; CHECK: br %r14
+ %and = and i64 %a, -281470681743361
+ %or = or i64 %and, 140733193388032
+ ret i64 %or
+}
+
+; Check the highest useful IIHL value.
+define i64 @f9(i64 %a) {
+; CHECK: f9:
+; CHECK-NOT: ni
+; CHECK: iihl %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, 18446462603027808255
+ %or = or i64 %and, 281466386776064
+ ret i64 %or
+}
+
+; Check the lowest useful IIHH value.
+define i64 @f10(i64 %a) {
+; CHECK: f10:
+; CHECK-NOT: ni
+; CHECK: iihh %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 281474976710655
+ %or = or i64 %and, 281474976710656
+ ret i64 %or
+}
+
+; Check a middle value.
+define i64 @f11(i64 %a) {
+; CHECK: f11:
+; CHECK-NOT: ni
+; CHECK: iihh %r2, 32767
+; CHECK: br %r14
+ %and = and i64 %a, 281474976710655
+ %or = or i64 %and, 9223090561878065152
+ ret i64 %or
+}
+
+; Check the highest useful IIHH value.
+define i64 @f12(i64 %a) {
+; CHECK: f12:
+; CHECK-NOT: ni
+; CHECK: iihh %r2, 65534
+; CHECK: br %r14
+ %and = and i64 %a, 281474976710655
+ %or = or i64 %and, 18446181123756130304
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/insert-05.ll b/test/CodeGen/SystemZ/insert-05.ll
new file mode 100644
index 000000000000..da51676b99cf
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-05.ll
@@ -0,0 +1,224 @@
+; Test insertions of 32-bit constants into one half of an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
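+; For reference: IILF and IIHF insert a full 32-bit immediate into the
+; low word (bits 32-63) and high word (bits 0-31) respectively. LHI can
+; stand in for IILF when the immediate fits in signed 16 bits, since
+; 32-bit operations leave bits 0-31 of the 64-bit register unchanged
+; and the RI encoding is two bytes shorter.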
+; Prefer LHI over IILF for signed 16-bit constants.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK-NOT: ni
+; CHECK: lhi %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 1
+ ret i64 %or
+}
+
+; Check the high end of the LHI range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK-NOT: ni
+; CHECK: lhi %r2, 32767
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 32767
+ ret i64 %or
+}
+
+; Check the next value up, which should use IILF instead.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: ni
+; CHECK: iilf %r2, 32768
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 32768
+ ret i64 %or
+}
+
+; Check a value in which the lower 16 bits are clear.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK-NOT: ni
+; CHECK: iilf %r2, 65536
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 65536
+ ret i64 %or
+}
+
+; Check the highest useful IILF value (-0x8001).
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK-NOT: ni
+; CHECK: iilf %r2, 4294934527
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 4294934527
+ ret i64 %or
+}
+
+; Check the next value up, which should use LHI instead.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK-NOT: ni
+; CHECK: lhi %r2, -32768
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 4294934528
+ ret i64 %or
+}
+
+; Check the highest useful LHI value. (We use OILF for -1 instead, although
+; LHI might be better there too.)
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK-NOT: ni
+; CHECK: lhi %r2, -2
+; CHECK: br %r14
+ %and = and i64 %a, 18446744069414584320
+ %or = or i64 %and, 4294967294
+ ret i64 %or
+}
+
+; Check that SRLG is still used if some of the high bits are known to be 0
+; (and so might be removed from the mask).
+define i64 @f8(i64 %a) {
+; CHECK: f8:
+; CHECK: srlg %r2, %r2, 1
+; CHECK-NEXT: iilf %r2, 32768
+; CHECK: br %r14
+ %shifted = lshr i64 %a, 1
+ %and = and i64 %shifted, 18446744069414584320
+ %or = or i64 %and, 32768
+ ret i64 %or
+}
+
+; Repeat f8 with addition, which is known to be equivalent to OR in this case.
+define i64 @f9(i64 %a) {
+; CHECK: f9:
+; CHECK: srlg %r2, %r2, 1
+; CHECK-NEXT: iilf %r2, 32768
+; CHECK: br %r14
+ %shifted = lshr i64 %a, 1
+ %and = and i64 %shifted, 18446744069414584320
+ %or = add i64 %and, 32768
+ ret i64 %or
+}
+
+; Repeat f8 with already-zero bits removed from the mask.
+define i64 @f10(i64 %a) {
+; CHECK: f10:
+; CHECK: srlg %r2, %r2, 1
+; CHECK-NEXT: iilf %r2, 32768
+; CHECK: br %r14
+ %shifted = lshr i64 %a, 1
+ %and = and i64 %shifted, 9223372032559808512
+ %or = or i64 %and, 32768
+ ret i64 %or
+}
+
+; Repeat f10 with addition, which is known to be equivalent to OR in this case.
+define i64 @f11(i64 %a) {
+; CHECK: f11:
+; CHECK: srlg %r2, %r2, 1
+; CHECK-NEXT: iilf %r2, 32768
+; CHECK: br %r14
+ %shifted = lshr i64 %a, 1
+ %and = and i64 %shifted, 9223372032559808512
+ %or = add i64 %and, 32768
+ ret i64 %or
+}
+
+; Check the lowest useful IIHF value.
+define i64 @f12(i64 %a) {
+; CHECK: f12:
+; CHECK-NOT: ni
+; CHECK: iihf %r2, 1
+; CHECK: br %r14
+ %and = and i64 %a, 4294967295
+ %or = or i64 %and, 4294967296
+ ret i64 %or
+}
+
+; Check a value in which the lower 16 bits are clear.
+define i64 @f13(i64 %a) {
+; CHECK: f13:
+; CHECK-NOT: ni
+; CHECK: iihf %r2, 2147483648
+; CHECK: br %r14
+ %and = and i64 %a, 4294967295
+ %or = or i64 %and, 9223372036854775808
+ ret i64 %or
+}
+
+; Check the highest useful IIHF value (0xfffffffe).
+define i64 @f14(i64 %a) {
+; CHECK: f14:
+; CHECK-NOT: ni
+; CHECK: iihf %r2, 4294967294
+; CHECK: br %r14
+ %and = and i64 %a, 4294967295
+ %or = or i64 %and, 18446744065119617024
+ ret i64 %or
+}
+
+; Check a case in which some of the low 32 bits are known to be clear,
+; and so could be removed from the AND mask.
+define i64 @f15(i64 %a) {
+; CHECK: f15:
+; CHECK: sllg %r2, %r2, 1
+; CHECK-NEXT: iihf %r2, 1
+; CHECK: br %r14
+ %shifted = shl i64 %a, 1
+ %and = and i64 %shifted, 4294967295
+ %or = or i64 %and, 4294967296
+ ret i64 %or
+}
+
+; Repeat f15 with the zero bits explicitly removed from the mask.
+define i64 @f16(i64 %a) {
+; CHECK: f16:
+; CHECK: sllg %r2, %r2, 1
+; CHECK-NEXT: iihf %r2, 1
+; CHECK: br %r14
+ %shifted = shl i64 %a, 1
+ %and = and i64 %shifted, 4294967294
+ %or = or i64 %and, 4294967296
+ ret i64 %or
+}
+
+; Check concatenation of two i32s.
+define i64 @f17(i32 %a) {
+; CHECK: f17:
+; CHECK: msr %r2, %r2
+; CHECK-NEXT: iihf %r2, 1
+; CHECK: br %r14
+ %mul = mul i32 %a, %a
+ %ext = zext i32 %mul to i64
+ %or = or i64 %ext, 4294967296
+ ret i64 %or
+}
+
+; Repeat f17 with the operands reversed.
+define i64 @f18(i32 %a) {
+; CHECK: f18:
+; CHECK: msr %r2, %r2
+; CHECK-NEXT: iihf %r2, 1
+; CHECK: br %r14
+ %mul = mul i32 %a, %a
+ %ext = zext i32 %mul to i64
+ %or = or i64 4294967296, %ext
+ ret i64 %or
+}
+
+; The truncation here isn't free; we need an explicit zero extension.
+define i64 @f19(i32 %a) {
+; CHECK: f19:
+; CHECK: llgcr %r2, %r2
+; CHECK: oihl %r2, 1
+; CHECK: br %r14
+ %trunc = trunc i32 %a to i8
+ %ext = zext i8 %trunc to i64
+ %or = or i64 %ext, 4294967296
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/insert-06.ll b/test/CodeGen/SystemZ/insert-06.ll
new file mode 100644
index 000000000000..4a13ef47c888
--- /dev/null
+++ b/test/CodeGen/SystemZ/insert-06.ll
@@ -0,0 +1,167 @@
+; Test insertions of i32s into the low half of an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
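+; For reference: 32-bit operations such as LR write only bits 32-63 of
+; the 64-bit register and leave the high word unchanged, which is what
+; lets a plain register move act as an insertion here.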
+; Insertion of an i32 can be done using LR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %low = zext i32 %b to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
+
+; ... and again with the operands reversed.
+define i64 @f2(i64 %a, i32 %b) {
+; CHECK: f2:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %low = zext i32 %b to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %low, %high
+ ret i64 %res
+}
+
+; Like f1, but with "in register" zero extension.
+define i64 @f3(i64 %a, i64 %b) {
+; CHECK: f3:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %low = and i64 %b, 4294967295
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
+
+; ... and again with the operands reversed.
+define i64 @f4(i64 %a, i64 %b) {
+; CHECK: f4:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %low = and i64 %b, 4294967295
+ %high = and i64 %a, -4294967296
+ %res = or i64 %low, %high
+ ret i64 %res
+}
+
+; Unary operations can be done directly into the low half.
+define i64 @f5(i64 %a, i32 %b) {
+; CHECK: f5:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lcr %r2, %r3
+; CHECK: br %r14
+ %neg = sub i32 0, %b
+ %low = zext i32 %neg to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
+
+; ...likewise three-operand binary operations like RLL.
+define i64 @f6(i64 %a, i32 %b) {
+; CHECK: f6:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: rll %r2, %r3, 1
+; CHECK: br %r14
+ %parta = shl i32 %b, 1
+ %partb = lshr i32 %b, 31
+ %rot = or i32 %parta, %partb
+ %low = zext i32 %rot to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %low, %high
+ ret i64 %res
+}
+
+; Loads can be done directly into the low half. The range of L is checked
+; in the move tests.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: l %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %low = zext i32 %b to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
+
+; ...likewise extending loads.
+define i64 @f8(i64 %a, i8 *%src) {
+; CHECK: f8:
+; CHECK-NOT: {{%r[23]}}
+; CHECK: lb %r2, 0(%r3)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %b = sext i8 %byte to i32
+ %low = zext i32 %b to i64
+ %high = and i64 %a, -4294967296
+ %res = or i64 %high, %low
+ ret i64 %res
+}
+
+; Check a case like f1 in which there is no AND. We simply know from context
+; that the upper half of one OR operand and the lower half of the other are
+; both clear.
+define i64 @f9(i64 %a, i32 %b) {
+; CHECK: f9:
+; CHECK: sllg %r2, %r2, 32
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %shift = shl i64 %a, 32
+ %low = zext i32 %b to i64
+ %or = or i64 %shift, %low
+ ret i64 %or
+}
+
+; ...and again with the operands reversed.
+define i64 @f10(i64 %a, i32 %b) {
+; CHECK: f10:
+; CHECK: sllg %r2, %r2, 32
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %shift = shl i64 %a, 32
+ %low = zext i32 %b to i64
+ %or = or i64 %low, %shift
+ ret i64 %or
+}
+
+; Like f9, but with "in register" zero extension.
+define i64 @f11(i64 %a, i64 %b) {
+; CHECK: f11:
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %shift = shl i64 %a, 32
+ %low = and i64 %b, 4294967295
+ %or = or i64 %shift, %low
+ ret i64 %or
+}
+
+; ...and again with the operands reversed.
+define i64 @f12(i64 %a, i64 %b) {
+; CHECK: f12:
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %shift = shl i64 %a, 32
+ %low = and i64 %b, 4294967295
+ %or = or i64 %low, %shift
+ ret i64 %or
+}
+
+; Like f9, but for larger shifts than 32.
+define i64 @f13(i64 %a, i32 %b) {
+; CHECK: f13:
+; CHECK: sllg %r2, %r2, 60
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ %shift = shl i64 %a, 60
+ %low = zext i32 %b to i64
+ %or = or i64 %shift, %low
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/int-add-01.ll b/test/CodeGen/SystemZ/int-add-01.ll
new file mode 100644
index 000000000000..d12ac229774e
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-01.ll
@@ -0,0 +1,131 @@
+; Test 32-bit addition in which the second operand is a sign-extended
+; i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
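+; For reference: AH takes a 12-bit unsigned displacement (0 to 4095),
+; while the long-displacement form AHY takes a 20-bit signed displacement
+; (-524288 to 524287). The tests below probe both boundaries.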
+; Check the low end of the AH range.
+define i32 @f1(i32 %lhs, i16 *%src) {
+; CHECK: f1:
+; CHECK: ah %r2, 0(%r3)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the aligned AH range.
+define i32 @f2(i32 %lhs, i16 *%src) {
+; CHECK: f2:
+; CHECK: ah %r2, 4094(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2047
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword up, which should use AHY instead of AH.
+define i32 @f3(i32 %lhs, i16 *%src) {
+; CHECK: f3:
+; CHECK: ahy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2048
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the aligned AHY range.
+define i32 @f4(i32 %lhs, i16 *%src) {
+; CHECK: f4:
+; CHECK: ahy %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f5(i32 %lhs, i16 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r3, 524288
+; CHECK: ah %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the negative aligned AHY range.
+define i32 @f6(i32 %lhs, i16 *%src) {
+; CHECK: f6:
+; CHECK: ahy %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the low end of the AHY range.
+define i32 @f7(i32 %lhs, i16 *%src) {
+; CHECK: f7:
+; CHECK: ahy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i32 %lhs, i16 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r3, -524290
+; CHECK: ah %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check that AH allows an index.
+define i32 @f9(i32 %lhs, i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: ah %r2, 4094({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4094
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check that AHY allows an index.
+define i32 @f10(i32 %lhs, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: ahy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = add i32 %lhs, %rhs
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/int-add-02.ll b/test/CodeGen/SystemZ/int-add-02.ll
new file mode 100644
index 000000000000..568ad1c4471d
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-02.ll
@@ -0,0 +1,129 @@
+; Test 32-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check AR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: ar %r2, %r3
+; CHECK: br %r14
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the low end of the A range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: a %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the high end of the aligned A range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: a %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the next word up, which should use AY instead of A.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: ay %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the high end of the aligned AY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: ay %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: a %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the high end of the negative aligned AY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: ay %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the low end of the AY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: ay %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: a %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check that A allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: a %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
+
+; Check that AY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: ay %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %add = add i32 %a, %b
+ ret i32 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-03.ll b/test/CodeGen/SystemZ/int-add-03.ll
new file mode 100644
index 000000000000..46103575b7b2
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-03.ll
@@ -0,0 +1,102 @@
+; Test additions between an i64 and a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check AGFR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: agfr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check AGF with no displacement.
+define i64 @f2(i64 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: agf %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the aligned AGF range.
+define i64 @f3(i64 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: agf %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: agf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the negative aligned AGF range.
+define i64 @f5(i64 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: agf %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the low end of the AGF range.
+define i64 @f6(i64 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agf %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524292
+; CHECK: agf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check that AGF allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: agf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-04.ll b/test/CodeGen/SystemZ/int-add-04.ll
new file mode 100644
index 000000000000..1c2dc76781ce
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-04.ll
@@ -0,0 +1,102 @@
+; Test additions between an i64 and a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ALGFR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: algfr %r2, %r3
+; CHECK: br %r14
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check ALGF with no displacement.
+define i64 @f2(i64 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: algf %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the aligned ALGF range.
+define i64 @f3(i64 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: algf %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: algf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the high end of the negative aligned ALGF range.
+define i64 @f5(i64 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: algf %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the low end of the ALGF range.
+define i64 @f6(i64 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: algf %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524292
+; CHECK: algf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
+
+; Check that ALGF allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: algf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %add = add i64 %a, %bext
+ ret i64 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-05.ll b/test/CodeGen/SystemZ/int-add-05.ll
new file mode 100644
index 000000000000..ae32cc4ad01a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-05.ll
@@ -0,0 +1,94 @@
+; Test 64-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check AGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: agr %r2, %r3
+; CHECK: br %r14
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check AG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: ag %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check the high end of the aligned AG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: ag %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: ag %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check the high end of the negative aligned AG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: ag %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check the low end of the AG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: ag %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: ag %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
+
+; Check that AG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: ag %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %add = add i64 %a, %b
+ ret i64 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-06.ll b/test/CodeGen/SystemZ/int-add-06.ll
new file mode 100644
index 000000000000..3a9c698dd241
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-06.ll
@@ -0,0 +1,93 @@
+; Test 32-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check additions of 1.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: ahi %r2, 1
+; CHECK: br %r14
+ %add = add i32 %a, 1
+ ret i32 %add
+}
+
+; Check the high end of the AHI range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: ahi %r2, 32767
+; CHECK: br %r14
+ %add = add i32 %a, 32767
+ ret i32 %add
+}
+
+; Check the next value up, which must use AFI instead.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: afi %r2, 32768
+; CHECK: br %r14
+ %add = add i32 %a, 32768
+ ret i32 %add
+}
+
+; Check the high end of the signed 32-bit range.
+define i32 @f4(i32 %a) {
+; CHECK: f4:
+; CHECK: afi %r2, 2147483647
+; CHECK: br %r14
+ %add = add i32 %a, 2147483647
+ ret i32 %add
+}
+
+; Check the next value up, which is treated as a negative value.
+define i32 @f5(i32 %a) {
+; CHECK: f5:
+; CHECK: afi %r2, -2147483648
+; CHECK: br %r14
+ %add = add i32 %a, 2147483648
+ ret i32 %add
+}
+
+; Check the high end of the negative AHI range.
+define i32 @f6(i32 %a) {
+; CHECK: f6:
+; CHECK: ahi %r2, -1
+; CHECK: br %r14
+ %add = add i32 %a, -1
+ ret i32 %add
+}
+
+; Check the low end of the AHI range.
+define i32 @f7(i32 %a) {
+; CHECK: f7:
+; CHECK: ahi %r2, -32768
+; CHECK: br %r14
+ %add = add i32 %a, -32768
+ ret i32 %add
+}
+
+; Check the next value down, which must use AFI instead.
+define i32 @f8(i32 %a) {
+; CHECK: f8:
+; CHECK: afi %r2, -32769
+; CHECK: br %r14
+ %add = add i32 %a, -32769
+ ret i32 %add
+}
+
+; Check the low end of the signed 32-bit range.
+define i32 @f9(i32 %a) {
+; CHECK: f9:
+; CHECK: afi %r2, -2147483648
+; CHECK: br %r14
+ %add = add i32 %a, -2147483648
+ ret i32 %add
+}
+
+; Check the next value down, which is treated as a positive value.
+define i32 @f10(i32 %a) {
+; CHECK: f10:
+; CHECK: afi %r2, 2147483647
+; CHECK: br %r14
+ %add = add i32 %a, -2147483649
+ ret i32 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-07.ll b/test/CodeGen/SystemZ/int-add-07.ll
new file mode 100644
index 000000000000..a065bb2ee137
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-07.ll
@@ -0,0 +1,131 @@
+; Test 64-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
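+; For reference, the constant decides the opcode: AGHI covers signed
+; 16-bit immediates, AGFI signed 32-bit ones, ALGFI adds an unsigned
+; 32-bit value, SLGFI subtracts one, and anything wider is loaded into
+; a register and added with AGR.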
+; Check additions of 1.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: {{aghi %r2, 1|la %r[0-5], 1\(%r2\)}}
+; CHECK: br %r14
+ %add = add i64 %a, 1
+ ret i64 %add
+}
+
+; Check the high end of the AGHI range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: aghi %r2, 32767
+; CHECK: br %r14
+ %add = add i64 %a, 32767
+ ret i64 %add
+}
+
+; Check the next value up, which must use AGFI instead.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: {{agfi %r2, 32768|lay %r[0-5], 32768\(%r2\)}}
+; CHECK: br %r14
+ %add = add i64 %a, 32768
+ ret i64 %add
+}
+
+; Check the high end of the AGFI range.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK: agfi %r2, 2147483647
+; CHECK: br %r14
+ %add = add i64 %a, 2147483647
+ ret i64 %add
+}
+
+; Check the next value up, which must use ALGFI instead.
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK: algfi %r2, 2147483648
+; CHECK: br %r14
+ %add = add i64 %a, 2147483648
+ ret i64 %add
+}
+
+; Check the high end of the ALGFI range.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK: algfi %r2, 4294967295
+; CHECK: br %r14
+ %add = add i64 %a, 4294967295
+ ret i64 %add
+}
+
+; Check the next value up, which must be loaded into a register first.
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK: llihl %r0, 1
+; CHECK: agr
+; CHECK: br %r14
+ %add = add i64 %a, 4294967296
+ ret i64 %add
+}
+
+; Check the high end of the negative AGHI range.
+define i64 @f8(i64 %a) {
+; CHECK: f8:
+; CHECK: aghi %r2, -1
+; CHECK: br %r14
+ %add = add i64 %a, -1
+ ret i64 %add
+}
+
+; Check the low end of the AGHI range.
+define i64 @f9(i64 %a) {
+; CHECK: f9:
+; CHECK: aghi %r2, -32768
+; CHECK: br %r14
+ %add = add i64 %a, -32768
+ ret i64 %add
+}
+
+; Check the next value down, which must use AGFI instead.
+define i64 @f10(i64 %a) {
+; CHECK: f10:
+; CHECK: {{agfi %r2, -32769|lay %r[0-5]+, -32769\(%r2\)}}
+; CHECK: br %r14
+ %add = add i64 %a, -32769
+ ret i64 %add
+}
+
+; Check the low end of the AGFI range.
+define i64 @f11(i64 %a) {
+; CHECK: f11:
+; CHECK: agfi %r2, -2147483648
+; CHECK: br %r14
+ %add = add i64 %a, -2147483648
+ ret i64 %add
+}
+
+; Check the next value down, which must use SLGFI instead.
+define i64 @f12(i64 %a) {
+; CHECK: f12:
+; CHECK: slgfi %r2, 2147483649
+; CHECK: br %r14
+ %add = add i64 %a, -2147483649
+ ret i64 %add
+}
+
+; Check the low end of the SLGFI range.
+define i64 @f13(i64 %a) {
+; CHECK: f13:
+; CHECK: slgfi %r2, 4294967295
+; CHECK: br %r14
+ %add = add i64 %a, -4294967295
+ ret i64 %add
+}
+
+; Check the next value down, which must use register addition instead.
+define i64 @f14(i64 %a) {
+; CHECK: f14:
+; CHECK: llihf %r0, 4294967295
+; CHECK: agr
+; CHECK: br %r14
+ %add = add i64 %a, -4294967296
+ ret i64 %add
+}
diff --git a/test/CodeGen/SystemZ/int-add-08.ll b/test/CodeGen/SystemZ/int-add-08.ll
new file mode 100644
index 000000000000..b1f820fe3d84
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-08.ll
@@ -0,0 +1,110 @@
+; Test 128-bit addition in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
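+; For reference: i128 addition is lowered to an add-logical (ALGR/ALG) of
+; the low doublewords, which sets the carry in the condition code,
+; followed by an add-logical-with-carry (ALCGR/ALCG) of the high ones.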
+; Test register addition.
+define void @f1(i128 *%ptr) {
+; CHECK: f1:
+; CHECK: algr
+; CHECK: alcgr
+; CHECK: br %r14
+ %value = load i128 *%ptr
+ %add = add i128 %value, %value
+ store i128 %add, i128 *%ptr
+ ret void
+}
+
+; Test memory addition with no offset. Making the load of %a volatile
+; should force the memory operand to be %b.
+define void @f2(i128 *%aptr, i64 %addr) {
+; CHECK: f2:
+; CHECK: alg {{%r[0-5]}}, 8(%r3)
+; CHECK: alcg {{%r[0-5]}}, 0(%r3)
+; CHECK: br %r14
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test the highest aligned offset that is in range of both ALG and ALCG.
+define void @f3(i128 *%aptr, i64 %base) {
+; CHECK: f3:
+; CHECK: alg {{%r[0-5]}}, 524280(%r3)
+; CHECK: alcg {{%r[0-5]}}, 524272(%r3)
+; CHECK: br %r14
+ %addr = add i64 %base, 524272
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword up, which requires separate address logic for ALG.
+define void @f4(i128 *%aptr, i64 %base) {
+; CHECK: f4:
+; CHECK: lgr [[BASE:%r[1-5]]], %r3
+; CHECK: agfi [[BASE]], 524288
+; CHECK: alg {{%r[0-5]}}, 0([[BASE]])
+; CHECK: alcg {{%r[0-5]}}, 524280(%r3)
+; CHECK: br %r14
+ %addr = add i64 %base, 524280
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword after that, which requires separate logic for
+; both instructions. It would be better to create an anchor at 524288
+; that both instructions can use, but that isn't implemented yet.
+define void @f5(i128 *%aptr, i64 %base) {
+; CHECK: f5:
+; CHECK: alg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: alcg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %addr = add i64 %base, 524288
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test the lowest displacement that is in range of both ALG and ALCG.
+define void @f6(i128 *%aptr, i64 %base) {
+; CHECK: f6:
+; CHECK: alg {{%r[0-5]}}, -524280(%r3)
+; CHECK: alcg {{%r[0-5]}}, -524288(%r3)
+; CHECK: br %r14
+ %addr = add i64 %base, -524288
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword down, which is out of range of the ALCG.
+define void @f7(i128 *%aptr, i64 %base) {
+; CHECK: f7:
+; CHECK: alg {{%r[0-5]}}, -524288(%r3)
+; CHECK: alcg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %addr = add i64 %base, -524296
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
diff --git a/test/CodeGen/SystemZ/int-add-09.ll b/test/CodeGen/SystemZ/int-add-09.ll
new file mode 100644
index 000000000000..bfe63389f189
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-09.ll
@@ -0,0 +1,56 @@
+; Test 128-bit addition in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check additions of 1. The XOR ensures that we don't instead load the
+; constant into a register and use memory addition.
+define void @f1(i128 *%aptr) {
+; CHECK: f1:
+; CHECK: algfi {{%r[0-5]}}, 1
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 128
+ %add = add i128 %xor, 1
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the high end of the ALGFI range.
+define void @f2(i128 *%aptr) {
+; CHECK: f2:
+; CHECK: algfi {{%r[0-5]}}, 4294967295
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 128
+ %add = add i128 %xor, 4294967295
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the next value up, which must use register addition.
+define void @f3(i128 *%aptr) {
+; CHECK: f3:
+; CHECK: algr
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 128
+ %add = add i128 %xor, 4294967296
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check addition of -1, which must also use register addition.
+define void @f4(i128 *%aptr) {
+; CHECK: f4:
+; CHECK: algr
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 128
+ %add = add i128 %xor, -1
+ store i128 %add, i128 *%aptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-add-10.ll b/test/CodeGen/SystemZ/int-add-10.ll
new file mode 100644
index 000000000000..17cfdbe33771
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-10.ll
@@ -0,0 +1,165 @@
+; Test 128-bit addition in which the second operand is a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register additions. The XOR ensures that we don't instead zero-extend
+; %b into a register and use memory addition.
+define void @f1(i128 *%aptr, i32 %b) {
+; CHECK: f1:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Like f1, but using an "in-register" extension.
+define void @f2(i128 *%aptr, i64 %b) {
+; CHECK: f2:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %trunc = trunc i64 %b to i32
+ %bext = zext i32 %trunc to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test register addition in cases where the second operand is zero-extended
+; from i64 rather than i32, but is later masked down to the i32 range.
+define void @f3(i128 *%aptr, i64 %b) {
+; CHECK: f3:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %bext = zext i64 %b to i128
+ %and = and i128 %bext, 4294967295
+ %add = add i128 %xor, %and
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Test ALGF with no offset.
+define void @f4(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f4:
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %b = load i32 *%bsrc
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the high end of the ALGF range.
+define void @f5(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f5:
+; CHECK: algf {{%r[0-5]}}, 524284(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i64 131071
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i64 131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the high end of the negative aligned ALGF range.
+define void @f7(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f7:
+; CHECK: algf {{%r[0-5]}}, -4(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -1
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the low end of the ALGF range.
+define void @f8(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f8:
+; CHECK: algf {{%r[0-5]}}, -524288(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -131073
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+; Check that ALGF allows an index.
+define void @f10(i128 *%aptr, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: algf {{%r[0-5]}}, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %add = add i128 %xor, %bext
+ store i128 %add, i128 *%aptr
+ ret void
+}
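
[Note on the preceding file: the ALGF offsets above are the usual 20-bit signed displacement arithmetic. The range is [-524288, 524287], so for 4-byte words the last reachable aligned offset is 131071 * 4 = 524284 (f5) and 131072 * 4 = 524288 overflows it (f6). The same arithmetic gives -1 * 4 = -4 (f7) and -131072 * 4 = -524288 (f8) at the negative end, with -131073 * 4 = -524292 out of range (f9).]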
diff --git a/test/CodeGen/SystemZ/int-add-11.ll b/test/CodeGen/SystemZ/int-add-11.ll
new file mode 100644
index 000000000000..47a776ecf6ec
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-11.ll
@@ -0,0 +1,128 @@
+; Test 32-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check additions of 1.
+define void @f1(i32 *%ptr) {
+; CHECK: f1:
+; CHECK: asi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the high end of the constant range.
+define void @f2(i32 *%ptr) {
+; CHECK: f2:
+; CHECK: asi 0(%r2), 127
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %add = add i32 %val, 127
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the next constant up, which must use an addition and a store.
+; Both L/AHI and LHI/A would be OK.
+define void @f3(i32 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: asi
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %add = add i32 %val, 128
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the low end of the constant range.
+define void @f4(i32 *%ptr) {
+; CHECK: f4:
+; CHECK: asi 0(%r2), -128
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %add = add i32 %val, -128
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the next value down, with the same comment as f3.
+define void @f5(i32 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: asi
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %add = add i32 %val, -129
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the high end of the aligned ASI range.
+define void @f6(i32 *%base) {
+; CHECK: f6:
+; CHECK: asi 524284(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131071
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f7(i32 *%base) {
+; CHECK: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: asi 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131072
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the low end of the ASI range.
+define void @f8(i32 *%base) {
+; CHECK: f8:
+; CHECK: asi -524288(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131072
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check the next word down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i32 *%base) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: asi 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131073
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
+
+; Check that ASI does not allow indices.
+define void @f10(i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: agr %r2, %r3
+; CHECK: asi 4(%r2), 1
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4
+ %ptr = inttoptr i64 %add2 to i32 *
+ %val = load i32 *%ptr
+ %add = add i32 %val, 1
+ store i32 %add, i32 *%ptr
+ ret void
+}
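
[Note on the preceding file: as a minimal standalone sketch of the pattern it exercises (not part of the patch; the name @sketch and the addend 42 are illustrative), ASI folds the whole load-add-store into one memory operation whenever the addend fits the signed 8-bit immediate range [-128, 127]:

    ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
    define void @sketch(i32 *%ptr) {
    ; CHECK: asi 0(%r2), 42
    ; CHECK: br %r14
      %val = load i32 *%ptr
      %add = add i32 %val, 42
      store i32 %add, i32 *%ptr
      ret void
    }
]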
diff --git a/test/CodeGen/SystemZ/int-add-12.ll b/test/CodeGen/SystemZ/int-add-12.ll
new file mode 100644
index 000000000000..ae1c1f735fa7
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-12.ll
@@ -0,0 +1,128 @@
+; Test 64-bit additions of constants to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check additions of 1.
+define void @f1(i64 *%ptr) {
+; CHECK: f1:
+; CHECK: agsi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the high end of the constant range.
+define void @f2(i64 *%ptr) {
+; CHECK: f2:
+; CHECK: agsi 0(%r2), 127
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %add = add i64 %val, 127
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the next constant up, which must use an addition and a store.
+; Both LG/AGHI and LGHI/AG would be OK.
+define void @f3(i64 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: agsi
+; CHECK: stg %r0, 0(%r2)
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %add = add i64 %val, 128
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the low end of the constant range.
+define void @f4(i64 *%ptr) {
+; CHECK: f4:
+; CHECK: agsi 0(%r2), -128
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %add = add i64 %val, -128
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the next value down, with the same comment as f3.
+define void @f5(i64 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: agsi
+; CHECK: stg %r0, 0(%r2)
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %add = add i64 %val, -129
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the high end of the aligned AGSI range.
+define void @f6(i64 *%base) {
+; CHECK: f6:
+; CHECK: agsi 524280(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65535
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f7(i64 *%base) {
+; CHECK: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: agsi 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65536
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the low end of the AGSI range.
+define void @f8(i64 *%base) {
+; CHECK: f8:
+; CHECK: agsi -524288(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65536
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i64 *%base) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524296
+; CHECK: agsi 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65537
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
+
+; Check that AGSI does not allow indices.
+define void @f10(i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: agr %r2, %r3
+; CHECK: agsi 8(%r2), 1
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 8
+ %ptr = inttoptr i64 %add2 to i64 *
+ %val = load i64 *%ptr
+ %add = add i64 %val, 1
+ store i64 %add, i64 *%ptr
+ ret void
+}
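
[Note on the preceding file: AGSI is the doubleword analogue of ASI. The immediate field is the same signed 8-bit range [-128, 127], and with 8-byte doublewords the aligned displacement limits become 65535 * 8 = 524280 (f6) and -65536 * 8 = -524288 (f8), while 65536 * 8 = 524288 and -65537 * 8 = -524296 need the separate AGFI address computation (f7 and f9).]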
diff --git a/test/CodeGen/SystemZ/int-cmp-01.ll b/test/CodeGen/SystemZ/int-cmp-01.ll
new file mode 100644
index 000000000000..aa432f0b04fb
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-01.ll
@@ -0,0 +1,151 @@
+; Test 32-bit signed comparison in which the second operand is sign-extended
+; from an i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the CH range.
+define void @f1(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f1:
+; CHECK: ch %r2, 0(%r3)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the high end of the aligned CH range.
+define void @f2(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f2:
+; CHECK: ch %r2, 4094(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2047
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the next halfword up, which should use CHY instead of CH.
+define void @f3(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f3:
+; CHECK: chy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2048
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the high end of the aligned CHY range.
+define void @f4(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f4:
+; CHECK: chy %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f5(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f5:
+; CHECK: agfi %r3, 524288
+; CHECK: ch %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the high end of the negative aligned CHY range.
+define void @f6(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f6:
+; CHECK: chy %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the low end of the CHY range.
+define void @f7(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f7:
+; CHECK: chy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i32 %lhs, i16 *%src, i32 *%dst) {
+; CHECK: f8:
+; CHECK: agfi %r3, -524290
+; CHECK: ch %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check that CH allows an index.
+define void @f9(i32 %lhs, i64 %base, i64 %index, i32 *%dst) {
+; CHECK: f9:
+; CHECK: ch %r2, 4094({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4094
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
+
+; Check that CHY allows an index.
+define void @f10(i32 %lhs, i64 %base, i64 %index, i32 *%dst) {
+; CHECK: f10:
+; CHECK: chy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %cond = icmp slt i32 %lhs, %rhs
+ %res = select i1 %cond, i32 100, i32 200
+ store i32 %res, i32 *%dst
+ ret void
+}
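
[Note on the preceding file: the CH/CHY pairing shows the two displacement encodings side by side. CH has a 12-bit unsigned displacement with range [0, 4095], so the last aligned halfword is 2047 * 2 = 4094 (f2); CHY has a 20-bit signed displacement with range [-524288, 524287], covering 2048 * 2 = 4096 (f3) up to 262143 * 2 = 524286 (f4) and down to -262144 * 2 = -524288 (f7).]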
diff --git a/test/CodeGen/SystemZ/int-cmp-02.ll b/test/CodeGen/SystemZ/int-cmp-02.ll
new file mode 100644
index 000000000000..c158fb4af77f
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-02.ll
@@ -0,0 +1,162 @@
+; Test 32-bit signed comparison in which the second operand is a variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register comparison.
+define double @f1(double %a, double %b, i32 %i1, i32 %i2) {
+; CHECK: f1:
+; CHECK: cr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the C range.
+define double @f2(double %a, double %b, i32 %i1, i32 *%ptr) {
+; CHECK: f2:
+; CHECK: c %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned C range.
+define double @f3(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f3:
+; CHECK: c %r2, 4092(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1023
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which should use CY instead of C.
+define double @f4(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f4:
+; CHECK: cy %r2, 4096(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1024
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CY range.
+define double @f5(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f5:
+; CHECK: cy %r2, 524284(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131071
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f6(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: c %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131072
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CY range.
+define double @f7(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f7:
+; CHECK: cy %r2, -4(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CY range.
+define double @f8(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f8:
+; CHECK: cy %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131072
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f9(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: c %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131073
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that C allows an index.
+define double @f10(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: c %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CY allows an index.
+define double @f11(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
+; CHECK: f11:
+; CHECK: cy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %i2 = load i32 *%ptr
+ %cond = icmp slt i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
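
[Note on the preceding file: all of the comparison files use the same select-based idiom, so the CHECK pattern is worth unpacking once. The compare sets the condition code, j{{g?}}l accepts either the short (jl) or long (jgl) encoding of the branch-on-low, and ldr %f0, %f2 is the move that materializes the other select operand. A minimal standalone sketch of the idiom (not part of the patch; @sketch is an illustrative name, the body mirrors f1 above):

    ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
    define double @sketch(double %a, double %b, i32 %i1, i32 %i2) {
    ; CHECK: cr %r2, %r3
    ; CHECK-NEXT: j{{g?}}l
    ; CHECK: ldr %f0, %f2
    ; CHECK: br %r14
      %cond = icmp slt i32 %i1, %i2
      %res = select i1 %cond, double %a, double %b
      ret double %res
    }
]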
diff --git a/test/CodeGen/SystemZ/int-cmp-03.ll b/test/CodeGen/SystemZ/int-cmp-03.ll
new file mode 100644
index 000000000000..4203bee6ac44
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-03.ll
@@ -0,0 +1,162 @@
+; Test 32-bit unsigned comparison in which the second operand is a variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register comparison.
+define double @f1(double %a, double %b, i32 %i1, i32 %i2) {
+; CHECK: f1:
+; CHECK: clr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CL range.
+define double @f2(double %a, double %b, i32 %i1, i32 *%ptr) {
+; CHECK: f2:
+; CHECK: cl %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CL range.
+define double @f3(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f3:
+; CHECK: cl %r2, 4092(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1023
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which should use CLY instead of CL.
+define double @f4(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f4:
+; CHECK: cly %r2, 4096(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1024
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CLY range.
+define double @f5(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f5:
+; CHECK: cly %r2, 524284(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131071
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f6(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: cl %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131072
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CLY range.
+define double @f7(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f7:
+; CHECK: cly %r2, -4(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CLY range.
+define double @f8(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f8:
+; CHECK: cly %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131072
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f9(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: cl %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131073
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CL allows an index.
+define double @f10(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: cl %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLY allows an index.
+define double @f11(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
+; CHECK: f11:
+; CHECK: cly %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %i2 = load i32 *%ptr
+ %cond = icmp ult i32 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-04.ll b/test/CodeGen/SystemZ/int-cmp-04.ll
new file mode 100644
index 000000000000..d0625fbddbae
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-04.ll
@@ -0,0 +1,107 @@
+; Test 64-bit signed comparison in which the second operand is sign-extended
+; from an i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check CGH with no displacement.
+define void @f1(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f1:
+; CHECK: cgh %r2, 0(%r3)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check the high end of the aligned CGH range.
+define void @f2(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f2:
+; CHECK: cgh %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f3(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f3:
+; CHECK: agfi %r3, 524288
+; CHECK: cgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check the high end of the negative aligned CGH range.
+define void @f4(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f4:
+; CHECK: cgh %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check the low end of the CGH range.
+define void @f5(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f5:
+; CHECK: cgh %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i64 %lhs, i16 *%src, i64 *%dst) {
+; CHECK: f6:
+; CHECK: agfi %r3, -524290
+; CHECK: cgh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
+
+; Check that CGH allows an index.
+define void @f7(i64 %lhs, i64 %base, i64 %index, i64 *%dst) {
+; CHECK: f7:
+; CHECK: cgh %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i64
+ %cond = icmp slt i64 %lhs, %rhs
+ %res = select i1 %cond, i64 100, i64 200
+ store i64 %res, i64 *%dst
+ ret void
+}
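
[Note on the preceding file: unlike CH, CGH exists only in the 20-bit-displacement form, so there is no short/long pairing to test here. The aligned range runs from -262144 * 2 = -524288 (f5) up to 262143 * 2 = 524286 (f2), and both 262144 * 2 = 524288 (f3) and -262145 * 2 = -524290 (f6) spill over into the AGFI-based sequences.]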
diff --git a/test/CodeGen/SystemZ/int-cmp-05.ll b/test/CodeGen/SystemZ/int-cmp-05.ll
new file mode 100644
index 000000000000..2ab64d5319a8
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-05.ll
@@ -0,0 +1,203 @@
+; Test 64-bit comparison in which the second operand is a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check signed register comparison.
+define double @f1(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f1:
+; CHECK: cgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned register comparison, which can't use CGFR.
+define double @f2(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f2:
+; CHECK-NOT: cgfr
+; CHECK: br %r14
+ %i2 = sext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check register equality.
+define double @f3(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f3:
+; CHECK: cgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = sext i32 %unext to i64
+ %cond = icmp eq i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check register inequality.
+define double @f4(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f4:
+; CHECK: cgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = sext i32 %unext to i64
+ %cond = icmp ne i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison with memory.
+define double @f5(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f5:
+; CHECK: cgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison with memory.
+define double @f6(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: cgf
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check memory equality.
+define double @f7(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f7:
+; CHECK: cgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp eq i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check memory inequality.
+define double @f8(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f8:
+; CHECK: cgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp ne i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CGF range.
+define double @f9(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f9:
+; CHECK: cgf %r2, 524284(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131071
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f10(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f10:
+; CHECK: agfi %r3, 524288
+; CHECK: cgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131072
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CGF range.
+define double @f11(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f11:
+; CHECK: cgf %r2, -4(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGF range.
+define double @f12(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f12:
+; CHECK: cgf %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131072
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f13(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f13:
+; CHECK: agfi %r3, -524292
+; CHECK: cgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131073
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CGF allows an index.
+define double @f14(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
+; CHECK: f14:
+; CHECK: cgf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %unext = load i32 *%ptr
+ %i2 = sext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
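
[Note on the preceding file: the restriction tested by the CHECK-NOT lines in f2 and f6 is inherent to the instruction. CGFR and CGF sign-extend the second operand and then perform a signed comparison, which serves the slt, eq and ne predicates above, but an ult predicate needs an unsigned comparison that these instructions do not provide.]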
diff --git a/test/CodeGen/SystemZ/int-cmp-06.ll b/test/CodeGen/SystemZ/int-cmp-06.ll
new file mode 100644
index 000000000000..26f6dbfe6f22
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-06.ll
@@ -0,0 +1,253 @@
+; Test 64-bit comparison in which the second operand is a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check unsigned register comparison.
+define double @f1(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f1:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and again with a different representation.
+define double @f2(double %a, double %b, i64 %i1, i64 %unext) {
+; CHECK: f2:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = and i64 %unext, 4294967295
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed register comparison, which can't use CLGFR.
+define double @f3(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f3:
+; CHECK-NOT: clgfr
+; CHECK: br %r14
+ %i2 = zext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and again with a different representation.
+define double @f4(double %a, double %b, i64 %i1, i64 %unext) {
+; CHECK: f4:
+; CHECK-NOT: clgfr
+; CHECK: br %r14
+ %i2 = and i64 %unext, 4294967295
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check register equality.
+define double @f5(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f5:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = zext i32 %unext to i64
+ %cond = icmp eq i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and again with a different representation.
+define double @f6(double %a, double %b, i64 %i1, i64 %unext) {
+; CHECK: f6:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = and i64 %unext, 4294967295
+ %cond = icmp eq i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check register inequality.
+define double @f7(double %a, double %b, i64 %i1, i32 %unext) {
+; CHECK: f7:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ne i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and again with a different representation.
+define double @f8(double %a, double %b, i64 %i1, i64 %unext) {
+; CHECK: f8:
+; CHECK: clgfr %r2, %r3
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = and i64 %unext, 4294967295
+ %cond = icmp ne i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison with memory.
+define double @f9(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f9:
+; CHECK: clgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison with memory.
+define double @f10(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: clgf
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check memory equality.
+define double @f11(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f11:
+; CHECK: clgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp eq i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check memory inequality.
+define double @f12(double %a, double %b, i64 %i1, i32 *%ptr) {
+; CHECK: f12:
+; CHECK: clgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ne i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CLGF range.
+define double @f13(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f13:
+; CHECK: clgf %r2, 524284(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131071
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f14(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f14:
+; CHECK: agfi %r3, 524288
+; CHECK: clgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 131072
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CLGF range.
+define double @f15(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f15:
+; CHECK: clgf %r2, -4(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CLGF range.
+define double @f16(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f16:
+; CHECK: clgf %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131072
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f17(double %a, double %b, i64 %i1, i32 *%base) {
+; CHECK: f17:
+; CHECK: agfi %r3, -524292
+; CHECK: clgf %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -131073
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLGF allows an index.
+define double @f18(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
+; CHECK: f18:
+; CHECK: clgf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %unext = load i32 *%ptr
+ %i2 = zext i32 %unext to i64
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
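
[Note on the preceding file: CLGFR and CLGF are the mirror image of CGFR/CGF. The second operand is zero-extended and the comparison is unsigned, so ult, eq and ne are covered while slt is not (the CHECK-NOT lines in f3, f4 and f10). The and with 4294967295 = 2^32 - 1 in the even-numbered functions is just another spelling of the zero extension, and the tests verify that both forms are recognized.]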
diff --git a/test/CodeGen/SystemZ/int-cmp-07.ll b/test/CodeGen/SystemZ/int-cmp-07.ll
new file mode 100644
index 000000000000..1a6f6226dd9f
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-07.ll
@@ -0,0 +1,118 @@
+; Test 64-bit signed comparison in which the second operand is a variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check CGR.
+define double @f1(double %a, double %b, i64 %i1, i64 %i2) {
+; CHECK: f1:
+; CHECK: cgr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check CG with no displacement.
+define double @f2(double %a, double %b, i64 %i1, i64 *%ptr) {
+; CHECK: f2:
+; CHECK: cg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CG range.
+define double @f3(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f3:
+; CHECK: cg %r2, 524280(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65535
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: cg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65536
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CG range.
+define double @f5(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f5:
+; CHECK: cg %r2, -8(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -1
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CG range.
+define double @f6(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f6:
+; CHECK: cg %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65536
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f7(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: cg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65537
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CG allows an index.
+define double @f8(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
+; CHECK: f8:
+; CHECK: cg %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %i2 = load i64 *%ptr
+ %cond = icmp slt i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-08.ll b/test/CodeGen/SystemZ/int-cmp-08.ll
new file mode 100644
index 000000000000..6e9a13e9cede
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-08.ll
@@ -0,0 +1,118 @@
+; Test 64-bit unsigned comparison in which the second operand is a variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check CLGR.
+define double @f1(double %a, double %b, i64 %i1, i64 %i2) {
+; CHECK: f1:
+; CHECK: clgr %r2, %r3
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check CLG with no displacement.
+define double @f2(double %a, double %b, i64 %i1, i64 *%ptr) {
+; CHECK: f2:
+; CHECK: clg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the aligned CLG range.
+define double @f3(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f3:
+; CHECK: clg %r2, 524280(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65535
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f4(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: clg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 65536
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative aligned CLG range.
+define double @f5(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f5:
+; CHECK: clg %r2, -8(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -1
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CLG range.
+define double @f6(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f6:
+; CHECK: clg %r2, -524288(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65536
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f7(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: clg %r2, 0(%r3)
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -65537
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLG allows an index.
+define double @f8(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
+; CHECK: f8:
+; CHECK: clg %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %i2 = load i64 *%ptr
+ %cond = icmp ult i64 %i1, %i2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-09.ll b/test/CodeGen/SystemZ/int-cmp-09.ll
new file mode 100644
index 000000000000..bb7213c6a436
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-09.ll
@@ -0,0 +1,135 @@
+; Test 32-bit signed comparison in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check comparisons with 0.
+define double @f1(double %a, double %b, i32 %i1) {
+; CHECK: f1:
+; CHECK: chi %r2, 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 1.
+define double @f2(double %a, double %b, i32 %i1) {
+; CHECK: f2:
+; CHECK: chi %r2, 1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CHI range.
+define double @f3(double %a, double %b, i32 %i1) {
+; CHECK: f3:
+; CHECK: chi %r2, 32767
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use CFI.
+define double @f4(double %a, double %b, i32 %i1) {
+; CHECK: f4:
+; CHECK: cfi %r2, 32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 32-bit range.
+define double @f5(double %a, double %b, i32 %i1) {
+; CHECK: f5:
+; CHECK: cfi %r2, 2147483647
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i32 %i1, 2147483647
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which should be treated as a negative value.
+define double @f6(double %a, double %b, i32 %i1) {
+; CHECK: f6:
+; CHECK: cfi %r2, -2147483648
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i32 %i1, 2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative CHI range.
+define double @f7(double %a, double %b, i32 %i1) {
+; CHECK: f7:
+; CHECK: chi %r2, -1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CHI range.
+define double @f8(double %a, double %b, i32 %i1) {
+; CHECK: f8:
+; CHECK: chi %r2, -32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use CFI instead.
+define double @f9(double %a, double %b, i32 %i1) {
+; CHECK: f9:
+; CHECK: cfi %r2, -32769
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i32 %i1, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 32-bit range.
+define double @f10(double %a, double %b, i32 %i1) {
+; CHECK: f10:
+; CHECK: cfi %r2, -2147483648
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i32 %i1, -2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which should be treated as a positive value.
+define double @f11(double %a, double %b, i32 %i1) {
+; CHECK: f11:
+; CHECK: cfi %r2, 2147483647
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i32 %i1, -2147483649
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
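
[Note on the preceding file: the immediate cut-overs above are the two signed field widths. CHI takes a 16-bit signed immediate with range [-32768, 32767] (f3 and f8) and CFI a 32-bit signed immediate with range [-2147483648, 2147483647]. Constants outside i32, such as the 2147483648 in f6 and the -2147483649 in f11, wrap to -2147483648 and 2147483647 when truncated to 32 bits, which is why those tests expect the wrapped immediates.]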
diff --git a/test/CodeGen/SystemZ/int-cmp-10.ll b/test/CodeGen/SystemZ/int-cmp-10.ll
new file mode 100644
index 000000000000..f2d3ccd64af6
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-10.ll
@@ -0,0 +1,28 @@
+; Test 32-bit unsigned comparisons in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check a value near the low end of the range. We use signed forms for
+; comparisons with zero, or things that are equivalent to them.
+define double @f1(double %a, double %b, i32 %i1) {
+; CHECK: f1:
+; CHECK: clfi %r2, 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ugt i32 %i1, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check a value near the high end of the range.
+define double @f2(double %a, double %b, i32 %i1) {
+; CHECK: f2:
+; CHECK: clfi %r2, 4294967280
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ult i32 %i1, 4294967280
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-11.ll b/test/CodeGen/SystemZ/int-cmp-11.ll
new file mode 100644
index 000000000000..1bfb0c61cb90
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-11.ll
@@ -0,0 +1,135 @@
+; Test 64-bit signed comparisons in which the second operand is a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check comparisons with 0.
+define double @f1(double %a, double %b, i64 %i1) {
+; CHECK: f1:
+; CHECK: cghi %r2, 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 1.
+define double @f2(double %a, double %b, i64 %i1) {
+; CHECK: f2:
+; CHECK: cghi %r2, 1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGHI range.
+define double @f3(double %a, double %b, i64 %i1) {
+; CHECK: f3:
+; CHECK: cghi %r2, 32767
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use CGFI.
+define double @f4(double %a, double %b, i64 %i1) {
+; CHECK: f4:
+; CHECK: cgfi %r2, 32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGFI range.
+define double @f5(double %a, double %b, i64 %i1) {
+; CHECK: f5:
+; CHECK: cgfi %r2, 2147483647
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 2147483647
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use register comparison.
+define double @f6(double %a, double %b, i64 %i1) {
+; CHECK: f6:
+; CHECK: cgr
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, 2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative CGHI range.
+define double @f7(double %a, double %b, i64 %i1) {
+; CHECK: f7:
+; CHECK: cghi %r2, -1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGHI range.
+define double @f8(double %a, double %b, i64 %i1) {
+; CHECK: f8:
+; CHECK: cghi %r2, -32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use CGFI instead.
+define double @f9(double %a, double %b, i64 %i1) {
+; CHECK: f9:
+; CHECK: cgfi %r2, -32769
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGFI range.
+define double @f10(double %a, double %b, i64 %i1) {
+; CHECK: f10:
+; CHECK: cgfi %r2, -2147483648
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, -2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use register comparison.
+define double @f11(double %a, double %b, i64 %i1) {
+; CHECK: f11:
+; CHECK: cgr
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp slt i64 %i1, -2147483649
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
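
[Note on the preceding file: the 64-bit signed forms use the same field widths. CGHI takes a 16-bit signed immediate, [-32768, 32767], and CGFI a 32-bit signed immediate, [-2147483648, 2147483647]. Here the operand type is i64, so 2147483648 (f6) and -2147483649 (f11) do not wrap; they simply cannot be encoded, and those tests expect a CGR register comparison instead.]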
diff --git a/test/CodeGen/SystemZ/int-cmp-12.ll b/test/CodeGen/SystemZ/int-cmp-12.ll
new file mode 100644
index 000000000000..0288730c3a80
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-12.ll
@@ -0,0 +1,40 @@
+; Test 64-bit unsigned comparisons in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check a value near the low end of the range. We use signed forms for
+; comparisons with zero, or things that are equivalent to them.
+define double @f1(double %a, double %b, i64 %i1) {
+; CHECK: f1:
+; CHECK: clgfi %r2, 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ugt i64 %i1, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLGFI range.
+define double @f2(double %a, double %b, i64 %i1) {
+; CHECK: f2:
+; CHECK: clgfi %r2, 4294967295
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ult i64 %i1, 4294967295
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use a register comparison.
+define double @f3(double %a, double %b, i64 %i1) {
+; CHECK: f3:
+; CHECK: clgr %r2,
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ult i64 %i1, 4294967296
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-13.ll b/test/CodeGen/SystemZ/int-cmp-13.ll
new file mode 100644
index 000000000000..c180831debb0
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-13.ll
@@ -0,0 +1,147 @@
+; Test 64-bit equality comparisons in which the second operand is a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
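+; The immediate forms are tried in increasing size: CGHI for 16-bit signed
+; constants and CGFI for 32-bit signed constants. Because equality ignores
+; the sign, CLGFI also covers [2^31, 2^32); everything else needs CGR.
+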
+; Check comparisons with 0.
+define double @f1(double %a, double %b, i64 %i1) {
+; CHECK: f1:
+; CHECK: cghi %r2, 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGHI range.
+define double @f2(double %a, double %b, i64 %i1) {
+; CHECK: f2:
+; CHECK: cghi %r2, 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use CGFI.
+define double @f3(double %a, double %b, i64 %i1) {
+; CHECK: f3:
+; CHECK: cgfi %r2, 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGFI range.
+define double @f4(double %a, double %b, i64 %i1) {
+; CHECK: f4:
+; CHECK: cgfi %r2, 2147483647
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 2147483647
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which should use CLGFI instead.
+define double @f5(double %a, double %b, i64 %i1) {
+; CHECK: f5:
+; CHECK: clgfi %r2, 2147483648
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLGFI range.
+define double @f6(double %a, double %b, i64 %i1) {
+; CHECK: f6:
+; CHECK: clgfi %r2, 4294967295
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 4294967295
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use a register comparison.
+define double @f7(double %a, double %b, i64 %i1) {
+; CHECK: f7:
+; CHECK: cgr %r2,
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, 4294967296
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative CGHI range.
+define double @f8(double %a, double %b, i64 %i1) {
+; CHECK: f8:
+; CHECK: cghi %r2, -1
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGHI range.
+define double @f9(double %a, double %b, i64 %i1) {
+; CHECK: f9:
+; CHECK: cghi %r2, -32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use CGFI instead.
+define double @f10(double %a, double %b, i64 %i1) {
+; CHECK: f10:
+; CHECK: cgfi %r2, -32769
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGFI range.
+define double @f11(double %a, double %b, i64 %i1) {
+; CHECK: f11:
+; CHECK: cgfi %r2, -2147483648
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, -2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use a register comparison.
+define double @f12(double %a, double %b, i64 %i1) {
+; CHECK: f12:
+; CHECK: cgr
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp eq i64 %i1, -2147483649
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-14.ll b/test/CodeGen/SystemZ/int-cmp-14.ll
new file mode 100644
index 000000000000..6a7e0e6d552a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-14.ll
@@ -0,0 +1,147 @@
+; Test 64-bit inequality comparisons in which the second operand is a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
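+; The instruction choices are the same as for the equality comparisons;
+; only the branch condition changes, from "equal" to "low or high".
+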
+; Check comparisons with 0.
+define double @f1(double %a, double %b, i64 %i1) {
+; CHECK: f1:
+; CHECK: cghi %r2, 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGHI range.
+define double @f2(double %a, double %b, i64 %i1) {
+; CHECK: f2:
+; CHECK: cghi %r2, 32767
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use CGFI.
+define double @f3(double %a, double %b, i64 %i1) {
+; CHECK: f3:
+; CHECK: cgfi %r2, 32768
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGFI range.
+define double @f4(double %a, double %b, i64 %i1) {
+; CHECK: f4:
+; CHECK: cgfi %r2, 2147483647
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 2147483647
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which should use CLGFI instead.
+define double @f5(double %a, double %b, i64 %i1) {
+; CHECK: f5:
+; CHECK: clgfi %r2, 2147483648
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLGFI range.
+define double @f6(double %a, double %b, i64 %i1) {
+; CHECK: f6:
+; CHECK: clgfi %r2, 4294967295
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 4294967295
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which must use a register comparison.
+define double @f7(double %a, double %b, i64 %i1) {
+; CHECK: f7:
+; CHECK: cgr %r2,
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, 4294967296
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative CGHI range.
+define double @f8(double %a, double %b, i64 %i1) {
+; CHECK: f8:
+; CHECK: cghi %r2, -1
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGHI range.
+define double @f9(double %a, double %b, i64 %i1) {
+; CHECK: f9:
+; CHECK: cghi %r2, -32768
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use CGFI instead.
+define double @f10(double %a, double %b, i64 %i1) {
+; CHECK: f10:
+; CHECK: cgfi %r2, -32769
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CGFI range.
+define double @f11(double %a, double %b, i64 %i1) {
+; CHECK: f11:
+; CHECK: cgfi %r2, -2147483648
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, -2147483648
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which must use a register comparison.
+define double @f12(double %a, double %b, i64 %i1) {
+; CHECK: f12:
+; CHECK: cgr
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %cond = icmp ne i64 %i1, -2147483649
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-15.ll b/test/CodeGen/SystemZ/int-cmp-15.ll
new file mode 100644
index 000000000000..6bb7e2b3ac3d
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-15.ll
@@ -0,0 +1,241 @@
+; Test 8-bit unsigned comparisons between memory and constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
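+; CLI compares a byte in memory with an 8-bit immediate and has a 12-bit
+; unsigned displacement; CLIY is the long-displacement form with a 20-bit
+; signed displacement. Neither instruction accepts an index register.
+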
+; Check ordered comparisons near the low end of the unsigned 8-bit range.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp ugt i8 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons near the high end of the unsigned 8-bit range.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 254
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for negative bytes.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp slt i8 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and an alternative form.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp sle i8 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for non-negative bytes.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp sge i8 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and an alternative form.
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp sgt i8 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons at the low end of the signed 8-bit range.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp eq i8 %val, -128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons at the low end of the unsigned 8-bit range.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp eq i8 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons at the high end of the signed 8-bit range.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp eq i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons at the high end of the unsigned 8-bit range.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %cond = icmp eq i8 %val, 255
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLI range.
+define double @f11(double %a, double %b, i8 *%src) {
+; CHECK: f11:
+; CHECK: cli 4095(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next byte up, which should use CLIY instead of CLI.
+define double @f12(double %a, double %b, i8 *%src) {
+; CHECK: f12:
+; CHECK: cliy 4096(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLIY range.
+define double @f13(double %a, double %b, i8 *%src) {
+; CHECK: f13:
+; CHECK: cliy 524287(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f14(double %a, double %b, i8 *%src) {
+; CHECK: f14:
+; CHECK: agfi %r2, 524288
+; CHECK: cli 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the negative CLIY range.
+define double @f15(double %a, double %b, i8 *%src) {
+; CHECK: f15:
+; CHECK: cliy -1(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the CLIY range.
+define double @f16(double %a, double %b, i8 *%src) {
+; CHECK: f16:
+; CHECK: cliy -524288(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define double @f17(double %a, double %b, i8 *%src) {
+; CHECK: f17:
+; CHECK: agfi %r2, -524289
+; CHECK: cli 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLI does not allow an index.
+define double @f18(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f18:
+; CHECK: agr %r2, %r3
+; CHECK: cli 4095(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLIY does not allow an index.
+define double @f19(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f19:
+; CHECK: agr %r2, %r3
+; CHECK: cliy 4096(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %base, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %cond = icmp ult i8 %val, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-16.ll b/test/CodeGen/SystemZ/int-cmp-16.ll
new file mode 100644
index 000000000000..8af854efaabf
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-16.ll
@@ -0,0 +1,133 @@
+; Test 32-bit equality comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
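+; The extension can be folded away and CLI used directly whenever the
+; constant is representable in the extended range of a byte; constants
+; outside that range fold the whole comparison to a constant result.
+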
+; Check the low end of the 8-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp eq i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 8-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp eq i32 %ext, 255
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp eq i32 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp eq i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 8-bit range, using sign extension.
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 8-bit range, using sign extension.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, -128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp eq i32 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-17.ll b/test/CodeGen/SystemZ/int-cmp-17.ll
new file mode 100644
index 000000000000..d4d5e98b8358
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-17.ll
@@ -0,0 +1,133 @@
+; Test 32-bit inequality comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 8-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ne i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 8-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ne i32 %ext, 255
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always true.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ne i32 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always true.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ne i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 8-bit range, using sign extension.
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always true.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 8-bit range, using sign extension.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, -128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always true.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ne i32 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-18.ll b/test/CodeGen/SystemZ/int-cmp-18.ll
new file mode 100644
index 000000000000..9822dc212248
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-18.ll
@@ -0,0 +1,133 @@
+; Test 64-bit equality comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 8-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp eq i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 8-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp eq i64 %ext, 255
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp eq i64 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp eq i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 8-bit range, using sign extension.
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 8-bit range, using sign extension.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, -128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp eq i64 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-19.ll b/test/CodeGen/SystemZ/int-cmp-19.ll
new file mode 100644
index 000000000000..7d29dbcedcd6
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-19.ll
@@ -0,0 +1,133 @@
+; Test 64-bit inequality comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 8-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ne i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 8-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ne i64 %ext, 255
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always true.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ne i64 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always true.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ne i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK: cli 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 8-bit range, using sign extension.
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, 127
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always true.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 255
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 8-bit range, using sign extension.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, -128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always true.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ne i64 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-20.ll b/test/CodeGen/SystemZ/int-cmp-20.ll
new file mode 100644
index 000000000000..8fffbc86a737
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-20.ll
@@ -0,0 +1,220 @@
+; Test 32-bit ordered comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check unsigned comparison near the low end of the CLI range, using zero
+; extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ugt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the low end of the CLI range, using sign
+; extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ugt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLI range, using zero
+; extension.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ult i32 %ext, 254
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLI range, using sign
+; extension.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ult i32 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison above the high end of the CLI range, using zero
+; extension. The condition is always true.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp ult i32 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; When using unsigned comparison with sign extension, equality with values
+; in the range [128, MAX-128] is impossible, and ordered comparisons with
+; those values are effectively sign tests. Since such comparisons are
+; unlikely to occur in practice, we don't bother optimizing the second case,
+; and simply ignore CLI for this range. First check the low end of the range.
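+; For example, sign-extending a byte to i32 gives an unsigned value in
+; [0, 127] or [0xffffff80, 0xffffffff], so "ult 128" below is really a
+; test for a non-negative byte.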
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ult i32 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and then the high end.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp ult i32 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp sgt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLI range, using sign
+; extension. This cannot use CLI.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp sgt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp slt i32 %ext, 254
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLI range, using sign
+; extension. This cannot use CLI.
+define double @f11(double %a, double %b, i8 *%ptr) {
+; CHECK: f11:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp slt i32 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CLI range, using zero
+; extension. The condition is always true.
+define double @f12(double %a, double %b, i8 *%ptr) {
+; CHECK: f12:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %cond = icmp slt i32 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for nonnegative values.
+define double @f13(double %a, double %b, i8 *%ptr) {
+; CHECK: f13:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp sge i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and another form.
+define double @f14(double %a, double %b, i8 *%ptr) {
+; CHECK: f14:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp sgt i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for negative values.
+define double @f15(double %a, double %b, i8 *%ptr) {
+; CHECK: f15:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp slt i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and another form.
+define double @f16(double %a, double %b, i8 *%ptr) {
+; CHECK: f16:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %cond = icmp sle i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-21.ll b/test/CodeGen/SystemZ/int-cmp-21.ll
new file mode 100644
index 000000000000..43447b8fda07
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-21.ll
@@ -0,0 +1,220 @@
+; Test 64-bit ordered comparisons that are really between a memory byte
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check unsigned comparison near the low end of the CLI range, using zero
+; extension.
+define double @f1(double %a, double %b, i8 *%ptr) {
+; CHECK: f1:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ugt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the low end of the CLI range, using sign
+; extension.
+define double @f2(double %a, double %b, i8 *%ptr) {
+; CHECK: f2:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ugt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLI range, using zero
+; extension.
+define double @f3(double %a, double %b, i8 *%ptr) {
+; CHECK: f3:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ult i64 %ext, 254
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLI range, using sign
+; extension.
+define double @f4(double %a, double %b, i8 *%ptr) {
+; CHECK: f4:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ult i64 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison above the high end of the CLI range, using zero
+; extension. The condition is always true.
+define double @f5(double %a, double %b, i8 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp ult i64 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; When using unsigned comparison with sign extension, equality with values
+; in the range [128, MAX-128] is impossible, and ordered comparisons with
+; those values are effectively sign tests. Since such comparisons are
+; unlikely to occur in practice, we don't bother optimizing the second case,
+; and simply ignore CLI for this range. First check the low end of the range.
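+; Here the extension is to i64, so the possible unsigned values are
+; [0, 127] or [0xffffffffffffff80, 0xffffffffffffffff].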
+define double @f6(double %a, double %b, i8 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ult i64 %ext, 128
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and then the high end.
+define double @f7(double %a, double %b, i8 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp ult i64 %ext, -129
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f8(double %a, double %b, i8 *%ptr) {
+; CHECK: f8:
+; CHECK: cli 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp sgt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLI range, using sign
+; extension. This cannot use CLI.
+define double @f9(double %a, double %b, i8 *%ptr) {
+; CHECK: f9:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp sgt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f10(double %a, double %b, i8 *%ptr) {
+; CHECK: f10:
+; CHECK: cli 0(%r2), 254
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp slt i64 %ext, 254
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLI range, using sign
+; extension. This cannot use CLI.
+define double @f11(double %a, double %b, i8 *%ptr) {
+; CHECK: f11:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp slt i64 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CLI range, using zero
+; extension. The condition is always true.
+define double @f12(double %a, double %b, i8 *%ptr) {
+; CHECK: f12:
+; CHECK-NOT: cli
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %cond = icmp slt i64 %ext, 256
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for nonnegative values.
+define double @f13(double %a, double %b, i8 *%ptr) {
+; CHECK: f13:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp sge i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and another form.
+define double @f14(double %a, double %b, i8 *%ptr) {
+; CHECK: f14:
+; CHECK: cli 0(%r2), 128
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp sgt i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check tests for negative values.
+define double @f15(double %a, double %b, i8 *%ptr) {
+; CHECK: f15:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp slt i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and another form.
+define double @f16(double %a, double %b, i8 *%ptr) {
+; CHECK: f16:
+; CHECK: cli 0(%r2), 127
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %cond = icmp sle i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-22.ll b/test/CodeGen/SystemZ/int-cmp-22.ll
new file mode 100644
index 000000000000..513d4be2f423
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-22.ll
@@ -0,0 +1,128 @@
+; Test 16-bit signed ordered comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
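+; CHHSI compares a halfword in memory with a 16-bit signed immediate.
+; Like CLI, it has only a 12-bit unsigned displacement and no index field.
+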
+; Check comparisons with 0.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: chhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 1.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: chhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check a value near the high end of the signed 16-bit range.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK: chhsi 0(%r2), 32766
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 32766
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK: chhsi 0(%r2), -1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check a value near the low end of the 16-bit signed range.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK: chhsi 0(%r2), -32766
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, -32766
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CHHSI range.
+define double @f6(double %a, double %b, i16 %i1, i16 *%base) {
+; CHECK: f6:
+; CHECK: chhsi 4094(%r3), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 2047
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next halfword up, which needs separate address logic.
+define double @f7(double %a, double %b, i16 *%base) {
+; CHECK: f7:
+; CHECK: aghi %r2, 4096
+; CHECK: chhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 2048
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f8(double %a, double %b, i16 *%base) {
+; CHECK: f8:
+; CHECK: aghi %r2, -2
+; CHECK: chhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 -1
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CHHSI does not allow indices.
+define double @f9(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f9:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: chhsi 0({{%r[23]}}), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i16 *
+ %val = load i16 *%ptr
+ %cond = icmp slt i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-23.ll b/test/CodeGen/SystemZ/int-cmp-23.ll
new file mode 100644
index 000000000000..40e13310d55c
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-23.ll
@@ -0,0 +1,89 @@
+; Test 16-bit unsigned comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
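+; CLHHSI is the unsigned counterpart of CHHSI, with the same
+; base-plus-displacement addressing and likewise no index register.
+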
+; Check a value near the low end of the unsigned 16-bit range.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ugt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check a value near the high end of the unsigned 16-bit range.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ult i16 %val, 65534
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLHHSI range.
+define double @f3(double %a, double %b, i16 %i1, i16 *%base) {
+; CHECK: f3:
+; CHECK: clhhsi 4094(%r3), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 2047
+ %val = load i16 *%ptr
+ %cond = icmp ugt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next halfword up, which needs separate address logic.
+define double @f4(double %a, double %b, i16 *%base) {
+; CHECK: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 2048
+ %val = load i16 *%ptr
+ %cond = icmp ugt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f5(double %a, double %b, i16 *%base) {
+; CHECK: f5:
+; CHECK: aghi %r2, -2
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%base, i64 -1
+ %val = load i16 *%ptr
+ %cond = icmp ugt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLHHSI does not allow indices.
+define double @f6(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f6:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: clhhsi 0({{%r[23]}}), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i16 *
+ %val = load i16 *%ptr
+ %cond = icmp ugt i16 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-24.ll b/test/CodeGen/SystemZ/int-cmp-24.ll
new file mode 100644
index 000000000000..46186cd74b53
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-24.ll
@@ -0,0 +1,55 @@
+; Test 16-bit equality comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
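+; Equality is the same whether the constant is interpreted as signed or
+; unsigned, so CLHHSI covers the whole 16-bit range; -32768 is simply
+; encoded as its unsigned equivalent 32768.
+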
+; Check the low end of the unsigned 16-bit range.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp eq i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the unsigned 16-bit range.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp eq i16 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp eq i16 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp eq i16 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-25.ll b/test/CodeGen/SystemZ/int-cmp-25.ll
new file mode 100644
index 000000000000..a3a223fa3448
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-25.ll
@@ -0,0 +1,55 @@
+; Test 16-bit inequality comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the unsigned 16-bit range.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ne i16 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the unsigned 16-bit range.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ne i16 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ne i16 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %cond = icmp ne i16 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-26.ll b/test/CodeGen/SystemZ/int-cmp-26.ll
new file mode 100644
index 000000000000..31330b2a6397
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-26.ll
@@ -0,0 +1,133 @@
+; Test 32-bit equality comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
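+; As with the byte tests, the extension folds away whenever the constant
+; is representable in the extended range of a halfword; anything else
+; folds the whole comparison to a constant result.
+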
+; Check the low end of the 16-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp eq i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 16-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp eq i32 %ext, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp eq i32 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp eq i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range, using sign extension.
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range, using sign extension.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp eq i32 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-27.ll b/test/CodeGen/SystemZ/int-cmp-27.ll
new file mode 100644
index 000000000000..7cbea3d92526
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-27.ll
@@ -0,0 +1,133 @@
+; Test 32-bit inequality comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 16-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ne i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 16-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ne i32 %ext, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ne i32 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ne i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range, using sign extension.
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range, using sign extension.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ne i32 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-28.ll b/test/CodeGen/SystemZ/int-cmp-28.ll
new file mode 100644
index 000000000000..629eb4f06013
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-28.ll
@@ -0,0 +1,133 @@
+; Test 64-bit equality comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 16-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp eq i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 16-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp eq i64 %ext, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp eq i64 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp eq i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range, using sign extension.
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range, using sign extension.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp eq i64 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-29.ll b/test/CodeGen/SystemZ/int-cmp-29.ll
new file mode 100644
index 000000000000..de41dd782d21
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-29.ll
@@ -0,0 +1,133 @@
+; Test 64-bit inequality comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the 16-bit unsigned range, with zero extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ne i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the 16-bit unsigned range, with zero extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ne i64 %ext, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, with zero extension. The condition is always false.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ne i64 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, with zero extension.
+; This condition is also always false.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ne i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with 0, using sign extension.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK: clhhsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the signed 16-bit range, using sign extension.
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK: clhhsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, using sign extension.
+; The condition is always false.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check comparisons with -1, using sign extension.
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the low end of the signed 16-bit range, using sign extension.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: clhhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, using sign extension.
+; The condition is always false.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ne i64 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-30.ll b/test/CodeGen/SystemZ/int-cmp-30.ll
new file mode 100644
index 000000000000..713ad8ef841d
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-30.ll
@@ -0,0 +1,225 @@
+; Test 32-bit ordered comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check unsigned comparison near the low end of the CLHHSI range, using zero
+; extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ugt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the low end of the CLHHSI range, using sign
+; extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ugt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLHHSI range, using zero
+; extension.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ult i32 %ext, 65534
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLHHSI range, using sign
+; extension.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ult i32 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison above the high end of the CLHHSI range, using zero
+; extension. The condition is always true.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp ult i32 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; When using unsigned comparison with sign extension, equality with values
+; in the range [32768, MAX-32768] is impossible, and ordered comparisons with
+; those values are effectively sign tests. Since such comparisons are
+; unlikely to occur in practice, we don't bother optimizing the second case,
+; and simply ignore CLHHSI for this range. First check the low end of the
+; range.
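+; For reference: sign extension maps an i16 to a 32-bit value in
+; [0x00000000, 0x00007fff] or [0xffff8000, 0xffffffff], so no extended
+; value can equal an unsigned constant in [0x00008000, 0xffff7fff],
+; i.e. [32768, MAX-32768].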
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ult i32 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and then the high end.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp ult i32 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLHHSI range, using zero
+; extension. This is equivalent to unsigned comparison.
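+; (Both the zero-extended value and the constant are nonnegative, so the
+; signed and unsigned orderings agree.)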
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp sgt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLHHSI range, using sign
+; extension. This should use CHHSI instead.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: chhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp sgt i32 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLHHSI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp slt i32 %ext, 65534
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLHHSI range, using sign
+; extension. This should use CHHSI instead.
+define double @f11(double %a, double %b, i16 *%ptr) {
+; CHECK: f11:
+; CHECK: chhsi 0(%r2), -2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp slt i32 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CLHHSI range, using zero
+; extension. The condition is always true.
+define double @f12(double %a, double %b, i16 *%ptr) {
+; CHECK: f12:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i32
+ %cond = icmp slt i32 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CHHSI range, using sign
+; extension.
+define double @f13(double %a, double %b, i16 *%ptr) {
+; CHECK: f13:
+; CHECK: chhsi 0(%r2), 32766
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp slt i32 %ext, 32766
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CHHSI range, using sign
+; extension. This condition is always true.
+define double @f14(double %a, double %b, i16 *%ptr) {
+; CHECK: f14:
+; CHECK-NOT: chhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp slt i32 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CHHSI range, using sign
+; extension.
+define double @f15(double %a, double %b, i16 *%ptr) {
+; CHECK: f15:
+; CHECK: chhsi 0(%r2), -32767
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp sgt i32 %ext, -32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison below the low end of the CHHSI range, using sign
+; extension. This condition is always true.
+define double @f16(double %a, double %b, i16 *%ptr) {
+; CHECK: f16:
+; CHECK-NOT: chhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i32
+ %cond = icmp sgt i32 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-31.ll b/test/CodeGen/SystemZ/int-cmp-31.ll
new file mode 100644
index 000000000000..cabe9b83a135
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-31.ll
@@ -0,0 +1,225 @@
+; Test 64-bit ordered comparisons that are really between a memory halfword
+; and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check unsigned comparison near the low end of the CLHHSI range, using zero
+; extension.
+define double @f1(double %a, double %b, i16 *%ptr) {
+; CHECK: f1:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ugt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the low end of the CLHHSI range, using sign
+; extension.
+define double @f2(double %a, double %b, i16 *%ptr) {
+; CHECK: f2:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ugt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLHHSI range, using zero
+; extension.
+define double @f3(double %a, double %b, i16 *%ptr) {
+; CHECK: f3:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ult i64 %ext, 65534
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison near the high end of the CLHHSI range, using sign
+; extension.
+define double @f4(double %a, double %b, i16 *%ptr) {
+; CHECK: f4:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ult i64 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check unsigned comparison above the high end of the CLHHSI range, using zero
+; extension. The condition is always true.
+define double @f5(double %a, double %b, i16 *%ptr) {
+; CHECK: f5:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp ult i64 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; When using unsigned comparison with sign extension, equality with values
+; in the range [32768, MAX-32768] is impossible, and ordered comparisons with
+; those values are effectively sign tests. Since such comparisons are
+; unlikely to occur in practice, we don't bother optimizing the second case,
+; and simply ignore CLHHSI for this range. First check the low end of the
+; range.
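+; Here sign extension yields a 64-bit value in [0, 32767] or in
+; [0xffffffffffff8000, 0xffffffffffffffff], so the impossible unsigned
+; range is [0x8000, 0xffffffffffff7fff].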
+define double @f6(double %a, double %b, i16 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ult i64 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; ...and then the high end.
+define double @f7(double %a, double %b, i16 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp ult i64 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLHHSI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f8(double %a, double %b, i16 *%ptr) {
+; CHECK: f8:
+; CHECK: clhhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp sgt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CLHHSI range, using sign
+; extension. This should use CHHSI instead.
+define double @f9(double %a, double %b, i16 *%ptr) {
+; CHECK: f9:
+; CHECK: chhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp sgt i64 %ext, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLHHSI range, using zero
+; extension. This is equivalent to unsigned comparison.
+define double @f10(double %a, double %b, i16 *%ptr) {
+; CHECK: f10:
+; CHECK: clhhsi 0(%r2), 65534
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp slt i64 %ext, 65534
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CLHHSI range, using sign
+; extension. This should use CHHSI instead.
+define double @f11(double %a, double %b, i16 *%ptr) {
+; CHECK: f11:
+; CHECK: chhsi 0(%r2), -2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp slt i64 %ext, -2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CLHHSI range, using zero
+; extension. The condition is always true.
+define double @f12(double %a, double %b, i16 *%ptr) {
+; CHECK: f12:
+; CHECK-NOT: clhhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = zext i16 %val to i64
+ %cond = icmp slt i64 %ext, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the high end of the CHHSI range, using sign
+; extension.
+define double @f13(double %a, double %b, i16 *%ptr) {
+; CHECK: f13:
+; CHECK: chhsi 0(%r2), 32766
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp slt i64 %ext, 32766
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison above the high end of the CHHSI range, using sign
+; extension. This condition is always true.
+define double @f14(double %a, double %b, i16 *%ptr) {
+; CHECK: f14:
+; CHECK-NOT: chhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp slt i64 %ext, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison near the low end of the CHHSI range, using sign
+; extension.
+define double @f15(double %a, double %b, i16 *%ptr) {
+; CHECK: f15:
+; CHECK: chhsi 0(%r2), -32767
+; CHECK-NEXT: j{{g?}}h
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp sgt i64 %ext, -32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check signed comparison below the low end of the CHHSI range, using sign
+; extension. This condition is always true.
+define double @f16(double %a, double %b, i16 *%ptr) {
+; CHECK: f16:
+; CHECK-NOT: chhsi
+; CHECK: br %r14
+ %val = load i16 *%ptr
+ %ext = sext i16 %val to i64
+ %cond = icmp sgt i64 %ext, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-32.ll b/test/CodeGen/SystemZ/int-cmp-32.ll
new file mode 100644
index 000000000000..4bdeebb35c99
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-32.ll
@@ -0,0 +1,237 @@
+; Test 32-bit signed comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ordered comparisons with 0.
+define double @f1(double %a, double %b, i32 *%ptr) {
+; CHECK: f1:
+; CHECK: chsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with 1.
+define double @f2(double %a, double %b, i32 *%ptr) {
+; CHECK: f2:
+; CHECK: chsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the high end of the signed 16-bit range.
+define double @f3(double %a, double %b, i32 *%ptr) {
+; CHECK: f3:
+; CHECK: chsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CHSI.
+define double @f4(double %a, double %b, i32 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: chsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with -1.
+define double @f5(double %a, double %b, i32 *%ptr) {
+; CHECK: f5:
+; CHECK: chsi 0(%r2), -1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the low end of the 16-bit signed range.
+define double @f6(double %a, double %b, i32 *%ptr) {
+; CHECK: f6:
+; CHECK: chsi 0(%r2), -32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which can't use CHSI.
+define double @f7(double %a, double %b, i32 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: chsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 0.
+define double @f8(double %a, double %b, i32 *%ptr) {
+; CHECK: f8:
+; CHECK: chsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 1.
+define double @f9(double %a, double %b, i32 *%ptr) {
+; CHECK: f9:
+; CHECK: chsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the high end of the signed 16-bit range.
+define double @f10(double %a, double %b, i32 *%ptr) {
+; CHECK: f10:
+; CHECK: chsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CHSI.
+define double @f11(double %a, double %b, i32 *%ptr) {
+; CHECK: f11:
+; CHECK-NOT: chsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with -1.
+define double @f12(double %a, double %b, i32 *%ptr) {
+; CHECK: f12:
+; CHECK: chsi 0(%r2), -1
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the low end of the 16-bit signed range.
+define double @f13(double %a, double %b, i32 *%ptr) {
+; CHECK: f13:
+; CHECK: chsi 0(%r2), -32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which can't use CHSI: truncated to 16 bits
+; it would be treated as a positive value.
+define double @f14(double %a, double %b, i32 *%ptr) {
+; CHECK: f14:
+; CHECK-NOT: chsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CHSI range.
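+; CHSI is an SIL-format instruction with a 12-bit unsigned displacement,
+; so 4092 is the largest word-aligned offset it can encode directly.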
+define double @f15(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f15:
+; CHECK: chsi 4092(%r3), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1023
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+define double @f16(double %a, double %b, i32 *%base) {
+; CHECK: f16:
+; CHECK: aghi %r2, 4096
+; CHECK: chsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1024
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f17(double %a, double %b, i32 *%base) {
+; CHECK: f17:
+; CHECK: aghi %r2, -4
+; CHECK: chsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CHSI does not allow indices.
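+; SIL-format instructions have a base register but no index register,
+; hence the explicit addition.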
+define double @f18(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f18:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: chsi 0({{%r[23]}}), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i32 *
+ %val = load i32 *%ptr
+ %cond = icmp slt i32 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-33.ll b/test/CodeGen/SystemZ/int-cmp-33.ll
new file mode 100644
index 000000000000..0144806d4465
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-33.ll
@@ -0,0 +1,139 @@
+; Test 32-bit unsigned comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ordered comparisons with a constant near the low end of the unsigned
+; 16-bit range.
+define double @f1(double %a, double %b, i32 *%ptr) {
+; CHECK: f1:
+; CHECK: clfhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp ugt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the high end of the unsigned 16-bit range.
+define double @f2(double %a, double %b, i32 *%ptr) {
+; CHECK: f2:
+; CHECK: clfhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp ult i32 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CLFHSI.
+define double @f3(double %a, double %b, i32 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clfhsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp ult i32 %val, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 32768, the lowest value for which
+; we prefer CLFHSI to CHSI.
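+; (CHSI's 16-bit immediate is signed and so tops out at 32767, while
+; CLFHSI's is unsigned and reaches 65535.)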
+define double @f4(double %a, double %b, i32 *%ptr) {
+; CHECK: f4:
+; CHECK: clfhsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the high end of the unsigned 16-bit range.
+define double @f5(double %a, double %b, i32 *%ptr) {
+; CHECK: f5:
+; CHECK: clfhsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CLFHSI.
+define double @f6(double %a, double %b, i32 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: clfhsi
+; CHECK: br %r14
+ %val = load i32 *%ptr
+ %cond = icmp eq i32 %val, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLFHSI range.
+define double @f7(double %a, double %b, i32 %i1, i32 *%base) {
+; CHECK: f7:
+; CHECK: clfhsi 4092(%r3), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1023
+ %val = load i32 *%ptr
+ %cond = icmp ugt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next word up, which needs separate address logic.
+define double @f8(double %a, double %b, i32 *%base) {
+; CHECK: f8:
+; CHECK: aghi %r2, 4096
+; CHECK: clfhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 1024
+ %val = load i32 *%ptr
+ %cond = icmp ugt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f9(double %a, double %b, i32 *%base) {
+; CHECK: f9:
+; CHECK: aghi %r2, -4
+; CHECK: clfhsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%base, i64 -1
+ %val = load i32 *%ptr
+ %cond = icmp ugt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLFHSI does not allow indices.
+define double @f10(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: clfhsi 0({{%r[23]}}), 1
+; CHECK-NEXT: j{{g?}}h
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i32 *
+ %val = load i32 *%ptr
+ %cond = icmp ugt i32 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-34.ll b/test/CodeGen/SystemZ/int-cmp-34.ll
new file mode 100644
index 000000000000..b10bd4e08031
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-34.ll
@@ -0,0 +1,237 @@
+; Test 64-bit signed comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ordered comparisons with 0.
+define double @f1(double %a, double %b, i64 *%ptr) {
+; CHECK: f1:
+; CHECK: cghsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with 1.
+define double @f2(double %a, double %b, i64 *%ptr) {
+; CHECK: f2:
+; CHECK: cghsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the high end of the signed 16-bit range.
+define double @f3(double %a, double %b, i64 *%ptr) {
+; CHECK: f3:
+; CHECK: cghsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CGHSI.
+define double @f4(double %a, double %b, i64 *%ptr) {
+; CHECK: f4:
+; CHECK-NOT: cghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with -1.
+define double @f5(double %a, double %b, i64 *%ptr) {
+; CHECK: f5:
+; CHECK: cghsi 0(%r2), -1
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the low end of the 16-bit signed range.
+define double @f6(double %a, double %b, i64 *%ptr) {
+; CHECK: f6:
+; CHECK: cghsi 0(%r2), -32768
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which can't use CGHSI.
+define double @f7(double %a, double %b, i64 *%ptr) {
+; CHECK: f7:
+; CHECK-NOT: cghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 0.
+define double @f8(double %a, double %b, i64 *%ptr) {
+; CHECK: f8:
+; CHECK: cghsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 1.
+define double @f9(double %a, double %b, i64 *%ptr) {
+; CHECK: f9:
+; CHECK: cghsi 0(%r2), 1
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the high end of the signed 16-bit range.
+define double @f10(double %a, double %b, i64 *%ptr) {
+; CHECK: f10:
+; CHECK: cghsi 0(%r2), 32767
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 32767
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CGHSI.
+define double @f11(double %a, double %b, i64 *%ptr) {
+; CHECK: f11:
+; CHECK-NOT: cghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with -1.
+define double @f12(double %a, double %b, i64 *%ptr) {
+; CHECK: f12:
+; CHECK: cghsi 0(%r2), -1
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, -1
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the low end of the 16-bit signed range.
+define double @f13(double %a, double %b, i64 *%ptr) {
+; CHECK: f13:
+; CHECK: cghsi 0(%r2), -32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, -32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value down, which can't use CGHSI.
+define double @f14(double %a, double %b, i64 *%ptr) {
+; CHECK: f14:
+; CHECK-NOT: cghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, -32769
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CGHSI range.
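+; Like CHSI, CGHSI only has a 12-bit unsigned displacement; 4088 is the
+; largest doubleword-aligned offset.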
+define double @f15(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f15:
+; CHECK: cghsi 4088(%r3), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 511
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define double @f16(double %a, double %b, i64 *%base) {
+; CHECK: f16:
+; CHECK: aghi %r2, 4096
+; CHECK: cghsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 512
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f17(double %a, double %b, i64 *%base) {
+; CHECK: f17:
+; CHECK: aghi %r2, -8
+; CHECK: cghsi 0(%r2), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -1
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CGHSI does not allow indices.
+define double @f18(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f18:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: cghsi 0({{%r[23]}}), 0
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i64 *
+ %val = load i64 *%ptr
+ %cond = icmp slt i64 %val, 0
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-35.ll b/test/CodeGen/SystemZ/int-cmp-35.ll
new file mode 100644
index 000000000000..9934906ba8d4
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-35.ll
@@ -0,0 +1,139 @@
+; Test 64-bit unsigned comparisons between memory and a constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check ordered comparisons with a constant near the low end of the unsigned
+; 16-bit range.
+define double @f1(double %a, double %b, i64 *%ptr) {
+; CHECK: f1:
+; CHECK: clghsi 0(%r2), 2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check ordered comparisons with the high end of the unsigned 16-bit range.
+define double @f2(double %a, double %b, i64 *%ptr) {
+; CHECK: f2:
+; CHECK: clghsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CLGHSI.
+define double @f3(double %a, double %b, i64 *%ptr) {
+; CHECK: f3:
+; CHECK-NOT: clghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with 32768, the lowest value for which
+; we prefer CLGHSI to CGHSI.
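+; (CGHSI sign-extends its 16-bit immediate, so 32768 is out of range for
+; it, while CLGHSI's unsigned immediate covers [0, 65535].)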
+define double @f4(double %a, double %b, i64 *%ptr) {
+; CHECK: f4:
+; CHECK: clghsi 0(%r2), 32768
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 32768
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check equality comparisons with the high end of the unsigned 16-bit range.
+define double @f5(double %a, double %b, i64 *%ptr) {
+; CHECK: f5:
+; CHECK: clghsi 0(%r2), 65535
+; CHECK-NEXT: j{{g?}}e
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 65535
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next value up, which can't use CLGHSI.
+define double @f6(double %a, double %b, i64 *%ptr) {
+; CHECK: f6:
+; CHECK-NOT: clghsi
+; CHECK: br %r14
+ %val = load i64 *%ptr
+ %cond = icmp eq i64 %val, 65536
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the high end of the CLGHSI range.
+define double @f7(double %a, double %b, i64 %i1, i64 *%base) {
+; CHECK: f7:
+; CHECK: clghsi 4088(%r3), 2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 511
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check the next doubleword up, which needs separate address logic.
+define double @f8(double %a, double %b, i64 *%base) {
+; CHECK: f8:
+; CHECK: aghi %r2, 4096
+; CHECK: clghsi 0(%r2), 2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 512
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check negative offsets, which also need separate address logic.
+define double @f9(double %a, double %b, i64 *%base) {
+; CHECK: f9:
+; CHECK: aghi %r2, -8
+; CHECK: clghsi 0(%r2), 2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%base, i64 -1
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
+
+; Check that CLGHSI does not allow indices.
+define double @f10(double %a, double %b, i64 %base, i64 %index) {
+; CHECK: f10:
+; CHECK: agr {{%r2, %r3|%r3, %r2}}
+; CHECK: clghsi 0({{%r[23]}}), 2
+; CHECK-NEXT: j{{g?}}l
+; CHECK: ldr %f0, %f2
+; CHECK: br %r14
+ %add = add i64 %base, %index
+ %ptr = inttoptr i64 %add to i64 *
+ %val = load i64 *%ptr
+ %cond = icmp ult i64 %val, 2
+ %res = select i1 %cond, double %a, double %b
+ ret double %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-36.ll b/test/CodeGen/SystemZ/int-cmp-36.ll
new file mode 100644
index 000000000000..0813594325e4
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-36.ll
@@ -0,0 +1,81 @@
+; Test 32-bit comparisons in which the second operand is sign-extended
+; from a PC-relative i16.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i16 1
+
+; Check signed comparison.
+define i32 @f1(i32 %src1) {
+; CHECK: f1:
+; CHECK: chrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i32
+ %cond = icmp slt i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check unsigned comparison, which cannot use CHRL.
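+; (CHRL is a signed comparison, and CLHRL would zero-extend the halfword,
+; so neither matches an unsigned comparison with a sign-extended value.)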
+define i32 @f2(i32 %src1) {
+; CHECK: f2:
+; CHECK-NOT: chrl
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i32
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check equality.
+define i32 @f3(i32 %src1) {
+; CHECK: f3:
+; CHECK: chrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i32
+ %cond = icmp eq i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check inequality.
+define i32 @f4(i32 %src1) {
+; CHECK: f4:
+; CHECK: chrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i32
+ %cond = icmp ne i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-37.ll b/test/CodeGen/SystemZ/int-cmp-37.ll
new file mode 100644
index 000000000000..aebd1f610d27
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-37.ll
@@ -0,0 +1,81 @@
+; Test 32-bit comparisons in which the second operand is zero-extended
+; from a PC-relative i16.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i16 1
+
+; Check unsigned comparison.
+define i32 @f1(i32 %src1) {
+; CHECK: f1:
+; CHECK: clhrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i32
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check signed comparison.
+define i32 @f2(i32 %src1) {
+; CHECK: f2:
+; CHECK-NOT: clhrl
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i32
+ %cond = icmp slt i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check equality.
+define i32 @f3(i32 %src1) {
+; CHECK: f3:
+; CHECK: clhrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i32
+ %cond = icmp eq i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check inequality.
+define i32 @f4(i32 %src1) {
+; CHECK: f4:
+; CHECK: clhrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i32
+ %cond = icmp ne i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-38.ll b/test/CodeGen/SystemZ/int-cmp-38.ll
new file mode 100644
index 000000000000..347073027554
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-38.ll
@@ -0,0 +1,78 @@
+; Test 32-bit comparisons in which the second operand is a PC-relative
+; variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i32 1
+
+; Check signed comparisons.
+define i32 @f1(i32 %src1) {
+; CHECK: f1:
+; CHECK: crl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %src2 = load i32 *@g
+ %cond = icmp slt i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check unsigned comparisons.
+define i32 @f2(i32 %src1) {
+; CHECK: f2:
+; CHECK: clrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %src2 = load i32 *@g
+ %cond = icmp ult i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; Check equality, which can use CRL or CLRL.
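+; (Equality is independent of signedness, so either instruction works.)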
+define i32 @f3(i32 %src1) {
+; CHECK: f3:
+; CHECK: c{{l?}}rl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %src2 = load i32 *@g
+ %cond = icmp eq i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
+
+; ...likewise inequality.
+define i32 @f4(i32 %src1) {
+; CHECK: f4:
+; CHECK: c{{l?}}rl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %src2 = load i32 *@g
+ %cond = icmp ne i32 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i32 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i32 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-39.ll b/test/CodeGen/SystemZ/int-cmp-39.ll
new file mode 100644
index 000000000000..1129dce84a44
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-39.ll
@@ -0,0 +1,81 @@
+; Test 64-bit comparisons in which the second operand is sign-extended
+; from a PC-relative i16.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i16 1
+
+; Check signed comparison.
+define i64 @f1(i64 %src1) {
+; CHECK: f1:
+; CHECK: cghrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i64
+ %cond = icmp slt i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check unsigned comparison, which cannot use CGHRL.
+define i64 @f2(i64 %src1) {
+; CHECK: f2:
+; CHECK-NOT: cghrl
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i64
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check equality.
+define i64 @f3(i64 %src1) {
+; CHECK: f3:
+; CHECK: cghrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i64
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check inequality.
+define i64 @f4(i64 %src1) {
+; CHECK: f4:
+; CHECK: cghrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = sext i16 %val to i64
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-40.ll b/test/CodeGen/SystemZ/int-cmp-40.ll
new file mode 100644
index 000000000000..8d9fd9aa140a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-40.ll
@@ -0,0 +1,81 @@
+; Test 64-bit comparisons in which the second operand is zero-extended
+; from a PC-relative i16.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i16 1
+
+; Check unsigned comparison.
+define i64 @f1(i64 %src1) {
+; CHECK: f1:
+; CHECK: clghrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i64
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check signed comparison.
+define i64 @f2(i64 %src1) {
+; CHECK: f2:
+; CHECK-NOT: clghrl
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i64
+ %cond = icmp slt i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check equality.
+define i64 @f3(i64 %src1) {
+; CHECK: f3:
+; CHECK: clghrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i64
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check inequality.
+define i64 @f4(i64 %src1) {
+; CHECK: f4:
+; CHECK: clghrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i16 *@g
+ %src2 = zext i16 %val to i64
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-41.ll b/test/CodeGen/SystemZ/int-cmp-41.ll
new file mode 100644
index 000000000000..0808bffe6d3e
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-41.ll
@@ -0,0 +1,81 @@
+; Test 64-bit comparisons in which the second operand is sign-extended
+; from a PC-relative i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i32 1
+
+; Check signed comparison.
+define i64 @f1(i64 %src1) {
+; CHECK: f1:
+; CHECK: cgfrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = sext i32 %val to i64
+ %cond = icmp slt i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check unsigned comparison, which cannot use CGFRL.
+define i64 @f2(i64 %src1) {
+; CHECK: f2:
+; CHECK-NOT: cgfrl
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = sext i32 %val to i64
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check equality.
+define i64 @f3(i64 %src1) {
+; CHECK: f3:
+; CHECK: cgfrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = sext i32 %val to i64
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check inequality.
+define i64 @f4(i64 %src1) {
+; CHECK: f4:
+; CHECK: cgfrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = sext i32 %val to i64
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-42.ll b/test/CodeGen/SystemZ/int-cmp-42.ll
new file mode 100644
index 000000000000..5c67581dc29a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-42.ll
@@ -0,0 +1,81 @@
+; Test 64-bit comparisons in which the second operand is zero-extended
+; from a PC-relative i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i32 1
+
+; Check unsigned comparison.
+define i64 @f1(i64 %src1) {
+; CHECK: f1:
+; CHECK: clgfrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = zext i32 %val to i64
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check signed comparison, which cannot use CLGFRL.
+define i64 @f2(i64 %src1) {
+; CHECK: f2:
+; CHECK-NOT: clgfrl
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = zext i32 %val to i64
+ %cond = icmp slt i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check equality.
+define i64 @f3(i64 %src1) {
+; CHECK: f3:
+; CHECK: clgfrl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = zext i32 %val to i64
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check inequality.
+define i64 @f4(i64 %src1) {
+; CHECK: f4:
+; CHECK: clgfrl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %val = load i32 *@g
+ %src2 = zext i32 %val to i64
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-43.ll b/test/CodeGen/SystemZ/int-cmp-43.ll
new file mode 100644
index 000000000000..f387293b2b1b
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-cmp-43.ll
@@ -0,0 +1,78 @@
+; Test 64-bit comparisons in which the second operand is a PC-relative
+; variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@g = global i64 1
+
+; Check signed comparisons.
+define i64 @f1(i64 %src1) {
+; CHECK: f1:
+; CHECK: cgrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %src2 = load i64 *@g
+ %cond = icmp slt i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check unsigned comparisons.
+define i64 @f2(i64 %src1) {
+; CHECK: f2:
+; CHECK: clgrl %r2, g
+; CHECK-NEXT: j{{g?}}l
+; CHECK: br %r14
+entry:
+ %src2 = load i64 *@g
+ %cond = icmp ult i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; Check equality, which can use CGRL or CLGRL, since equality ignores signedness.
+define i64 @f3(i64 %src1) {
+; CHECK: f3:
+; CHECK: c{{l?}}grl %r2, g
+; CHECK-NEXT: j{{g?}}e
+; CHECK: br %r14
+entry:
+ %src2 = load i64 *@g
+ %cond = icmp eq i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
+
+; ...likewise inequality.
+define i64 @f4(i64 %src1) {
+; CHECK: f4:
+; CHECK: c{{l?}}grl %r2, g
+; CHECK-NEXT: j{{g?}}lh
+; CHECK: br %r14
+entry:
+ %src2 = load i64 *@g
+ %cond = icmp ne i64 %src1, %src2
+ br i1 %cond, label %exit, label %mulb
+mulb:
+ %mul = mul i64 %src1, %src1
+ br label %exit
+exit:
+ %res = phi i64 [ %src1, %entry ], [ %mul, %mulb ]
+ ret i64 %res
+}
diff --git a/test/CodeGen/SystemZ/int-const-01.ll b/test/CodeGen/SystemZ/int-const-01.ll
new file mode 100644
index 000000000000..a580154e6b57
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-01.ll
@@ -0,0 +1,91 @@
+; Test loading of 32-bit constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check 0.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: lhi %r2, 0
+; CHECK: br %r14
+ ret i32 0
+}
+
+; Check the high end of the LHI range.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: lhi %r2, 32767
+; CHECK: br %r14
+ ret i32 32767
+}
+
+; Check the next value up, which must use LLILL instead.
+define i32 @f3() {
+; CHECK: f3:
+; CHECK: llill %r2, 32768
+; CHECK: br %r14
+ ret i32 32768
+}
+
+; Check the high end of the LLILL range.
+define i32 @f4() {
+; CHECK: f4:
+; CHECK: llill %r2, 65535
+; CHECK: br %r14
+ ret i32 65535
+}
+
+; Check the first useful LLILH value, which is the next one up.
+define i32 @f5() {
+; CHECK: f5:
+; CHECK: llilh %r2, 1
+; CHECK: br %r14
+ ret i32 65536
+}
+
+; Check the first useful IILF value, which is the next one up again.
+define i32 @f6() {
+; CHECK: f6:
+; CHECK: iilf %r2, 65537
+; CHECK: br %r14
+ ret i32 65537
+}
+
+; Check the high end of the LLILH range.
+define i32 @f7() {
+; CHECK: f7:
+; CHECK: llilh %r2, 65535
+; CHECK: br %r14
+ ret i32 -65536
+}
+
+; Check the next value up, which must use IILF.
+define i32 @f8() {
+; CHECK: f8:
+; CHECK: iilf %r2, 4294901761
+; CHECK: br %r14
+ ret i32 -65535
+}
+
+; Check the highest useful IILF value, 0xffff7fff.
+define i32 @f9() {
+; CHECK: f9:
+; CHECK: iilf %r2, 4294934527
+; CHECK: br %r14
+ ret i32 -32769
+}
+
+; Check the next value up, which should use LHI.
+define i32 @f10() {
+; CHECK: f10:
+; CHECK: lhi %r2, -32768
+; CHECK: br %r14
+ ret i32 -32768
+}
+
+; Check -1.
+define i32 @f11() {
+; CHECK: f11:
+; CHECK: lhi %r2, -1
+; CHECK: br %r14
+ ret i32 -1
+}
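+
+; A rough sketch of the selection ladder exercised above, assuming the
+; standard z/Architecture immediate forms: LHI for signed 16-bit values,
+; LLILL for other values that fit in the low halfword, LLILH for multiples
+; of 65536, and IILF for everything else.  For example, -65535 is 0xffff0001,
+; which matches none of the cheaper patterns, hence the IILF 4294901761 in f8.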
diff --git a/test/CodeGen/SystemZ/int-const-02.ll b/test/CodeGen/SystemZ/int-const-02.ll
new file mode 100644
index 000000000000..b345e3f2a2a1
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-02.ll
@@ -0,0 +1,251 @@
+; Test loading of 64-bit constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check 0.
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: lghi %r2, 0
+; CHECK-NEXT: br %r14
+ ret i64 0
+}
+
+; Check the high end of the LGHI range.
+define i64 @f2() {
+; CHECK: f2:
+; CHECK: lghi %r2, 32767
+; CHECK-NEXT: br %r14
+ ret i64 32767
+}
+
+; Check the next value up, which must use LLILL instead.
+define i64 @f3() {
+; CHECK: f3:
+; CHECK: llill %r2, 32768
+; CHECK-NEXT: br %r14
+ ret i64 32768
+}
+
+; Check the high end of the LLILL range.
+define i64 @f4() {
+; CHECK: f4:
+; CHECK: llill %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 65535
+}
+
+; Check the first useful LLILH value, which is the next one up.
+define i64 @f5() {
+; CHECK: f5:
+; CHECK: llilh %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 65536
+}
+
+; Check the first useful LGFI value, which is the next one up again.
+define i64 @f6() {
+; CHECK: f6:
+; CHECK: lgfi %r2, 65537
+; CHECK-NEXT: br %r14
+ ret i64 65537
+}
+
+; Check the high end of the LGFI range.
+define i64 @f7() {
+; CHECK: f7:
+; CHECK: lgfi %r2, 2147483647
+; CHECK-NEXT: br %r14
+ ret i64 2147483647
+}
+
+; Check the next value up, which should use LLILH instead.
+define i64 @f8() {
+; CHECK: f8:
+; CHECK: llilh %r2, 32768
+; CHECK-NEXT: br %r14
+ ret i64 2147483648
+}
+
+; Check the next value up again, which should use LLILF.
+define i64 @f9() {
+; CHECK: f9:
+; CHECK: llilf %r2, 2147483649
+; CHECK-NEXT: br %r14
+ ret i64 2147483649
+}
+
+; Check the high end of the LLILH range.
+define i64 @f10() {
+; CHECK: f10:
+; CHECK: llilh %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 4294901760
+}
+
+; Check the next value up, which must use LLILF.
+define i64 @f11() {
+; CHECK: f11:
+; CHECK: llilf %r2, 4294901761
+; CHECK-NEXT: br %r14
+ ret i64 4294901761
+}
+
+; Check the high end of the LLILF range.
+define i64 @f12() {
+; CHECK: f12:
+; CHECK: llilf %r2, 4294967295
+; CHECK-NEXT: br %r14
+ ret i64 4294967295
+}
+
+; Check the lowest useful LLIHL value, which is the next one up.
+define i64 @f13() {
+; CHECK: f13:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 4294967296
+}
+
+; Check the next value up, which must use a combination of two instructions.
+define i64 @f14() {
+; CHECK: f14:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oill %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 4294967297
+}
+
+; Check the high end of the OILL range.
+define i64 @f15() {
+; CHECK: f15:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oill %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 4295032831
+}
+
+; Check the next value up, which should use OILH instead.
+define i64 @f16() {
+; CHECK: f16:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oilh %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 4295032832
+}
+
+; Check the next value up again, which should use OILF.
+define i64 @f17() {
+; CHECK: f17:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oilf %r2, 65537
+; CHECK-NEXT: br %r14
+ ret i64 4295032833
+}
+
+; Check the high end of the OILH range.
+define i64 @f18() {
+; CHECK: f18:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oilh %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 8589869056
+}
+
+; Check the high end of the OILF range.
+define i64 @f19() {
+; CHECK: f19:
+; CHECK: llihl %r2, 1
+; CHECK-NEXT: oilf %r2, 4294967295
+; CHECK-NEXT: br %r14
+ ret i64 8589934591
+}
+
+; Check the high end of the LLIHL range.
+define i64 @f20() {
+; CHECK: f20:
+; CHECK: llihl %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 281470681743360
+}
+
+; Check the lowest useful LLIHH value, which is 1<<32 greater than the above.
+define i64 @f21() {
+; CHECK: f21:
+; CHECK: llihh %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 281474976710656
+}
+
+; Check the lowest useful LLIHF value, which is 1<<32 greater again.
+define i64 @f22() {
+; CHECK: f22:
+; CHECK: llihf %r2, 65537
+; CHECK-NEXT: br %r14
+ ret i64 281479271677952
+}
+
+; Check the high end of the LLIHH range.
+define i64 @f23() {
+; CHECK: f23:
+; CHECK: llihh %r2, 65535
+; CHECK-NEXT: br %r14
+ ret i64 -281474976710656
+}
+
+; Check the next value up, which must use OILL too.
+define i64 @f24() {
+; CHECK: f24:
+; CHECK: llihh %r2, 65535
+; CHECK-NEXT: oill %r2, 1
+; CHECK-NEXT: br %r14
+ ret i64 -281474976710655
+}
+
+; Check the high end of the LLIHF range.
+define i64 @f25() {
+; CHECK: f25:
+; CHECK: llihf %r2, 4294967295
+; CHECK-NEXT: br %r14
+ ret i64 -4294967296
+}
+
+; Check -1.
+define i64 @f26() {
+; CHECK: f26:
+; CHECK: lghi %r2, -1
+; CHECK-NEXT: br %r14
+ ret i64 -1
+}
+
+; Check the low end of the LGHI range.
+define i64 @f27() {
+; CHECK: f27:
+; CHECK: lghi %r2, -32768
+; CHECK-NEXT: br %r14
+ ret i64 -32768
+}
+
+; Check the next value down, which must use LGFI instead.
+define i64 @f28() {
+; CHECK: f28:
+; CHECK: lgfi %r2, -32769
+; CHECK-NEXT: br %r14
+ ret i64 -32769
+}
+
+; Check the low end of the LGFI range.
+define i64 @f29() {
+; CHECK: f29:
+; CHECK: lgfi %r2, -2147483648
+; CHECK-NEXT: br %r14
+ ret i64 -2147483648
+}
+
+; Check the next value down, which needs a two-instruction sequence.
+define i64 @f30() {
+; CHECK: f30:
+; CHECK: llihf %r2, 4294967295
+; CHECK-NEXT: oilf %r2, 2147483647
+; CHECK-NEXT: br %r14
+ ret i64 -2147483649
+}
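+
+; The 64-bit ladder above follows the same idea, assuming the high-word
+; variants work like their low-word counterparts: LGHI/LGFI for signed
+; 16/32-bit values, LLILL/LLILH/LLILF and LLIHL/LLIHH/LLIHF for single
+; halfword or word patterns, with OILL/OILH/OILF folding in low-word bits
+; when two instructions are needed.  For example, -2147483649 is
+; 0xffffffff7fffffff, hence LLIHF 4294967295 plus OILF 2147483647 in f30.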
diff --git a/test/CodeGen/SystemZ/int-const-03.ll b/test/CodeGen/SystemZ/int-const-03.ll
new file mode 100644
index 000000000000..807b7e463ced
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-03.ll
@@ -0,0 +1,166 @@
+; Test moves of integers to byte memory locations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the unsigned range.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: mvi 0(%r2), 0
+; CHECK: br %r14
+ store i8 0, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the signed range.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: mvi 0(%r2), 127
+; CHECK: br %r14
+ store i8 127, i8 *%ptr
+ ret void
+}
+
+; Check the next value up.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: mvi 0(%r2), 128
+; CHECK: br %r14
+ store i8 -128, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the unsigned range.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: mvi 0(%r2), 255
+; CHECK: br %r14
+ store i8 255, i8 *%ptr
+ ret void
+}
+
+; Check -1.
+define void @f5(i8 *%ptr) {
+; CHECK: f5:
+; CHECK: mvi 0(%r2), 255
+; CHECK: br %r14
+ store i8 -1, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the signed range.
+define void @f6(i8 *%ptr) {
+; CHECK: f6:
+; CHECK: mvi 0(%r2), 128
+; CHECK: br %r14
+ store i8 -128, i8 *%ptr
+ ret void
+}
+
+; Check the next value down.
+define void @f7(i8 *%ptr) {
+; CHECK: f7:
+; CHECK: mvi 0(%r2), 127
+; CHECK: br %r14
+ store i8 -129, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the MVI range.
+define void @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: mvi 4095(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which should use MVIY instead of MVI.
+define void @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: mviy 4096(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the MVIY range.
+define void @f10(i8 *%src) {
+; CHECK: f10:
+; CHECK: mviy 524287(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f11(i8 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r2, 524288
+; CHECK: mvi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the negative MVIY range.
+define void @f12(i8 *%src) {
+; CHECK: f12:
+; CHECK: mviy -1(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the MVIY range.
+define void @f13(i8 *%src) {
+; CHECK: f13:
+; CHECK: mviy -524288(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f14(i8 *%src) {
+; CHECK: f14:
+; CHECK: agfi %r2, -524289
+; CHECK: mvi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check that MVI does not allow an index.
+define void @f15(i64 %src, i64 %index) {
+; CHECK: f15:
+; CHECK: agr %r2, %r3
+; CHECK: mvi 4095(%r2), 42
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ store i8 42, i8 *%ptr
+ ret void
+}
+
+; Check that MVIY does not allow an index.
+define void @f16(i64 %src, i64 %index) {
+; CHECK: f16:
+; CHECK: agr %r2, %r3
+; CHECK: mviy 4096(%r2), 42
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ store i8 42, i8 *%ptr
+ ret void
+}
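+
+; The displacement split exercised above: MVI takes an unsigned 12-bit
+; displacement (0 to 4095) and MVIY a signed 20-bit one (-524288 to 524287).
+; Neither form takes an index register, which is why f15 and f16 expect the
+; index to be added into the base first.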
diff --git a/test/CodeGen/SystemZ/int-const-04.ll b/test/CodeGen/SystemZ/int-const-04.ll
new file mode 100644
index 000000000000..41c7306c89aa
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-04.ll
@@ -0,0 +1,111 @@
+; Test moves of integers to 2-byte memory locations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the unsigned range.
+define void @f1(i16 *%ptr) {
+; CHECK: f1:
+; CHECK: mvhhi 0(%r2), 0
+; CHECK: br %r14
+ store i16 0, i16 *%ptr
+ ret void
+}
+
+; Check the high end of the signed range.
+define void @f2(i16 *%ptr) {
+; CHECK: f2:
+; CHECK: mvhhi 0(%r2), 32767
+; CHECK: br %r14
+ store i16 32767, i16 *%ptr
+ ret void
+}
+
+; Check the next value up.
+define void @f3(i16 *%ptr) {
+; CHECK: f3:
+; CHECK: mvhhi 0(%r2), -32768
+; CHECK: br %r14
+ store i16 -32768, i16 *%ptr
+ ret void
+}
+
+; Check the high end of the unsigned range.
+define void @f4(i16 *%ptr) {
+; CHECK: f4:
+; CHECK: mvhhi 0(%r2), -1
+; CHECK: br %r14
+ store i16 65535, i16 *%ptr
+ ret void
+}
+
+; Check -1.
+define void @f5(i16 *%ptr) {
+; CHECK: f5:
+; CHECK: mvhhi 0(%r2), -1
+; CHECK: br %r14
+ store i16 -1, i16 *%ptr
+ ret void
+}
+
+; Check the low end of the signed range.
+define void @f6(i16 *%ptr) {
+; CHECK: f6:
+; CHECK: mvhhi 0(%r2), -32768
+; CHECK: br %r14
+ store i16 -32768, i16 *%ptr
+ ret void
+}
+
+; Check the next value down.
+define void @f7(i16 *%ptr) {
+; CHECK: f7:
+; CHECK: mvhhi 0(%r2), 32767
+; CHECK: br %r14
+ store i16 -32769, i16 *%ptr
+ ret void
+}
+
+; Check the high end of the MVHHI range.
+define void @f8(i16 *%a) {
+; CHECK: f8:
+; CHECK: mvhhi 4094(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%a, i64 2047
+ store i16 42, i16 *%ptr
+ ret void
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i16 *%a) {
+; CHECK: f9:
+; CHECK: aghi %r2, 4096
+; CHECK: mvhhi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%a, i64 2048
+ store i16 42, i16 *%ptr
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f10(i16 *%a) {
+; CHECK: f10:
+; CHECK: aghi %r2, -2
+; CHECK: mvhhi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%a, i64 -1
+ store i16 42, i16 *%ptr
+ ret void
+}
+
+; Check that MVHHI does not allow an index.
+define void @f11(i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: agr %r2, %r3
+; CHECK: mvhhi 0(%r2), 42
+; CHECK: br %r14
+ %add = add i64 %src, %index
+ %ptr = inttoptr i64 %add to i16 *
+ store i16 42, i16 *%ptr
+ ret void
+}
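+
+; Unlike MVI, MVHHI appears to have no long-displacement sibling, so any
+; offset outside the unsigned 12-bit range (0 to 4094 for aligned halfwords)
+; needs the separate address arithmetic shown in f9 and f10.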
diff --git a/test/CodeGen/SystemZ/int-const-05.ll b/test/CodeGen/SystemZ/int-const-05.ll
new file mode 100644
index 000000000000..b85fd6b68207
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-05.ll
@@ -0,0 +1,102 @@
+; Test moves of integers to 4-byte memory locations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check moves of zero.
+define void @f1(i32 *%a) {
+; CHECK: f1:
+; CHECK: mvhi 0(%r2), 0
+; CHECK: br %r14
+ store i32 0, i32 *%a
+ ret void
+}
+
+; Check the high end of the signed 16-bit range.
+define void @f2(i32 *%a) {
+; CHECK: f2:
+; CHECK: mvhi 0(%r2), 32767
+; CHECK: br %r14
+ store i32 32767, i32 *%a
+ ret void
+}
+
+; Check the next value up, which can't use MVHI.
+define void @f3(i32 *%a) {
+; CHECK: f3:
+; CHECK-NOT: mvhi
+; CHECK: br %r14
+ store i32 32768, i32 *%a
+ ret void
+}
+
+; Check moves of -1.
+define void @f4(i32 *%a) {
+; CHECK: f4:
+; CHECK: mvhi 0(%r2), -1
+; CHECK: br %r14
+ store i32 -1, i32 *%a
+ ret void
+}
+
+; Check the low end of the MVHI range.
+define void @f5(i32 *%a) {
+; CHECK: f5:
+; CHECK: mvhi 0(%r2), -32768
+; CHECK: br %r14
+ store i32 -32768, i32 *%a
+ ret void
+}
+
+; Check the next value down, which can't use MVHI.
+define void @f6(i32 *%a) {
+; CHECK: f6:
+; CHECK-NOT: mvhi
+; CHECK: br %r14
+ store i32 -32769, i32 *%a
+ ret void
+}
+
+; Check the high end of the MVHI range.
+define void @f7(i32 *%a) {
+; CHECK: f7:
+; CHECK: mvhi 4092(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%a, i64 1023
+ store i32 42, i32 *%ptr
+ ret void
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i32 *%a) {
+; CHECK: f8:
+; CHECK: aghi %r2, 4096
+; CHECK: mvhi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%a, i64 1024
+ store i32 42, i32 *%ptr
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f9(i32 *%a) {
+; CHECK: f9:
+; CHECK: aghi %r2, -4
+; CHECK: mvhi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%a, i64 -1
+ store i32 42, i32 *%ptr
+ ret void
+}
+
+; Check that MVHI does not allow an index.
+define void @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: agr %r2, %r3
+; CHECK: mvhi 0(%r2), 42
+; CHECK: br %r14
+ %add = add i64 %src, %index
+ %ptr = inttoptr i64 %add to i32 *
+ store i32 42, i32 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-const-06.ll b/test/CodeGen/SystemZ/int-const-06.ll
new file mode 100644
index 000000000000..9f14347cf880
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-const-06.ll
@@ -0,0 +1,102 @@
+; Test moves of integers to 8-byte memory locations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check moves of zero.
+define void @f1(i64 *%a) {
+; CHECK: f1:
+; CHECK: mvghi 0(%r2), 0
+; CHECK: br %r14
+ store i64 0, i64 *%a
+ ret void
+}
+
+; Check the high end of the signed 16-bit range.
+define void @f2(i64 *%a) {
+; CHECK: f2:
+; CHECK: mvghi 0(%r2), 32767
+; CHECK: br %r14
+ store i64 32767, i64 *%a
+ ret void
+}
+
+; Check the next value up, which can't use MVGHI.
+define void @f3(i64 *%a) {
+; CHECK: f3:
+; CHECK-NOT: mvghi
+; CHECK: br %r14
+ store i64 32768, i64 *%a
+ ret void
+}
+
+; Check moves of -1.
+define void @f4(i64 *%a) {
+; CHECK: f4:
+; CHECK: mvghi 0(%r2), -1
+; CHECK: br %r14
+ store i64 -1, i64 *%a
+ ret void
+}
+
+; Check the low end of the MVGHI range.
+define void @f5(i64 *%a) {
+; CHECK: f5:
+; CHECK: mvghi 0(%r2), -32768
+; CHECK: br %r14
+ store i64 -32768, i64 *%a
+ ret void
+}
+
+; Check the next value down, which can't use MVGHI.
+define void @f6(i64 *%a) {
+; CHECK: f6:
+; CHECK-NOT: mvghi
+; CHECK: br %r14
+ store i64 -32769, i64 *%a
+ ret void
+}
+
+; Check the high end of the MVGHI range.
+define void @f7(i64 *%a) {
+; CHECK: f7:
+; CHECK: mvghi 4088(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%a, i64 511
+ store i64 42, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i64 *%a) {
+; CHECK: f8:
+; CHECK: aghi %r2, 4096
+; CHECK: mvghi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%a, i64 512
+ store i64 42, i64 *%ptr
+ ret void
+}
+
+; Check negative displacements, which also need separate address logic.
+define void @f9(i64 *%a) {
+; CHECK: f9:
+; CHECK: aghi %r2, -8
+; CHECK: mvghi 0(%r2), 42
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%a, i64 -1
+ store i64 42, i64 *%ptr
+ ret void
+}
+
+; Check that MVGHI does not allow an index.
+define void @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: agr %r2, %r3
+; CHECK: mvghi 0(%r2), 42
+; CHECK: br %r14
+ %add = add i64 %src, %index
+ %ptr = inttoptr i64 %add to i64 *
+ store i64 42, i64 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-conv-01.ll b/test/CodeGen/SystemZ/int-conv-01.ll
new file mode 100644
index 000000000000..643ac6ae2510
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-01.ll
@@ -0,0 +1,105 @@
+; Test sign extensions from a byte to an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: lbr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i32 %a to i8
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; ...and again with an i64.
+define i32 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: lbr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i64 %a to i8
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check LB with no displacement.
+define i32 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: lb %r2, 0(%r2)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the high end of the LB range.
+define i32 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: lb %r2, 524287(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: lb %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the high end of the negative LB range.
+define i32 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: lb %r2, -1(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the low end of the LB range.
+define i32 @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: lb %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524289
+; CHECK: lb %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check that LB allows an index.
+define i32 @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: lb %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i8 *
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i32
+ ret i32 %ext
+}
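+
+; LB takes a signed 20-bit displacement (-524288 to 524287) and, unlike the
+; MVI-style stores, also accepts an index register, as f9 shows with the
+; base-plus-index form 524287(%r3,%r2).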
diff --git a/test/CodeGen/SystemZ/int-conv-02.ll b/test/CodeGen/SystemZ/int-conv-02.ll
new file mode 100644
index 000000000000..86144d3e6450
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-02.ll
@@ -0,0 +1,114 @@
+; Test zero extensions from a byte to an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: llcr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i32 %a to i8
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; ...and again with an i64.
+define i32 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llcr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i64 %a to i8
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check ANDs that are equivalent to zero extension.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: llcr %r2, %r2
+; CHECK: br %r14
+ %ext = and i32 %a, 255
+ ret i32 %ext
+}
+
+; Check LLC with no displacement.
+define i32 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: llc %r2, 0(%r2)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the high end of the LLC range.
+define i32 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: llc %r2, 524287(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: llc %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the high end of the negative LLC range.
+define i32 @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: llc %r2, -1(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the low end of the LLC range.
+define i32 @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: llc %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524289
+; CHECK: llc %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
+
+; Check that LLC allows an index.
+define i32 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: llc %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i8 *
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i32
+ ret i32 %ext
+}
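+
+; The AND in f3 works because masking with 255 keeps exactly the low byte,
+; which is the same operation as zero-extending from i8, so it can be
+; matched to LLCR as well.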
diff --git a/test/CodeGen/SystemZ/int-conv-03.ll b/test/CodeGen/SystemZ/int-conv-03.ll
new file mode 100644
index 000000000000..73b8dbb43a13
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-03.ll
@@ -0,0 +1,105 @@
+; Test sign extensions from a byte to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i64 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: lgbr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i32 %a to i8
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; ...and again with an i64.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: lgbr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i64 %a to i8
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check LGB with no displacement.
+define i64 @f3(i8 *%src) {
+; CHECK: f3:
+; CHECK: lgb %r2, 0(%r2)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LGB range.
+define i64 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: lgb %r2, 524287(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: lgb %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LGB range.
+define i64 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: lgb %r2, -1(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LGB range.
+define i64 @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: lgb %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524289
+; CHECK: lgb %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check that LGB allows an index.
+define i64 @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: lgb %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i8 *
+ %byte = load i8 *%ptr
+ %ext = sext i8 %byte to i64
+ ret i64 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-conv-04.ll b/test/CodeGen/SystemZ/int-conv-04.ll
new file mode 100644
index 000000000000..4cec5242e880
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-04.ll
@@ -0,0 +1,114 @@
+; Test zero extensions from a byte to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i64 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: llgcr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i32 %a to i8
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; ...and again with an i64.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llgcr %r2, %r2
+; CHECK: br %r14
+ %byte = trunc i64 %a to i8
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check ANDs that are equivalent to zero extension.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: llgcr %r2, %r2
+; CHECK: br %r14
+ %ext = and i64 %a, 255
+ ret i64 %ext
+}
+
+; Check LLGC with no displacement.
+define i64 @f4(i8 *%src) {
+; CHECK: f4:
+; CHECK: llgc %r2, 0(%r2)
+; CHECK: br %r14
+ %byte = load i8 *%src
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LLGC range.
+define i64 @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: llgc %r2, 524287(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: llgc %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LLGC range.
+define i64 @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: llgc %r2, -1(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LLGC range.
+define i64 @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: llgc %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524289
+; CHECK: llgc %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
+
+; Check that LLGC allows an index.
+define i64 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: llgc %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i8 *
+ %byte = load i8 *%ptr
+ %ext = zext i8 %byte to i64
+ ret i64 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-conv-05.ll b/test/CodeGen/SystemZ/int-conv-05.ll
new file mode 100644
index 000000000000..5358f7d9228a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-05.ll
@@ -0,0 +1,140 @@
+; Test sign extensions from a halfword to an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: lhr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i32 %a to i16
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; ...and again with an i64.
+define i32 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: lhr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i64 %a to i16
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the low end of the LH range.
+define i32 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: lh %r2, 0(%r2)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the high end of the LH range.
+define i32 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: lh %r2, 4094(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2047
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the next halfword up, which needs LHY rather than LH.
+define i32 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: lhy %r2, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2048
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the high end of the LHY range.
+define i32 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: lhy %r2, 524286(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f7(i16 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: lh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the high end of the negative LHY range.
+define i32 @f8(i16 *%src) {
+; CHECK: f8:
+; CHECK: lhy %r2, -2(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the low end of the LHY range.
+define i32 @f9(i16 *%src) {
+; CHECK: f9:
+; CHECK: lhy %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f10(i16 *%src) {
+; CHECK: f10:
+; CHECK: agfi %r2, -524290
+; CHECK: lh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check that LH allows an index.
+define i32 @f11(i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: lh %r2, 4094(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4094
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check that LHY allows an index.
+define i32 @f12(i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: lhy %r2, 4096(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i32
+ ret i32 %ext
+}
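+
+; The LH/LHY split mirrors MVI/MVIY: LH takes an unsigned 12-bit displacement
+; and LHY a signed 20-bit one, but both halfword loads accept an index
+; register, hence the indexed forms in f11 and f12.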
diff --git a/test/CodeGen/SystemZ/int-conv-06.ll b/test/CodeGen/SystemZ/int-conv-06.ll
new file mode 100644
index 000000000000..64af612d65f5
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-06.ll
@@ -0,0 +1,114 @@
+; Test zero extensions from a halfword to an i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: llhr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i32 %a to i16
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; ...and again with an i64.
+define i32 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llhr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i64 %a to i16
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check ANDs that are equivalent to zero extension.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: llhr %r2, %r2
+; CHECK: br %r14
+ %ext = and i32 %a, 65535
+ ret i32 %ext
+}
+
+; Check LLH with no displacement.
+define i32 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: llh %r2, 0(%r2)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the high end of the LLH range.
+define i32 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: llh %r2, 524286(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: llh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the high end of the negative LLH range.
+define i32 @f7(i16 *%src) {
+; CHECK: f7:
+; CHECK: llh %r2, -2(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the low end of the LLH range.
+define i32 @f8(i16 *%src) {
+; CHECK: f8:
+; CHECK: llh %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i16 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524290
+; CHECK: llh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
+
+; Check that LLH allows an index.
+define i32 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: llh %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i32
+ ret i32 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-conv-07.ll b/test/CodeGen/SystemZ/int-conv-07.ll
new file mode 100644
index 000000000000..041caa244c8e
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-07.ll
@@ -0,0 +1,105 @@
+; Test sign extensions from a halfword to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i64.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: lghr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i64 %a to i16
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; ...and again with an i32.
+define i64 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: lghr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i32 %a to i16
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check LGH with no displacement.
+define i64 @f3(i16 *%src) {
+; CHECK: f3:
+; CHECK: lgh %r2, 0(%r2)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LGH range.
+define i64 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: lgh %r2, 524286(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: lgh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LGH range.
+define i64 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: lgh %r2, -2(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LGH range.
+define i64 @f7(i16 *%src) {
+; CHECK: f7:
+; CHECK: lgh %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i16 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524290
+; CHECK: lgh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check that LGH allows an index.
+define i64 @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: lgh %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %ext = sext i16 %half to i64
+ ret i64 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-conv-08.ll b/test/CodeGen/SystemZ/int-conv-08.ll
new file mode 100644
index 000000000000..3d7f96675da9
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-08.ll
@@ -0,0 +1,114 @@
+; Test zero extensions from a halfword to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i64 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: llghr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i32 %a to i16
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; ...and again with an i64.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llghr %r2, %r2
+; CHECK: br %r14
+ %half = trunc i64 %a to i16
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check ANDs that are equivalent to zero extension.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: llghr %r2, %r2
+; CHECK: br %r14
+ %ext = and i64 %a, 65535
+ ret i64 %ext
+}
+
+; Check LLGH with no displacement.
+define i64 @f4(i16 *%src) {
+; CHECK: f4:
+; CHECK: llgh %r2, 0(%r2)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LLGH range.
+define i64 @f5(i16 *%src) {
+; CHECK: f5:
+; CHECK: llgh %r2, 524286(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i16 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: llgh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LLGH range.
+define i64 @f7(i16 *%src) {
+; CHECK: f7:
+; CHECK: llgh %r2, -2(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LLGH range.
+define i64 @f8(i16 *%src) {
+; CHECK: f8:
+; CHECK: llgh %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f9(i16 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524290
+; CHECK: llgh %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
+
+; Check that LLGH allows an index.
+define i64 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: llgh %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %ext = zext i16 %half to i64
+ ret i64 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-conv-09.ll b/test/CodeGen/SystemZ/int-conv-09.ll
new file mode 100644
index 000000000000..6e93886895d5
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-09.ll
@@ -0,0 +1,104 @@
+; Test sign extensions from an i32 to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i64 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: lgfr %r2, %r2
+; CHECK: br %r14
+ %ext = sext i32 %a to i64
+ ret i64 %ext
+}
+
+; ...and again with an i64.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: lgfr %r2, %r2
+; CHECK: br %r14
+ %word = trunc i64 %a to i32
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check LGF with no displacement.
+define i64 @f3(i32 *%src) {
+; CHECK: f3:
+; CHECK: lgf %r2, 0(%r2)
+; CHECK: br %r14
+ %word = load i32 *%src
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LGF range.
+define i64 @f4(i32 *%src) {
+; CHECK: f4:
+; CHECK: lgf %r2, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f5(i32 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: lgf %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LGF range.
+define i64 @f6(i32 *%src) {
+; CHECK: f6:
+; CHECK: lgf %r2, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LGF range.
+define i64 @f7(i32 *%src) {
+; CHECK: f7:
+; CHECK: lgf %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i32 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524292
+; CHECK: lgf %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check that LGF allows an index.
+define i64 @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: lgf %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %word = load i32 *%ptr
+ %ext = sext i32 %word to i64
+ ret i64 %ext
+}
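+
+; The word-sized bounds above follow from the same signed 20-bit displacement:
+; 131071 words is 524284 bytes, the largest word-aligned offset below 524287.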
diff --git a/test/CodeGen/SystemZ/int-conv-10.ll b/test/CodeGen/SystemZ/int-conv-10.ll
new file mode 100644
index 000000000000..918bc1de8fa5
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-conv-10.ll
@@ -0,0 +1,113 @@
+; Test zero extensions from an i32 to an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register extension, starting with an i32.
+define i64 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: llgfr %r2, %r2
+; CHECK: br %r14
+ %ext = zext i32 %a to i64
+ ret i64 %ext
+}
+
+; ...and again with an i64.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: llgfr %r2, %r2
+; CHECK: br %r14
+ %word = trunc i64 %a to i32
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check ANDs that are equivalent to zero extension.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: llgfr %r2, %r2
+; CHECK: br %r14
+ %ext = and i64 %a, 4294967295
+ ret i64 %ext
+}
+
+; Check LLGF with no displacement.
+define i64 @f4(i32 *%src) {
+; CHECK: f4:
+; CHECK: llgf %r2, 0(%r2)
+; CHECK: br %r14
+ %word = load i32 *%src
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the high end of the LLGF range.
+define i64 @f5(i32 *%src) {
+; CHECK: f5:
+; CHECK: llgf %r2, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: llgf %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the high end of the negative LLGF range.
+define i64 @f7(i32 *%src) {
+; CHECK: f7:
+; CHECK: llgf %r2, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the low end of the LLGF range.
+define i64 @f8(i32 *%src) {
+; CHECK: f8:
+; CHECK: llgf %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f9(i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: llgf %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
+
+; Check that LLGF allows an index.
+define i64 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: llgf %r2, 524287(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %word = load i32 *%ptr
+ %ext = zext i32 %word to i64
+ ret i64 %ext
+}
diff --git a/test/CodeGen/SystemZ/int-div-01.ll b/test/CodeGen/SystemZ/int-div-01.ll
new file mode 100644
index 000000000000..492ece91497e
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-div-01.ll
@@ -0,0 +1,190 @@
+; Test 32-bit signed division and remainder.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register division. The result is in the second of the two registers.
+define void @f1(i32 *%dest, i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: lgfr %r1, %r3
+; CHECK: dsgfr %r0, %r4
+; CHECK: st %r1, 0(%r2)
+; CHECK: br %r14
+ %div = sdiv i32 %a, %b
+ store i32 %div, i32 *%dest
+ ret void
+}
+
+; Test register remainder. The result is in the first of the two registers.
+define void @f2(i32 *%dest, i32 %a, i32 %b) {
+; CHECK: f2:
+; CHECK: lgfr %r1, %r3
+; CHECK: dsgfr %r0, %r4
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %rem = srem i32 %a, %b
+ store i32 %rem, i32 *%dest
+ ret void
+}
+
+; Test that division and remainder use a single instruction.
+define i32 @f3(i32 %dummy, i32 %a, i32 %b) {
+; CHECK: f3:
+; CHECK-NOT: %r2
+; CHECK: lgfr %r3, %r3
+; CHECK-NOT: %r2
+; CHECK: dsgfr %r2, %r4
+; CHECK-NOT: dsgfr
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Check that the sign extension of the dividend is elided when the argument
+; is already sign-extended.
+define i32 @f4(i32 %dummy, i32 signext %a, i32 %b) {
+; CHECK: f4:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgfr %r2, %r4
+; CHECK-NOT: dsgfr
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Test that memory dividends are loaded using sign extension (LGF).
+define i32 @f5(i32 %dummy, i32 *%src, i32 %b) {
+; CHECK: f5:
+; CHECK-NOT: %r2
+; CHECK: lgf %r3, 0(%r3)
+; CHECK-NOT: %r2
+; CHECK: dsgfr %r2, %r4
+; CHECK-NOT: dsgfr
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %a = load i32 *%src
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Test memory division with no displacement.
+define void @f6(i32 *%dest, i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: lgfr %r1, %r3
+; CHECK: dsgf %r0, 0(%r4)
+; CHECK: st %r1, 0(%r2)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %div = sdiv i32 %a, %b
+ store i32 %div, i32 *%dest
+ ret void
+}
+
+; Test memory remainder with no displacement.
+define void @f7(i32 *%dest, i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: lgfr %r1, %r3
+; CHECK: dsgf %r0, 0(%r4)
+; CHECK: st %r0, 0(%r2)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %rem = srem i32 %a, %b
+ store i32 %rem, i32 *%dest
+ ret void
+}
+
+; Test both memory division and memory remainder.
+define i32 @f8(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK-NOT: %r2
+; CHECK: lgfr %r3, %r3
+; CHECK-NOT: %r2
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK-NOT: {{dsgf|dsgfr}}
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %b = load i32 *%src
+ %div = sdiv i32 %a, %b
+ %rem = srem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Check the high end of the DSGF range.
+define i32 @f9(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: dsgf %r2, 524284(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f10(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f10:
+; CHECK: agfi %r4, 524288
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the high end of the negative DSGF range.
+define i32 @f11(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f11:
+; CHECK: dsgf %r2, -4(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the low end of the DSGF range.
+define i32 @f12(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f12:
+; CHECK: dsgf %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f13(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f13:
+; CHECK: agfi %r4, -524292
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check that DSGF allows an index.
+define i32 @f14(i32 %dummy, i32 %a, i64 %src, i64 %index) {
+; CHECK: f14:
+; CHECK: dsgf %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %rem = srem i32 %a, %b
+ ret i32 %rem
+}
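+
+; A worked sketch of the displacement arithmetic assumed by f9-f13 above:
+; DSGF takes a 20-bit signed byte displacement, covering -524288..524287.
+; For 4-byte elements the aligned extremes are therefore:
+;   index  131071 -> 4 *  131071 =  524284 (last in-range word)
+;   index  131072 -> 4 *  131072 =  524288 (out of range, needs AGFI)
+;   index -131072 -> 4 * -131072 = -524288 (first in-range word)
+;   index -131073 -> 4 * -131073 = -524292 (out of range, needs AGFI)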
diff --git a/test/CodeGen/SystemZ/int-div-02.ll b/test/CodeGen/SystemZ/int-div-02.ll
new file mode 100644
index 000000000000..7954384d2962
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-div-02.ll
@@ -0,0 +1,166 @@
+; Test 32-bit unsigned division and remainder.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register division. The result is in the second of the two registers.
+define void @f1(i32 %dummy, i32 %a, i32 %b, i32 *%dest) {
+; CHECK: f1:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlr %r2, %r4
+; CHECK: st %r3, 0(%r5)
+; CHECK: br %r14
+ %div = udiv i32 %a, %b
+ store i32 %div, i32 *%dest
+ ret void
+}
+
+; Test register remainder. The result is in the first of the two registers.
+define void @f2(i32 %dummy, i32 %a, i32 %b, i32 *%dest) {
+; CHECK: f2:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlr %r2, %r4
+; CHECK: st %r2, 0(%r5)
+; CHECK: br %r14
+ %rem = urem i32 %a, %b
+ store i32 %rem, i32 *%dest
+ ret void
+}
+
+; Test that division and remainder use a single instruction.
+define i32 @f3(i32 %dummy1, i32 %a, i32 %b) {
+; CHECK: f3:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlr %r2, %r4
+; CHECK-NOT: dlr
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %div = udiv i32 %a, %b
+ %rem = urem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Test memory division with no displacement.
+define void @f4(i32 %dummy, i32 %a, i32 *%src, i32 *%dest) {
+; CHECK: f4:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dl %r2, 0(%r4)
+; CHECK: st %r3, 0(%r5)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %div = udiv i32 %a, %b
+ store i32 %div, i32 *%dest
+ ret void
+}
+
+; Test memory remainder with no displacement.
+define void @f5(i32 %dummy, i32 %a, i32 *%src, i32 *%dest) {
+; CHECK: f5:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dl %r2, 0(%r4)
+; CHECK: st %r2, 0(%r5)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %rem = urem i32 %a, %b
+ store i32 %rem, i32 *%dest
+ ret void
+}
+
+; Test both memory division and memory remainder.
+define i32 @f6(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lhi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dl %r2, 0(%r4)
+; CHECK-NOT: {{dl|dlr}}
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %b = load i32 *%src
+ %div = udiv i32 %a, %b
+ %rem = urem i32 %a, %b
+ %or = or i32 %rem, %div
+ ret i32 %or
+}
+
+; Check the high end of the DL range.
+define i32 @f7(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: dl %r2, 524284(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: dl %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the high end of the negative aligned DL range.
+define i32 @f9(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: dl %r2, -4(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the low end of the DL range.
+define i32 @f10(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f10:
+; CHECK: dl %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f11(i32 %dummy, i32 %a, i32 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r4, -524292
+; CHECK: dl %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
+
+; Check that DL allows an index.
+define i32 @f12(i32 %dummy, i32 %a, i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: dl %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %rem = urem i32 %a, %b
+ ret i32 %rem
+}
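+
+; A sketch of the lowering these tests rely on: DLR divides the 64-bit
+; value held in an even/odd register pair (%r2:%r3 here) by the second
+; operand, leaving the remainder in the even register and the quotient in
+; the odd one. That is why each test first zeroes %r2 (the high word of
+; the dividend) and then stores %r3 for udiv results but %r2 for urem.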
diff --git a/test/CodeGen/SystemZ/int-div-03.ll b/test/CodeGen/SystemZ/int-div-03.ll
new file mode 100644
index 000000000000..b950f2b02035
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-div-03.ll
@@ -0,0 +1,189 @@
+; Test 64-bit signed division and remainder when the divisor is
+; a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register division. The result is in the second of the two registers.
+define void @f1(i64 %dummy, i64 %a, i32 %b, i64 *%dest) {
+; CHECK: f1:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgfr %r2, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %div = sdiv i64 %a, %bext
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test register remainder. The result is in the first of the two registers.
+define void @f2(i64 %dummy, i64 %a, i32 %b, i64 *%dest) {
+; CHECK: f2:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgfr %r2, %r4
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test that division and remainder use a single instruction.
+define i64 @f3(i64 %dummy, i64 %a, i32 %b) {
+; CHECK: f3:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgfr %r2, %r4
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %div = sdiv i64 %a, %bext
+ %rem = srem i64 %a, %bext
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Test register division when the divisor is zero-extended rather than
+; sign-extended. We can't use DSGFR here.
+define void @f4(i64 %dummy, i64 %a, i32 %b, i64 *%dest) {
+; CHECK: f4:
+; CHECK-NOT: dsgfr
+; CHECK: br %r14
+ %bext = zext i32 %b to i64
+ %div = sdiv i64 %a, %bext
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; ...likewise remainder.
+define void @f5(i64 %dummy, i64 %a, i32 %b, i64 *%dest) {
+; CHECK: f5:
+; CHECK-NOT: dsgfr
+; CHECK: br %r14
+ %bext = zext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test memory division with no displacement.
+define void @f6(i64 %dummy, i64 %a, i32 *%src, i64 *%dest) {
+; CHECK: f6:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %div = sdiv i64 %a, %bext
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test memory remainder with no displacement.
+define void @f7(i64 %dummy, i64 %a, i32 *%src, i64 *%dest) {
+; CHECK: f7:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test both memory division and memory remainder.
+define i64 @f8(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK-NOT: {{dsgf|dsgfr}}
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %div = sdiv i64 %a, %bext
+ %rem = srem i64 %a, %bext
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Check the high end of the DSGF range.
+define i64 @f9(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: dsgf %r2, 524284(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f10(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f10:
+; CHECK: agfi %r4, 524288
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
+
+; Check the high end of the negative aligned DSGF range.
+define i64 @f11(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f11:
+; CHECK: dsgf %r2, -4(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
+
+; Check the low end of the DSGF range.
+define i64 @f12(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f12:
+; CHECK: dsgf %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f13(i64 %dummy, i64 %a, i32 *%src) {
+; CHECK: f13:
+; CHECK: agfi %r4, -524292
+; CHECK: dsgf %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
+
+; Check that DSGF allows an index.
+define i64 @f14(i64 %dummy, i64 %a, i64 %src, i64 %index) {
+; CHECK: f14:
+; CHECK: dsgf %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %rem = srem i64 %a, %bext
+ ret i64 %rem
+}
diff --git a/test/CodeGen/SystemZ/int-div-04.ll b/test/CodeGen/SystemZ/int-div-04.ll
new file mode 100644
index 000000000000..3f72be9a47da
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-div-04.ll
@@ -0,0 +1,154 @@
+; Test 64-bit signed division and remainder.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register division. The result is in the second of the two registers.
+define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
+; CHECK: f1:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgr %r2, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %div = sdiv i64 %a, %b
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test register remainder. The result is in the first of the two registers.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
+; CHECK: f2:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgr %r2, %r4
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %rem = srem i64 %a, %b
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test that division and remainder use a single instruction.
+define i64 @f3(i64 %dummy1, i64 %a, i64 %b) {
+; CHECK: f3:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsgr %r2, %r4
+; CHECK-NOT: dsgr
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %div = sdiv i64 %a, %b
+ %rem = srem i64 %a, %b
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Test memory division with no displacement.
+define void @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
+; CHECK: f4:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsg %r2, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %div = sdiv i64 %a, %b
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test memory remainder with no displacement.
+define void @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
+; CHECK: f5:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsg %r2, 0(%r4)
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %rem = srem i64 %a, %b
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test both memory division and memory remainder.
+define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: dsg %r2, 0(%r4)
+; CHECK-NOT: {{dsg|dsgr}}
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %b = load i64 *%src
+ %div = sdiv i64 %a, %b
+ %rem = srem i64 %a, %b
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Check the high end of the DSG range.
+define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: dsg %r2, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: dsg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the high end of the negative aligned DSG range.
+define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f9:
+; CHECK: dsg %r2, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the low end of the DSG range.
+define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f10:
+; CHECK: dsg %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f11(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r4, -524296
+; CHECK: dsg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check that DSG allows an index.
+define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: dsg %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %rem = srem i64 %a, %b
+ ret i64 %rem
+}
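+
+; Worked displacement arithmetic for the 8-byte cases above, with DSG's
+; 20-bit signed displacement range of -524288..524287:
+;   index  65535 -> 8 *  65535 =  524280   index  65536 ->  524288 (AGFI)
+;   index -65536 -> 8 * -65536 = -524288   index -65537 -> -524296 (AGFI)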
diff --git a/test/CodeGen/SystemZ/int-div-05.ll b/test/CodeGen/SystemZ/int-div-05.ll
new file mode 100644
index 000000000000..04f622b44e74
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-div-05.ll
@@ -0,0 +1,166 @@
+; Test 64-bit unsigned division and remainder.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register division. The result is in the second of the two registers.
+define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
+; CHECK: f1:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlgr %r2, %r4
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %div = udiv i64 %a, %b
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test register remainder. The result is in the first of the two registers.
+define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
+; CHECK: f2:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlgr %r2, %r4
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %rem = urem i64 %a, %b
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test that division and remainder use a single instruction.
+define i64 @f3(i64 %dummy1, i64 %a, i64 %b) {
+; CHECK: f3:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlgr %r2, %r4
+; CHECK-NOT: dlgr
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %div = udiv i64 %a, %b
+ %rem = urem i64 %a, %b
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Test memory division with no displacement.
+define void @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
+; CHECK: f4:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlg %r2, 0(%r4)
+; CHECK: stg %r3, 0(%r5)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %div = udiv i64 %a, %b
+ store i64 %div, i64 *%dest
+ ret void
+}
+
+; Test memory remainder with no displacement.
+define void @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
+; CHECK: f5:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlg %r2, 0(%r4)
+; CHECK: stg %r2, 0(%r5)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %rem = urem i64 %a, %b
+ store i64 %rem, i64 *%dest
+ ret void
+}
+
+; Test both memory division and memory remainder.
+define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK-NOT: %r3
+; CHECK: {{llill|lghi}} %r2, 0
+; CHECK-NOT: %r3
+; CHECK: dlg %r2, 0(%r4)
+; CHECK-NOT: {{dlg|dlgr}}
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %b = load i64 *%src
+ %div = udiv i64 %a, %b
+ %rem = urem i64 %a, %b
+ %or = or i64 %rem, %div
+ ret i64 %or
+}
+
+; Check the high end of the DLG range.
+define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: dlg %r2, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: dlg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the high end of the negative aligned DLG range.
+define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f9:
+; CHECK: dlg %r2, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the low end of the DLG range.
+define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f10:
+; CHECK: dlg %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f11(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r4, -524296
+; CHECK: dlg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
+
+; Check that DLG allows an index.
+define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: dlg %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %rem = urem i64 %a, %b
+ ret i64 %rem
+}
diff --git a/test/CodeGen/SystemZ/int-move-01.ll b/test/CodeGen/SystemZ/int-move-01.ll
new file mode 100644
index 000000000000..ae890ade3275
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-01.ll
@@ -0,0 +1,35 @@
+; Test moves between GPRs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test 8-bit moves, which should get promoted to i32.
+define i8 @f1(i8 %a, i8 %b) {
+; CHECK: f1:
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ ret i8 %b
+}
+
+; Test 16-bit moves, which again should get promoted to i32.
+define i16 @f2(i16 %a, i16 %b) {
+; CHECK: f2:
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ ret i16 %b
+}
+
+; Test 32-bit moves.
+define i32 @f3(i32 %a, i32 %b) {
+; CHECK: f3:
+; CHECK: lr %r2, %r3
+; CHECK: br %r14
+ ret i32 %b
+}
+
+; Test 64-bit moves.
+define i64 @f4(i64 %a, i64 %b) {
+; CHECK: f4:
+; CHECK: lgr %r2, %r3
+; CHECK: br %r14
+ ret i64 %b
+}
diff --git a/test/CodeGen/SystemZ/int-move-02.ll b/test/CodeGen/SystemZ/int-move-02.ll
new file mode 100644
index 000000000000..467e22d89c5a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-02.ll
@@ -0,0 +1,110 @@
+; Test 32-bit GPR loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the L range.
+define i32 @f1(i32 *%src) {
+; CHECK: f1:
+; CHECK: l %r2, 0(%r2)
+; CHECK: br %r14
+ %val = load i32 *%src
+ ret i32 %val
+}
+
+; Check the high end of the aligned L range.
+define i32 @f2(i32 *%src) {
+; CHECK: f2:
+; CHECK: l %r2, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the next word up, which should use LY instead of L.
+define i32 @f3(i32 *%src) {
+; CHECK: f3:
+; CHECK: ly %r2, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the high end of the aligned LY range.
+define i32 @f4(i32 *%src) {
+; CHECK: f4:
+; CHECK: ly %r2, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f5(i32 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r2, 524288
+; CHECK: l %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the high end of the negative aligned LY range.
+define i32 @f6(i32 *%src) {
+; CHECK: f6:
+; CHECK: ly %r2, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the low end of the LY range.
+define i32 @f7(i32 *%src) {
+; CHECK: f7:
+; CHECK: ly %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i32 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, -524292
+; CHECK: l %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check that L allows an index.
+define i32 @f9(i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: l %r2, 4095({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i32 *
+ %val = load i32 *%ptr
+ ret i32 %val
+}
+
+; Check that LY allows an index.
+define i32 @f10(i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: ly %r2, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %val = load i32 *%ptr
+ ret i32 %val
+}
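+
+; A sketch of the addressing-mode split exercised above: L takes a 12-bit
+; unsigned displacement (0..4095) while LY takes a 20-bit signed one
+; (-524288..524287). Hence 4 * 1023 = 4092 still fits L, 4 * 1024 = 4096
+; needs LY, and anything beyond the 20-bit range needs separate address
+; logic.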
diff --git a/test/CodeGen/SystemZ/int-move-03.ll b/test/CodeGen/SystemZ/int-move-03.ll
new file mode 100644
index 000000000000..97c70a2740c1
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-03.ll
@@ -0,0 +1,78 @@
+; Test 64-bit GPR loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check LG with no displacement.
+define i64 @f1(i64 *%src) {
+; CHECK: f1:
+; CHECK: lg %r2, 0(%r2)
+; CHECK: br %r14
+ %val = load i64 *%src
+ ret i64 %val
+}
+
+; Check the high end of the aligned LG range.
+define i64 @f2(i64 *%src) {
+; CHECK: f2:
+; CHECK: lg %r2, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %val = load i64 *%ptr
+ ret i64 %val
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f3(i64 *%src) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: lg %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %val = load i64 *%ptr
+ ret i64 %val
+}
+
+; Check the high end of the negative aligned LG range.
+define i64 @f4(i64 *%src) {
+; CHECK: f4:
+; CHECK: lg %r2, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %val = load i64 *%ptr
+ ret i64 %val
+}
+
+; Check the low end of the LG range.
+define i64 @f5(i64 *%src) {
+; CHECK: f5:
+; CHECK: lg %r2, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %val = load i64 *%ptr
+ ret i64 %val
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f6(i64 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524296
+; CHECK: lg %r2, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %val = load i64 *%ptr
+ ret i64 %val
+}
+
+; Check that LG allows an index.
+define i64 @f7(i64 %src, i64 %index) {
+; CHECK: f7:
+; CHECK: lg %r2, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %val = load i64 *%ptr
+ ret i64 %val
+}
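+
+; Unlike L/LY, LG exists only in the 20-bit signed displacement form, so
+; the aligned in-range extremes are 8 * 65535 = 524280 and
+; 8 * -65536 = -524288, with AGFI needed one doubleword beyond either end.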
diff --git a/test/CodeGen/SystemZ/int-move-04.ll b/test/CodeGen/SystemZ/int-move-04.ll
new file mode 100644
index 000000000000..9736657b1efa
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-04.ll
@@ -0,0 +1,130 @@
+; Test 8-bit GPR stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test an i8 store, which should be implemented as a truncating i32 store.
+define void @f1(i8 *%dst, i8 %val) {
+; CHECK: f1:
+; CHECK: stc %r3, 0(%r2)
+; CHECK: br %r14
+ store i8 %val, i8 *%dst
+ ret void
+}
+
+; Test an i32 truncating store.
+define void @f2(i8 *%dst, i32 %val) {
+; CHECK: f2:
+; CHECK: stc %r3, 0(%r2)
+; CHECK: br %r14
+ %trunc = trunc i32 %val to i8
+ store i8 %trunc, i8 *%dst
+ ret void
+}
+
+; Test an i64 truncating store.
+define void @f3(i8 *%dst, i64 %val) {
+; CHECK: f3:
+; CHECK: stc %r3, 0(%r2)
+; CHECK: br %r14
+ %trunc = trunc i64 %val to i8
+ store i8 %trunc, i8 *%dst
+ ret void
+}
+
+; Check the high end of the STC range.
+define void @f4(i8 *%dst, i8 %val) {
+; CHECK: f4:
+; CHECK: stc %r3, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 4095
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which should use STCY instead of STC.
+define void @f5(i8 *%dst, i8 %val) {
+; CHECK: f5:
+; CHECK: stcy %r3, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 4096
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the STCY range.
+define void @f6(i8 *%dst, i8 %val) {
+; CHECK: f6:
+; CHECK: stcy %r3, 524287(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 524287
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f7(i8 *%dst, i8 %val) {
+; CHECK: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: stc %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 524288
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the negative STCY range.
+define void @f8(i8 *%dst, i8 %val) {
+; CHECK: f8:
+; CHECK: stcy %r3, -1(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 -1
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the STCY range.
+define void @f9(i8 *%dst, i8 %val) {
+; CHECK: f9:
+; CHECK: stcy %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 -524288
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f10(i8 *%dst, i8 %val) {
+; CHECK: f10:
+; CHECK: agfi %r2, -524289
+; CHECK: stc %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%dst, i64 -524289
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check that STC allows an index.
+define void @f11(i64 %dst, i64 %index, i8 %val) {
+; CHECK: f11:
+; CHECK: stc %r4, 4095(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ store i8 %val, i8 *%ptr
+ ret void
+}
+
+; Check that STCY allows an index.
+define void @f12(i64 %dst, i64 %index, i8 %val) {
+; CHECK: f12:
+; CHECK: stcy %r4, 4096(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ store i8 %val, i8 *%ptr
+ ret void
+}
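+
+; The same 12-bit/20-bit split applies at byte granularity above: STC
+; reaches offsets 0..4095, STCY reaches -524288..524287, and offsets
+; 524288 and -524289 each fall one byte outside the STCY range.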
diff --git a/test/CodeGen/SystemZ/int-move-05.ll b/test/CodeGen/SystemZ/int-move-05.ll
new file mode 100644
index 000000000000..f61477e71830
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-05.ll
@@ -0,0 +1,130 @@
+; Test 16-bit GPR stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test an i16 store, which should be implemented as a truncating i32 store.
+define void @f1(i16 *%dst, i16 %val) {
+; CHECK: f1:
+; CHECK: sth %r3, 0(%r2)
+; CHECK: br %r14
+ store i16 %val, i16 *%dst
+ ret void
+}
+
+; Test an i32 truncating store.
+define void @f2(i16 *%dst, i32 %val) {
+; CHECK: f2:
+; CHECK: sth %r3, 0(%r2)
+; CHECK: br %r14
+ %trunc = trunc i32 %val to i16
+ store i16 %trunc, i16 *%dst
+ ret void
+}
+
+; Test an i64 truncating store.
+define void @f3(i16 *%dst, i64 %val) {
+; CHECK: f3:
+; CHECK: sth %r3, 0(%r2)
+; CHECK: br %r14
+ %trunc = trunc i64 %val to i16
+ store i16 %trunc, i16 *%dst
+ ret void
+}
+
+; Check the high end of the STH range.
+define void @f4(i16 *%dst, i16 %val) {
+; CHECK: f4:
+; CHECK: sth %r3, 4094(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 2047
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the next halfword up, which should use STHY instead of STH.
+define void @f5(i16 *%dst, i16 %val) {
+; CHECK: f5:
+; CHECK: sthy %r3, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 2048
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the high end of the aligned STHY range.
+define void @f6(i16 *%dst, i16 %val) {
+; CHECK: f6:
+; CHECK: sthy %r3, 524286(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 262143
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f7(i16 *%dst, i16 %val) {
+; CHECK: f7:
+; CHECK: agfi %r2, 524288
+; CHECK: sth %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 262144
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STHY range.
+define void @f8(i16 *%dst, i16 %val) {
+; CHECK: f8:
+; CHECK: sthy %r3, -2(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 -1
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the low end of the STHY range.
+define void @f9(i16 *%dst, i16 %val) {
+; CHECK: f9:
+; CHECK: sthy %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 -262144
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f10(i16 *%dst, i16 %val) {
+; CHECK: f10:
+; CHECK: agfi %r2, -524290
+; CHECK: sth %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%dst, i64 -262145
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check that STH allows an index.
+define void @f11(i64 %dst, i64 %index, i16 %val) {
+; CHECK: f11:
+; CHECK: sth %r4, 4094({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4094
+ %ptr = inttoptr i64 %add2 to i16 *
+ store i16 %val, i16 *%ptr
+ ret void
+}
+
+; Check that STHY allows an index.
+define void @f12(i64 %dst, i64 %index, i16 %val) {
+; CHECK: f12:
+; CHECK: sthy %r4, 4096({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ store i16 %val, i16 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-move-06.ll b/test/CodeGen/SystemZ/int-move-06.ll
new file mode 100644
index 000000000000..5b35a32ff543
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-06.ll
@@ -0,0 +1,117 @@
+; Test 32-bit GPR stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test an i32 store.
+define void @f1(i32 *%dst, i32 %val) {
+; CHECK: f1:
+; CHECK: st %r3, 0(%r2)
+; CHECK: br %r14
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+; Test a truncating i64 store.
+define void @f2(i32 *%dst, i64 %val) {
+; CHECK: f2:
+; CHECK: st %r3, 0(%r2)
+; CHECK: br %r14
+ %word = trunc i64 %val to i32
+ store i32 %word, i32 *%dst
+ ret void
+}
+
+; Check the high end of the aligned ST range.
+define void @f3(i32 *%dst, i32 %val) {
+; CHECK: f3:
+; CHECK: st %r3, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 1023
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the next word up, which should use STY instead of ST.
+define void @f4(i32 *%dst, i32 %val) {
+; CHECK: f4:
+; CHECK: sty %r3, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 1024
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the high end of the aligned STY range.
+define void @f5(i32 *%dst, i32 %val) {
+; CHECK: f5:
+; CHECK: sty %r3, 524284(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 131071
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i32 *%dst, i32 %val) {
+; CHECK: f6:
+; CHECK: agfi %r2, 524288
+; CHECK: st %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 131072
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STY range.
+define void @f7(i32 *%dst, i32 %val) {
+; CHECK: f7:
+; CHECK: sty %r3, -4(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 -1
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the low end of the STY range.
+define void @f8(i32 *%dst, i32 %val) {
+; CHECK: f8:
+; CHECK: sty %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 -131072
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i32 *%dst, i32 %val) {
+; CHECK: f9:
+; CHECK: agfi %r2, -524292
+; CHECK: st %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%dst, i64 -131073
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check that ST allows an index.
+define void @f10(i64 %dst, i64 %index, i32 %val) {
+; CHECK: f10:
+; CHECK: st %r4, 4095(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i32 *
+ store i32 %val, i32 *%ptr
+ ret void
+}
+
+; Check that STY allows an index.
+define void @f11(i64 %dst, i64 %index, i32 %val) {
+; CHECK: f11:
+; CHECK: sty %r4, 4096(%r3,%r2)
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ store i32 %val, i32 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-move-07.ll b/test/CodeGen/SystemZ/int-move-07.ll
new file mode 100644
index 000000000000..ab21ab039534
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-07.ll
@@ -0,0 +1,78 @@
+; Test 64-bit GPR stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check STG with no displacement.
+define void @f1(i64 *%dst, i64 %val) {
+; CHECK: f1:
+; CHECK: stg %r3, 0(%r2)
+; CHECK: br %r14
+ store i64 %val, i64 *%dst
+ ret void
+}
+
+; Check the high end of the aligned STG range.
+define void @f2(i64 *%dst, i64 %val) {
+; CHECK: f2:
+; CHECK: stg %r3, 524280(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%dst, i64 65535
+ store i64 %val, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f3(i64 *%dst, i64 %val) {
+; CHECK: f3:
+; CHECK: agfi %r2, 524288
+; CHECK: stg %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%dst, i64 65536
+ store i64 %val, i64 *%ptr
+ ret void
+}
+
+; Check the high end of the negative aligned STG range.
+define void @f4(i64 *%dst, i64 %val) {
+; CHECK: f4:
+; CHECK: stg %r3, -8(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%dst, i64 -1
+ store i64 %val, i64 *%ptr
+ ret void
+}
+
+; Check the low end of the STG range.
+define void @f5(i64 *%dst, i64 %val) {
+; CHECK: f5:
+; CHECK: stg %r3, -524288(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%dst, i64 -65536
+ store i64 %val, i64 *%ptr
+ ret void
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i64 *%dst, i64 %val) {
+; CHECK: f6:
+; CHECK: agfi %r2, -524296
+; CHECK: stg %r3, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%dst, i64 -65537
+ store i64 %val, i64 *%ptr
+ ret void
+}
+
+; Check that STG allows an index.
+define void @f7(i64 %dst, i64 %index, i64 %val) {
+; CHECK: f7:
+; CHECK: stg %r4, 524287({{%r3,%r2|%r2,%r3}})
+; CHECK: br %r14
+ %add1 = add i64 %dst, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ store i64 %val, i64 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-move-08.ll b/test/CodeGen/SystemZ/int-move-08.ll
new file mode 100644
index 000000000000..5640fec3299f
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-08.ll
@@ -0,0 +1,49 @@
+; Test 32-bit GPR accesses to a PC-relative location.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@gsrc16 = global i16 1
+@gsrc32 = global i32 1
+@gdst16 = global i16 2
+@gdst32 = global i32 2
+
+; Check sign-extending loads from i16.
+define i32 @f1() {
+; CHECK: f1:
+; CHECK: lhrl %r2, gsrc16
+; CHECK: br %r14
+ %val = load i16 *@gsrc16
+ %ext = sext i16 %val to i32
+ ret i32 %ext
+}
+
+; Check zero-extending loads from i16.
+define i32 @f2() {
+; CHECK: f2:
+; CHECK: llhrl %r2, gsrc16
+; CHECK: br %r14
+ %val = load i16 *@gsrc16
+ %ext = zext i16 %val to i32
+ ret i32 %ext
+}
+
+; Check truncating 16-bit stores.
+define void @f3(i32 %val) {
+; CHECK: f3:
+; CHECK: sthrl %r2, gdst16
+; CHECK: br %r14
+ %half = trunc i32 %val to i16
+ store i16 %half, i16 *@gdst16
+ ret void
+}
+
+; Check plain loads and stores.
+define void @f4() {
+; CHECK: f4:
+; CHECK: lrl %r0, gsrc32
+; CHECK: strl %r0, gdst32
+; CHECK: br %r14
+ %val = load i32 *@gsrc32
+ store i32 %val, i32 *@gdst32
+ ret void
+}
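+
+; These *RL forms use PC-relative addressing and assume the globals are at
+; least 2-byte aligned; the natural alignment of the i16/i32 globals used
+; here guarantees that.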
diff --git a/test/CodeGen/SystemZ/int-move-09.ll b/test/CodeGen/SystemZ/int-move-09.ll
new file mode 100644
index 000000000000..a7a8c82951f5
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-move-09.ll
@@ -0,0 +1,81 @@
+; Test 64-bit GPR accesses to a PC-relative location.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@gsrc16 = global i16 1
+@gsrc32 = global i32 1
+@gsrc64 = global i64 1
+@gdst16 = global i16 2
+@gdst32 = global i32 2
+@gdst64 = global i64 2
+
+; Check sign-extending loads from i16.
+define i64 @f1() {
+; CHECK: f1:
+; CHECK: lghrl %r2, gsrc16
+; CHECK: br %r14
+ %val = load i16 *@gsrc16
+ %ext = sext i16 %val to i64
+ ret i64 %ext
+}
+
+; Check zero-extending loads from i16.
+define i64 @f2() {
+; CHECK: f2:
+; CHECK: llghrl %r2, gsrc16
+; CHECK: br %r14
+ %val = load i16 *@gsrc16
+ %ext = zext i16 %val to i64
+ ret i64 %ext
+}
+
+; Check sign-extending loads from i32.
+define i64 @f3() {
+; CHECK: f3:
+; CHECK: lgfrl %r2, gsrc32
+; CHECK: br %r14
+ %val = load i32 *@gsrc32
+ %ext = sext i32 %val to i64
+ ret i64 %ext
+}
+
+; Check zero-extending loads from i32.
+define i64 @f4() {
+; CHECK: f4:
+; CHECK: llgfrl %r2, gsrc32
+; CHECK: br %r14
+ %val = load i32 *@gsrc32
+ %ext = zext i32 %val to i64
+ ret i64 %ext
+}
+
+; Check truncating 16-bit stores.
+define void @f5(i64 %val) {
+; CHECK: f5:
+; CHECK: sthrl %r2, gdst16
+; CHECK: br %r14
+ %half = trunc i64 %val to i16
+ store i16 %half, i16 *@gdst16
+ ret void
+}
+
+; Check truncating 32-bit stores.
+define void @f6(i64 %val) {
+; CHECK: f6:
+; CHECK: strl %r2, gdst32
+; CHECK: br %r14
+ %word = trunc i64 %val to i32
+ store i32 %word, i32 *@gdst32
+ ret void
+}
+
+; Check plain loads and stores.
+define void @f7() {
+; CHECK: f7:
+; CHECK: lgrl %r0, gsrc64
+; CHECK: stgrl %r0, gdst64
+; CHECK: br %r14
+ %val = load i64 *@gsrc64
+ store i64 %val, i64 *@gdst64
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-mul-01.ll b/test/CodeGen/SystemZ/int-mul-01.ll
new file mode 100644
index 000000000000..e1246e2156e3
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-01.ll
@@ -0,0 +1,131 @@
+; Test 32-bit multiplication in which the second operand is a sign-extended
+; i16 memory value.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the MH range.
+define i32 @f1(i32 %lhs, i16 *%src) {
+; CHECK: f1:
+; CHECK: mh %r2, 0(%r3)
+; CHECK: br %r14
+ %half = load i16 *%src
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the aligned MH range.
+define i32 @f2(i32 %lhs, i16 *%src) {
+; CHECK: f2:
+; CHECK: mh %r2, 4094(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2047
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword up, which should use MHY instead of MH.
+define i32 @f3(i32 %lhs, i16 *%src) {
+; CHECK: f3:
+; CHECK: mhy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 2048
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the aligned MHY range.
+define i32 @f4(i32 %lhs, i16 *%src) {
+; CHECK: f4:
+; CHECK: mhy %r2, 524286(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262143
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f5(i32 %lhs, i16 *%src) {
+; CHECK: f5:
+; CHECK: agfi %r3, 524288
+; CHECK: mh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the high end of the negative aligned MHY range.
+define i32 @f6(i32 %lhs, i16 *%src) {
+; CHECK: f6:
+; CHECK: mhy %r2, -2(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -1
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the low end of the MHY range.
+define i32 @f7(i32 %lhs, i16 *%src) {
+; CHECK: f7:
+; CHECK: mhy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262144
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check the next halfword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f8(i32 %lhs, i16 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r3, -524290
+; CHECK: mh %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i16 *%src, i64 -262145
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check that MH allows an index.
+define i32 @f9(i32 %lhs, i64 %src, i64 %index) {
+; CHECK: f9:
+; CHECK: mh %r2, 4094({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4094
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
+
+; Check that MHY allows an index.
+define i32 @f10(i32 %lhs, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: mhy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i16 *
+ %half = load i16 *%ptr
+ %rhs = sext i16 %half to i32
+ %res = mul i32 %lhs, %rhs
+ ret i32 %res
+}
diff --git a/test/CodeGen/SystemZ/int-mul-02.ll b/test/CodeGen/SystemZ/int-mul-02.ll
new file mode 100644
index 000000000000..d39c4dd0961c
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-02.ll
@@ -0,0 +1,129 @@
+; Test 32-bit multiplication in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check MSR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: msr %r2, %r3
+; CHECK: br %r14
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the low end of the MS range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: ms %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the high end of the aligned MS range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: ms %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the next word up, which should use MSY instead of MS.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: msy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the high end of the aligned MSY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: msy %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: ms %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the high end of the negative aligned MSY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: msy %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the low end of the MSY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: msy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: ms %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check that MS allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: ms %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
+
+; Check that MSY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: msy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %mul = mul i32 %a, %b
+ ret i32 %mul
+}
diff --git a/test/CodeGen/SystemZ/int-mul-03.ll b/test/CodeGen/SystemZ/int-mul-03.ll
new file mode 100644
index 000000000000..ab4ef9edd235
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-03.ll
@@ -0,0 +1,102 @@
+; Test multiplications between an i64 and a sign-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check MSGFR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: msgfr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check MSGF with no displacement.
+define i64 @f2(i64 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: msgf %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the aligned MSGF range.
+define i64 @f3(i64 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: msgf %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: msgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the high end of the negative aligned MSGF range.
+define i64 @f5(i64 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: msgf %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the low end of the MSGF range.
+define i64 @f6(i64 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: msgf %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524292
+; CHECK: msgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
+
+; Check that MSGF allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: msgf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %mul = mul i64 %a, %bext
+ ret i64 %mul
+}
diff --git a/test/CodeGen/SystemZ/int-mul-04.ll b/test/CodeGen/SystemZ/int-mul-04.ll
new file mode 100644
index 000000000000..94c263978341
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-04.ll
@@ -0,0 +1,94 @@
+; Test 64-bit multiplication in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check MSGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: msgr %r2, %r3
+; CHECK: br %r14
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check MSG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: msg %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check the high end of the aligned MSG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: msg %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: msg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check the high end of the negative aligned MSG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: msg %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check the low end of the MSG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: msg %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: msg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
+
+; Check that MSG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: msg %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %mul = mul i64 %a, %b
+ ret i64 %mul
+}
diff --git a/test/CodeGen/SystemZ/int-mul-05.ll b/test/CodeGen/SystemZ/int-mul-05.ll
new file mode 100644
index 000000000000..5e4031b5d77d
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-05.ll
@@ -0,0 +1,159 @@
+; Test 32-bit multiplication in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check multiplication by 2, which should use shifts.
+define i32 @f1(i32 %a, i32 *%dest) {
+; CHECK: f1:
+; CHECK: sll %r2, 1
+; CHECK: br %r14
+ %mul = mul i32 %a, 2
+ ret i32 %mul
+}
+
+; Check multiplication by 3.
+define i32 @f2(i32 %a, i32 *%dest) {
+; CHECK: f2:
+; CHECK: mhi %r2, 3
+; CHECK: br %r14
+ %mul = mul i32 %a, 3
+ ret i32 %mul
+}
+
+; Check the high end of the MHI range.
+define i32 @f3(i32 %a, i32 *%dest) {
+; CHECK: f3:
+; CHECK: mhi %r2, 32767
+; CHECK: br %r14
+ %mul = mul i32 %a, 32767
+ ret i32 %mul
+}
+
+; Check the next value up, which should use shifts.
+define i32 @f4(i32 %a, i32 *%dest) {
+; CHECK: f4:
+; CHECK: sll %r2, 15
+; CHECK: br %r14
+ %mul = mul i32 %a, 32768
+ ret i32 %mul
+}
+
+; Check the next value up again, which can use MSFI.
+define i32 @f5(i32 %a, i32 *%dest) {
+; CHECK: f5:
+; CHECK: msfi %r2, 32769
+; CHECK: br %r14
+ %mul = mul i32 %a, 32769
+ ret i32 %mul
+}
+
+; Check the high end of the MSFI range.
+define i32 @f6(i32 %a, i32 *%dest) {
+; CHECK: f6:
+; CHECK: msfi %r2, 2147483647
+; CHECK: br %r14
+ %mul = mul i32 %a, 2147483647
+ ret i32 %mul
+}
+
+; Check the next value up, which should use shifts.
+define i32 @f7(i32 %a, i32 *%dest) {
+; CHECK: f7:
+; CHECK: sll %r2, 31
+; CHECK: br %r14
+ %mul = mul i32 %a, 2147483648
+ ret i32 %mul
+}
+
+; Check the next value up again, which is treated as a negative value.
+define i32 @f8(i32 %a, i32 *%dest) {
+; CHECK: f8:
+; CHECK: msfi %r2, -2147483647
+; CHECK: br %r14
+ %mul = mul i32 %a, 2147483649
+ ret i32 %mul
+}
+
+; Check multiplication by -1, which is a negation.
+define i32 @f9(i32 %a, i32 *%dest) {
+; CHECK: f9:
+; CHECK: lcr %r2, %r2
+; CHECK: br %r14
+ %mul = mul i32 %a, -1
+ ret i32 %mul
+}
+
+; Check multiplication by -2, which should use shifts.
+define i32 @f10(i32 %a, i32 *%dest) {
+; CHECK: f10:
+; CHECK: sll %r2, 1
+; CHECK: lcr %r2, %r2
+; CHECK: br %r14
+ %mul = mul i32 %a, -2
+ ret i32 %mul
+}
+
+; Check multiplication by -3.
+define i32 @f11(i32 %a, i32 *%dest) {
+; CHECK: f11:
+; CHECK: mhi %r2, -3
+; CHECK: br %r14
+ %mul = mul i32 %a, -3
+ ret i32 %mul
+}
+
+; Check the lowest useful MHI value.
+define i32 @f12(i32 %a, i32 *%dest) {
+; CHECK: f12:
+; CHECK: mhi %r2, -32767
+; CHECK: br %r14
+ %mul = mul i32 %a, -32767
+ ret i32 %mul
+}
+
+; Check the next value down, which should use shifts.
+define i32 @f13(i32 %a, i32 *%dest) {
+; CHECK: f13:
+; CHECK: sll %r2, 15
+; CHECK: lcr %r2, %r2
+; CHECK: br %r14
+ %mul = mul i32 %a, -32768
+ ret i32 %mul
+}
+
+; Check the next value down again, which can use MSFI.
+define i32 @f14(i32 %a, i32 *%dest) {
+; CHECK: f14:
+; CHECK: msfi %r2, -32769
+; CHECK: br %r14
+ %mul = mul i32 %a, -32769
+ ret i32 %mul
+}
+
+; Check the lowest useful MSFI value.
+define i32 @f15(i32 %a, i32 *%dest) {
+; CHECK: f15:
+; CHECK: msfi %r2, -2147483647
+; CHECK: br %r14
+ %mul = mul i32 %a, -2147483647
+ ret i32 %mul
+}
+
+; Check the next value down, which should use shifts.
+define i32 @f16(i32 %a, i32 *%dest) {
+; CHECK: f16:
+; CHECK: sll %r2, 31
+; CHECK-NOT: lcr
+; CHECK: br %r14
+ %mul = mul i32 %a, -2147483648
+ ret i32 %mul
+}
+
+; Check the next value down again, which is treated as a positive value.
+define i32 @f17(i32 %a, i32 *%dest) {
+; CHECK: f17:
+; CHECK: msfi %r2, 2147483647
+; CHECK: br %r14
+ %mul = mul i32 %a, -2147483649
+ ret i32 %mul
+}
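+
+; A sketch of the constant-multiplication strategy these tests assume:
+; powers of two become shifts (x * 2^k -> sll k; negated powers add an
+; lcr, except -2^31, which wraps to 2^31), other constants use MHI when
+; they fit in a signed 16-bit immediate and MSFI when they fit in a
+; signed 32-bit immediate. Hence 32767 -> mhi, 32768 = 2^15 -> sll 15,
+; and 32769 -> msfi.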
diff --git a/test/CodeGen/SystemZ/int-mul-06.ll b/test/CodeGen/SystemZ/int-mul-06.ll
new file mode 100644
index 000000000000..a3546059c023
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-06.ll
@@ -0,0 +1,159 @@
+; Test 64-bit multiplication in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check multiplication by 2, which should use shifts.
+define i64 @f1(i64 %a, i64 *%dest) {
+; CHECK: f1:
+; CHECK: sllg %r2, %r2, 1
+; CHECK: br %r14
+ %mul = mul i64 %a, 2
+ ret i64 %mul
+}
+
+; Check multiplication by 3.
+define i64 @f2(i64 %a, i64 *%dest) {
+; CHECK: f2:
+; CHECK: mghi %r2, 3
+; CHECK: br %r14
+ %mul = mul i64 %a, 3
+ ret i64 %mul
+}
+
+; Check the high end of the MGHI range.
+define i64 @f3(i64 %a, i64 *%dest) {
+; CHECK: f3:
+; CHECK: mghi %r2, 32767
+; CHECK: br %r14
+ %mul = mul i64 %a, 32767
+ ret i64 %mul
+}
+
+; Check the next value up, which should use shifts.
+define i64 @f4(i64 %a, i64 *%dest) {
+; CHECK: f4:
+; CHECK: sllg %r2, %r2, 15
+; CHECK: br %r14
+ %mul = mul i64 %a, 32768
+ ret i64 %mul
+}
+
+; Check the next value up again, which can use MSGFI.
+define i64 @f5(i64 %a, i64 *%dest) {
+; CHECK: f5:
+; CHECK: msgfi %r2, 32769
+; CHECK: br %r14
+ %mul = mul i64 %a, 32769
+ ret i64 %mul
+}
+
+; Check the high end of the MSGFI range.
+define i64 @f6(i64 %a, i64 *%dest) {
+; CHECK: f6:
+; CHECK: msgfi %r2, 2147483647
+; CHECK: br %r14
+ %mul = mul i64 %a, 2147483647
+ ret i64 %mul
+}
+
+; Check the next value up, which should use shifts.
+define i64 @f7(i64 %a, i64 *%dest) {
+; CHECK: f7:
+; CHECK: sllg %r2, %r2, 31
+; CHECK: br %r14
+ %mul = mul i64 %a, 2147483648
+ ret i64 %mul
+}
+
+; Check the next value up again, which cannot use constant multiplication.
+define i64 @f8(i64 %a, i64 *%dest) {
+; CHECK: f8:
+; CHECK-NOT: msgfi
+; CHECK: br %r14
+ %mul = mul i64 %a, 2147483649
+ ret i64 %mul
+}
+
+; Check multiplication by -1, which is a negation.
+define i64 @f9(i64 %a, i64 *%dest) {
+; CHECK: f9:
+; CHECK: lcgr {{%r[0-5]}}, %r2
+; CHECK: br %r14
+ %mul = mul i64 %a, -1
+ ret i64 %mul
+}
+
+; Check multiplication by -2, which should use shifts.
+define i64 @f10(i64 %a, i64 *%dest) {
+; CHECK: f10:
+; CHECK: sllg [[SHIFTED:%r[0-5]]], %r2, 1
+; CHECK: lcgr %r2, [[SHIFTED]]
+; CHECK: br %r14
+ %mul = mul i64 %a, -2
+ ret i64 %mul
+}
+
+; Check multiplication by -3.
+define i64 @f11(i64 %a, i64 *%dest) {
+; CHECK: f11:
+; CHECK: mghi %r2, -3
+; CHECK: br %r14
+ %mul = mul i64 %a, -3
+ ret i64 %mul
+}
+
+; Check the lowest useful MGHI value.
+define i64 @f12(i64 %a, i64 *%dest) {
+; CHECK: f12:
+; CHECK: mghi %r2, -32767
+; CHECK: br %r14
+ %mul = mul i64 %a, -32767
+ ret i64 %mul
+}
+
+; Check the next value down, which should use shifts.
+define i64 @f13(i64 %a, i64 *%dest) {
+; CHECK: f13:
+; CHECK: sllg [[SHIFTED:%r[0-5]]], %r2, 15
+; CHECK: lcgr %r2, [[SHIFTED]]
+; CHECK: br %r14
+ %mul = mul i64 %a, -32768
+ ret i64 %mul
+}
+
+; Check the next value down again, which can use MSGFI.
+define i64 @f14(i64 %a, i64 *%dest) {
+; CHECK: f14:
+; CHECK: msgfi %r2, -32769
+; CHECK: br %r14
+ %mul = mul i64 %a, -32769
+ ret i64 %mul
+}
+
+; Check the lowest useful MSGFI value.
+define i64 @f15(i64 %a, i64 *%dest) {
+; CHECK: f15:
+; CHECK: msgfi %r2, -2147483647
+; CHECK: br %r14
+ %mul = mul i64 %a, -2147483647
+ ret i64 %mul
+}
+
+; Check the next value down, which should use shifts.
+define i64 @f16(i64 %a, i64 *%dest) {
+; CHECK: f16:
+; CHECK: sllg [[SHIFTED:%r[0-5]]], %r2, 31
+; CHECK: lcgr %r2, [[SHIFTED]]
+; CHECK: br %r14
+ %mul = mul i64 %a, -2147483648
+ ret i64 %mul
+}
+
+; Check the next value down again, which cannot use constant multiplication.
+define i64 @f17(i64 %a, i64 *%dest) {
+; CHECK: f17:
+; CHECK-NOT: msgfi
+; CHECK: br %r14
+ %mul = mul i64 %a, -2147483649
+ ret i64 %mul
+}
diff --git a/test/CodeGen/SystemZ/int-mul-07.ll b/test/CodeGen/SystemZ/int-mul-07.ll
new file mode 100644
index 000000000000..2459cc359930
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-07.ll
@@ -0,0 +1,64 @@
+; Test high-part i32->i64 multiplications.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; We don't provide *MUL_LOHI or MULH* for the patterns in this file,
+; but they should at least still work.
+
+; Check zero-extended multiplication in which only the high part is used.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: msgr
+; CHECK: br %r14
+ %ax = zext i32 %a to i64
+ %bx = zext i32 %b to i64
+ %mulx = mul i64 %ax, %bx
+ %highx = lshr i64 %mulx, 32
+ %high = trunc i64 %highx to i32
+ ret i32 %high
+}
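+
+; (Illustrative note, not part of the original test: the zext/mul/lshr-by-32
+; sequence above is the usual way of writing a high-part multiply in IR.
+; Since no MULHU/MUL_LOHI patterns are provided for it here, it is simply
+; selected as a full 64-bit multiply of the extended operands, hence MSGR.)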
+
+; Check sign-extended multiplication in which only the high part is used.
+define i32 @f2(i32 %a, i32 %b) {
+; CHECK: f2:
+; CHECK: msgfr
+; CHECK: br %r14
+ %ax = sext i32 %a to i64
+ %bx = sext i32 %b to i64
+ %mulx = mul i64 %ax, %bx
+ %highx = lshr i64 %mulx, 32
+ %high = trunc i64 %highx to i32
+ ret i32 %high
+}
+
+; Check zero-extended multiplication in which the result is split into
+; high and low halves.
+define i32 @f3(i32 %a, i32 %b) {
+; CHECK: f3:
+; CHECK: msgr
+; CHECK: br %r14
+ %ax = zext i32 %a to i64
+ %bx = zext i32 %b to i64
+ %mulx = mul i64 %ax, %bx
+ %highx = lshr i64 %mulx, 32
+ %high = trunc i64 %highx to i32
+ %low = trunc i64 %mulx to i32
+ %or = or i32 %high, %low
+ ret i32 %or
+}
+
+; Check sign-extended multiplication in which the result is split into
+; high and low halves.
+define i32 @f4(i32 %a, i32 %b) {
+; CHECK: f4:
+; CHECK: msgfr
+; CHECK: br %r14
+ %ax = sext i32 %a to i64
+ %bx = sext i32 %b to i64
+ %mulx = mul i64 %ax, %bx
+ %highx = lshr i64 %mulx, 32
+ %high = trunc i64 %highx to i32
+ %low = trunc i64 %mulx to i32
+ %or = or i32 %high, %low
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/int-mul-08.ll b/test/CodeGen/SystemZ/int-mul-08.ll
new file mode 100644
index 000000000000..09ebe7a7b489
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-mul-08.ll
@@ -0,0 +1,188 @@
+; Test high-part i64->i128 multiplications.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check zero-extended multiplication in which only the high part is used.
+define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mlgr %r2, %r4
+; CHECK: br %r14
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check sign-extended multiplication in which only the high part is used.
+; This needs a rather convoluted sequence.
+define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
+; CHECK: f2:
+; CHECK: mlgr
+; CHECK: agr
+; CHECK: agr
+; CHECK: br %r14
+ %ax = sext i64 %a to i128
+ %bx = sext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check zero-extended multiplication in which only part of the high half
+; is used.
+define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
+; CHECK: f3:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mlgr %r2, %r4
+; CHECK: srlg %r2, %r2, 3
+; CHECK: br %r14
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 67
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check zero-extended multiplication in which the result is split into
+; high and low halves.
+define i64 @f4(i64 %dummy, i64 %a, i64 %b) {
+; CHECK: f4:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mlgr %r2, %r4
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ %low = trunc i128 %mulx to i64
+ %or = or i64 %high, %low
+ ret i64 %or
+}
+
+; Check division by a constant, which should use multiplication instead.
+define i64 @f5(i64 %dummy, i64 %a) {
+; CHECK: f5:
+; CHECK: mlgr %r2,
+; CHECK: srlg %r2, %r2,
+; CHECK: br %r14
+ %res = udiv i64 %a, 1234
+ ret i64 %res
+}
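+
+; (Illustrative note, not part of the original test: the expansion relies on
+; the identity  a udiv C == trunc((zext(a) * M) >> (64 + s))  computed in
+; 128-bit arithmetic, with M approximately ceil(2^(64+s) / C) for a suitable
+; shift s, so the high part from MLGR plus the SRLG implements the division.)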
+
+; Check MLG with no displacement.
+define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK-NOT: {{%r[234]}}
+; CHECK: mlg %r2, 0(%r4)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the aligned MLG range.
+define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: mlg %r2, 524280(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword up, which requires separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r4, 524288
+; CHECK: mlg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the high end of the negative aligned MLG range.
+define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f9:
+; CHECK: mlg %r2, -8(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the low end of the MLG range.
+define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
+; CHECK: f10:
+; CHECK: mlg %r2, -524288(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f11(i64 *%dest, i64 %a, i64 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r4, -524296
+; CHECK: mlg %r2, 0(%r4)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
+
+; Check that MLG allows an index.
+define i64 @f12(i64 *%dest, i64 %a, i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: mlg %r2, 524287(%r5,%r4)
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524287
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %ax = zext i64 %a to i128
+ %bx = zext i64 %b to i128
+ %mulx = mul i128 %ax, %bx
+ %highx = lshr i128 %mulx, 64
+ %high = trunc i128 %highx to i64
+ ret i64 %high
+}
diff --git a/test/CodeGen/SystemZ/int-neg-01.ll b/test/CodeGen/SystemZ/int-neg-01.ll
new file mode 100644
index 000000000000..6114f4efbc9a
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-neg-01.ll
@@ -0,0 +1,42 @@
+; Test integer negation.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test i32->i32 negation.
+define i32 @f1(i32 %val) {
+; CHECK: f1:
+; CHECK: lcr %r2, %r2
+; CHECK: br %r14
+ %neg = sub i32 0, %val
+ ret i32 %neg
+}
+
+; Test i32->i64 negation.
+define i64 @f2(i32 %val) {
+; CHECK: f2:
+; CHECK: lcgfr %r2, %r2
+; CHECK: br %r14
+ %ext = sext i32 %val to i64
+ %neg = sub i64 0, %ext
+ ret i64 %neg
+}
+
+; Test i32->i64 negation that uses an "in-register" form of sign extension.
+define i64 @f3(i64 %val) {
+; CHECK: f3:
+; CHECK: lcgfr %r2, %r2
+; CHECK: br %r14
+ %trunc = trunc i64 %val to i32
+ %ext = sext i32 %trunc to i64
+ %neg = sub i64 0, %ext
+ ret i64 %neg
+}
+
+; Test i64 negation.
+define i64 @f4(i64 %val) {
+; CHECK: f4:
+; CHECK: lcgr %r2, %r2
+; CHECK: br %r14
+ %neg = sub i64 0, %val
+ ret i64 %neg
+}
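+
+; (Illustrative note, not part of the original test: LCGFR sign-extends its
+; 32-bit operand and negates it in a single instruction, which is why f2 and
+; f3 need no separate LGFR before the negation.)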
diff --git a/test/CodeGen/SystemZ/int-sub-01.ll b/test/CodeGen/SystemZ/int-sub-01.ll
new file mode 100644
index 000000000000..9a738148f7ef
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-01.ll
@@ -0,0 +1,129 @@
+; Test 32-bit subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check SR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: sr %r2, %r3
+; CHECK: br %r14
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the low end of the S range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: s %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the high end of the aligned S range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: s %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the next word up, which should use SY instead of S.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: sy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the high end of the aligned SY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: sy %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: s %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the high end of the negative aligned SY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: sy %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the low end of the SY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: sy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: s %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check that S allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: s %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
+
+; Check that SY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: sy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %sub = sub i32 %a, %b
+ ret i32 %sub
+}
diff --git a/test/CodeGen/SystemZ/int-sub-02.ll b/test/CodeGen/SystemZ/int-sub-02.ll
new file mode 100644
index 000000000000..5150a960a554
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-02.ll
@@ -0,0 +1,102 @@
+; Test subtractions of a sign-extended i32 from an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check SGFR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: sgfr %r2, %r3
+; CHECK: br %r14
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check SGF with no displacement.
+define i64 @f2(i64 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: sgf %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the aligned SGF range.
+define i64 @f3(i64 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: sgf %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: sgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the negative aligned SGF range.
+define i64 @f5(i64 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: sgf %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the low end of the SGF range.
+define i64 @f6(i64 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: sgf %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524292
+; CHECK: sgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check that SGF allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: sgf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = sext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
diff --git a/test/CodeGen/SystemZ/int-sub-03.ll b/test/CodeGen/SystemZ/int-sub-03.ll
new file mode 100644
index 000000000000..73571b3591f5
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-03.ll
@@ -0,0 +1,102 @@
+; Test subtractions of a zero-extended i32 from an i64.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check SLGFR.
+define i64 @f1(i64 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: slgfr %r2, %r3
+; CHECK: br %r14
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check SLGF with no displacement.
+define i64 @f2(i64 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: slgf %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the aligned SLGF range.
+define i64 @f3(i64 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: slgf %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: slgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the high end of the negative aligned SLGF range.
+define i64 @f5(i64 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: slgf %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the low end of the SLGF range.
+define i64 @f6(i64 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: slgf %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524292
+; CHECK: slgf %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
+
+; Check that SLGF allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: slgf %r2, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i64
+ %sub = sub i64 %a, %bext
+ ret i64 %sub
+}
diff --git a/test/CodeGen/SystemZ/int-sub-04.ll b/test/CodeGen/SystemZ/int-sub-04.ll
new file mode 100644
index 000000000000..545d34216809
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-04.ll
@@ -0,0 +1,94 @@
+; Test 64-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check SGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: sgr %r2, %r3
+; CHECK: br %r14
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check SG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: sg %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check the high end of the aligned SG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: sg %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: sg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check the high end of the negative aligned SG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: sg %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check the low end of the SG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: sg %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: sg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
+
+; Check that SG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: sg %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %sub = sub i64 %a, %b
+ ret i64 %sub
+}
diff --git a/test/CodeGen/SystemZ/int-sub-05.ll b/test/CodeGen/SystemZ/int-sub-05.ll
new file mode 100644
index 000000000000..1475b244f678
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-05.ll
@@ -0,0 +1,118 @@
+; Test 128-bit subtraction in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Test register subtraction.
+define void @f1(i128 *%ptr, i64 %high, i64 %low) {
+; CHECK: f1:
+; CHECK: slgr {{%r[0-5]}}, %r4
+; CHECK: slbgr {{%r[0-5]}}, %r3
+; CHECK: br %r14
+ %a = load i128 *%ptr
+ %highx = zext i64 %high to i128
+ %lowx = zext i64 %low to i128
+ %bhigh = shl i128 %highx, 64
+ %b = or i128 %bhigh, %lowx
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%ptr
+ ret void
+}
+
+; Test memory subtraction with no offset.
+define void @f2(i64 %addr) {
+; CHECK: f2:
+; CHECK: slg {{%r[0-5]}}, 8(%r2)
+; CHECK: slbg {{%r[0-5]}}, 0(%r2)
+; CHECK: br %r14
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test the highest aligned offset that is in range of both SLG and SLBG.
+define void @f3(i64 %base) {
+; CHECK: f3:
+; CHECK: slg {{%r[0-5]}}, 524280(%r2)
+; CHECK: slbg {{%r[0-5]}}, 524272(%r2)
+; CHECK: br %r14
+ %addr = add i64 %base, 524272
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword up, which requires separate address logic for SLG.
+define void @f4(i64 %base) {
+; CHECK: f4:
+; CHECK: lgr [[BASE:%r[1-5]]], %r2
+; CHECK: agfi [[BASE]], 524288
+; CHECK: slg {{%r[0-5]}}, 0([[BASE]])
+; CHECK: slbg {{%r[0-5]}}, 524280(%r2)
+; CHECK: br %r14
+ %addr = add i64 %base, 524280
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword after that, which requires separate logic for
+; both instructions. It would be better to create an anchor at 524288
+; that both instructions can use, but that isn't implemented yet.
+define void @f5(i64 %base) {
+; CHECK: f5:
+; CHECK: slg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: slbg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %addr = add i64 %base, 524288
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
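+
+; (Hypothetical sketch, not part of the original test: with such an anchor the
+; sequence for f5 could be something like
+;   agfi %r2, 524288
+;   slg  %rN, 8(%r2)
+;   slbg %rN, 0(%r2)
+; so that a single address computation serves both instructions; the register
+; names here are illustrative only.)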
+
+; Test the lowest displacement that is in range of both SLG and SLBG.
+define void @f6(i64 %base) {
+; CHECK: f6:
+; CHECK: slg {{%r[0-5]}}, -524280(%r2)
+; CHECK: slbg {{%r[0-5]}}, -524288(%r2)
+; CHECK: br %r14
+ %addr = add i64 %base, -524288
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test the next doubleword down, which is out of range of the SLBG.
+define void @f7(i64 %base) {
+; CHECK: f7:
+; CHECK: slg {{%r[0-5]}}, -524288(%r2)
+; CHECK: slbg {{%r[0-5]}}, 0({{%r[1-5]}})
+; CHECK: br %r14
+ %addr = add i64 %base, -524296
+ %bptr = inttoptr i64 %addr to i128 *
+ %aptr = getelementptr i128 *%bptr, i64 -8
+ %a = load i128 *%aptr
+ %b = load i128 *%bptr
+ %sub = sub i128 %a, %b
+ store i128 %sub, i128 *%aptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/int-sub-06.ll b/test/CodeGen/SystemZ/int-sub-06.ll
new file mode 100644
index 000000000000..0e04d51e2bc7
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-sub-06.ll
@@ -0,0 +1,165 @@
+; Test 128-bit subtraction in which the second operand is a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register subtractions. The XOR ensures that we don't instead zero-extend
+; %b into a register and use memory subtraction.
+define void @f1(i128 *%aptr, i32 %b) {
+; CHECK: f1:
+; CHECK: slgfr {{%r[0-5]}}, %r3
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Like f1, but using an "in-register" extension.
+define void @f2(i128 *%aptr, i64 %b) {
+; CHECK: f2:
+; CHECK: slgfr {{%r[0-5]}}, %r3
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %trunc = trunc i64 %b to i32
+ %bext = zext i32 %trunc to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test register subtraction in cases where the second operand is zero-extended
+; from i64 rather than i32, but is later masked to the i32 range.
+define void @f3(i128 *%aptr, i64 %b) {
+; CHECK: f3:
+; CHECK: slgfr {{%r[0-5]}}, %r3
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %bext = zext i64 %b to i128
+ %and = and i128 %bext, 4294967295
+ %sub = sub i128 %xor, %and
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Test SLGF with no offset.
+define void @f4(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f4:
+; CHECK: slgf {{%r[0-5]}}, 0(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %b = load i32 *%bsrc
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check the high end of the SLGF range.
+define void @f5(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f5:
+; CHECK: slgf {{%r[0-5]}}, 524284(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i64 131071
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: slgf {{%r[0-5]}}, 0(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i64 131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check the high end of the negative aligned SLGF range.
+define void @f7(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f7:
+; CHECK: slgf {{%r[0-5]}}, -4(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -1
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check the low end of the SLGF range.
+define void @f8(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f8:
+; CHECK: slgf {{%r[0-5]}}, -524288(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -131072
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: slgf {{%r[0-5]}}, 0(%r3)
+; CHECK: slbgr
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %ptr = getelementptr i32 *%bsrc, i128 -131073
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
+
+; Check that SLGF allows an index.
+define void @f10(i128 *%aptr, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: slgf {{%r[0-5]}}, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %a = load i128 *%aptr
+ %xor = xor i128 %a, 127
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524284
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %bext = zext i32 %b to i128
+ %sub = sub i128 %xor, %bext
+ store i128 %sub, i128 *%aptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/la-01.ll b/test/CodeGen/SystemZ/la-01.ll
new file mode 100644
index 000000000000..b43e3f8662dc
--- /dev/null
+++ b/test/CodeGen/SystemZ/la-01.ll
@@ -0,0 +1,80 @@
+; Test loads of symbolic addresses when generating small-model non-PIC.
+; All addresses can be treated as PC-relative.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+@e4 = external global i32
+@d4 = global i32 1
+@e2 = external global i32, align 2
+@d2 = global i32 1, align 2
+@e1 = external global i32, align 1
+@d1 = global i32 1, align 1
+
+declare void @ef()
+define void @df() {
+ ret void
+}
+
+; Test a load of a fully-aligned external variable.
+define i32 *@f1() {
+; CHECK: f1:
+; CHECK: larl %r2, e4
+; CHECK-NEXT: br %r14
+ ret i32 *@e4
+}
+
+; Test a load of a fully-aligned local variable.
+define i32 *@f2() {
+; CHECK: f2:
+; CHECK: larl %r2, d4
+; CHECK-NEXT: br %r14
+ ret i32 *@d4
+}
+
+; Test a load of a 2-byte-aligned external variable.
+define i32 *@f3() {
+; CHECK: f3:
+; CHECK: larl %r2, e2
+; CHECK-NEXT: br %r14
+ ret i32 *@e2
+}
+
+; Test a load of a 2-byte-aligned local variable.
+define i32 *@f4() {
+; CHECK: f4:
+; CHECK: larl %r2, d2
+; CHECK-NEXT: br %r14
+ ret i32 *@d2
+}
+
+; Test a load of an unaligned external variable, which must go via the GOT.
+define i32 *@f5() {
+; CHECK: f5:
+; CHECK: lgrl %r2, e1@GOT
+; CHECK-NEXT: br %r14
+ ret i32 *@e1
+}
+
+; Test a load of an unaligned local variable, which must go via the GOT.
+define i32 *@f6() {
+; CHECK: f6:
+; CHECK: lgrl %r2, d1@GOT
+; CHECK-NEXT: br %r14
+ ret i32 *@d1
+}
+
+; Test a load of an external function.
+define void() *@f7() {
+; CHECK: f7:
+; CHECK: larl %r2, ef
+; CHECK-NEXT: br %r14
+ ret void() *@ef
+}
+
+; Test a load of a local function.
+define void() *@f8() {
+; CHECK: f8:
+; CHECK: larl %r2, df
+; CHECK-NEXT: br %r14
+ ret void() *@df
+}
diff --git a/test/CodeGen/SystemZ/la-02.ll b/test/CodeGen/SystemZ/la-02.ll
new file mode 100644
index 000000000000..4c5374a0925b
--- /dev/null
+++ b/test/CodeGen/SystemZ/la-02.ll
@@ -0,0 +1,87 @@
+; Test loads of symbolic addresses when generating medium- and
+; large-model non-PIC.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -code-model=medium | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -code-model=large | FileCheck %s
+
+@ev = external global i32
+@dv = global i32 0
+@pv = protected global i32 0
+@hv = hidden global i32 0
+
+declare void @ef()
+define void @df() {
+ ret void
+}
+define protected void @pf() {
+ ret void
+}
+define hidden void @hf() {
+ ret void
+}
+
+; Test loads of external variables. There is no guarantee that the
+; variable will be in range of LARL.
+define i32 *@f1() {
+; CHECK: f1:
+; CHECK: lgrl %r2, ev@GOT
+; CHECK: br %r14
+ ret i32 *@ev
+}
+
+; ...likewise locally-defined normal-visibility variables.
+define i32 *@f2() {
+; CHECK: f2:
+; CHECK: lgrl %r2, dv@GOT
+; CHECK: br %r14
+ ret i32 *@dv
+}
+
+; ...likewise protected variables.
+define i32 *@f3() {
+; CHECK: f3:
+; CHECK: lgrl %r2, pv@GOT
+; CHECK: br %r14
+ ret i32 *@pv
+}
+
+; ...likewise hidden variables.
+define i32 *@f4() {
+; CHECK: f4:
+; CHECK: lgrl %r2, hv@GOT
+; CHECK: br %r14
+ ret i32 *@hv
+}
+
+; Check loads of external functions. This could use LARL, but we don't have
+; code to detect that yet.
+define void() *@f5() {
+; CHECK: f5:
+; CHECK: lgrl %r2, ef@GOT
+; CHECK: br %r14
+ ret void() *@ef
+}
+
+; ...likewise locally-defined normal-visibility functions.
+define void() *@f6() {
+; CHECK: f6:
+; CHECK: lgrl %r2, df@GOT
+; CHECK: br %r14
+ ret void() *@df
+}
+
+; ...likewise protected functions.
+define void() *@f7() {
+; CHECK: f7:
+; CHECK: lgrl %r2, pf@GOT
+; CHECK: br %r14
+ ret void() *@pf
+}
+
+; ...likewise hidden functions.
+define void() *@f8() {
+; CHECK: f8:
+; CHECK: lgrl %r2, hf@GOT
+; CHECK: br %r14
+ ret void() *@hf
+}
diff --git a/test/CodeGen/SystemZ/la-03.ll b/test/CodeGen/SystemZ/la-03.ll
new file mode 100644
index 000000000000..9449b2bfbec0
--- /dev/null
+++ b/test/CodeGen/SystemZ/la-03.ll
@@ -0,0 +1,85 @@
+; Test loads of symbolic addresses in PIC code.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -relocation-model=pic | FileCheck %s
+
+@ev = external global i32
+@dv = global i32 0
+@pv = protected global i32 0
+@hv = hidden global i32 0
+
+declare void @ef()
+define void @df() {
+ ret void
+}
+define protected void @pf() {
+ ret void
+}
+define hidden void @hf() {
+ ret void
+}
+
+; Test loads of external variables, which must go via the GOT.
+define i32 *@f1() {
+; CHECK: f1:
+; CHECK: lgrl %r2, ev@GOT
+; CHECK: br %r14
+ ret i32 *@ev
+}
+
+; Check loads of locally-defined normal-visibility variables, which might
+; be overridden. The load must go via the GOT.
+define i32 *@f2() {
+; CHECK: f2:
+; CHECK: lgrl %r2, dv@GOT
+; CHECK: br %r14
+ ret i32 *@dv
+}
+
+; Check loads of protected variables, which in the small code model
+; must be in range of LARL.
+define i32 *@f3() {
+; CHECK: f3:
+; CHECK: larl %r2, pv
+; CHECK: br %r14
+ ret i32 *@pv
+}
+
+; ...likewise hidden variables.
+define i32 *@f4() {
+; CHECK: f4:
+; CHECK: larl %r2, hv
+; CHECK: br %r14
+ ret i32 *@hv
+}
+
+; Like f1, but for functions.
+define void() *@f5() {
+; CHECK: f5:
+; CHECK: lgrl %r2, ef@GOT
+; CHECK: br %r14
+ ret void() *@ef
+}
+
+; Like f2, but for functions.
+define void() *@f6() {
+; CHECK: f6:
+; CHECK: lgrl %r2, df@GOT
+; CHECK: br %r14
+ ret void() *@df
+}
+
+; Like f3, but for functions.
+define void() *@f7() {
+; CHECK: f7:
+; CHECK: larl %r2, pf
+; CHECK: br %r14
+ ret void() *@pf
+}
+
+; Like f4, but for functions.
+define void() *@f8() {
+; CHECK: f8:
+; CHECK: larl %r2, hf
+; CHECK: br %r14
+ ret void() *@hf
+}
diff --git a/test/CodeGen/SystemZ/la-04.ll b/test/CodeGen/SystemZ/la-04.ll
new file mode 100644
index 000000000000..4c3636481e7d
--- /dev/null
+++ b/test/CodeGen/SystemZ/la-04.ll
@@ -0,0 +1,18 @@
+; Test blockaddress.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Do some arbitrary work and return the address of the following label.
+define i8 *@f1(i8 *%addr) {
+; CHECK: f1:
+; CHECK: mvi 0(%r2), 1
+; CHECK: [[LABEL:\.L.*]]:
+; CHECK: larl %r2, [[LABEL]]
+; CHECK: br %r14
+entry:
+ store i8 1, i8 *%addr
+ br label %b.lab
+
+b.lab:
+ ret i8 *blockaddress(@f1, %b.lab)
+}
diff --git a/test/CodeGen/SystemZ/lit.local.cfg b/test/CodeGen/SystemZ/lit.local.cfg
new file mode 100644
index 000000000000..79528d178f23
--- /dev/null
+++ b/test/CodeGen/SystemZ/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if 'SystemZ' not in targets:
+ config.unsupported = True
+
diff --git a/test/CodeGen/SystemZ/or-01.ll b/test/CodeGen/SystemZ/or-01.ll
new file mode 100644
index 000000000000..20c93129efca
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-01.ll
@@ -0,0 +1,129 @@
+; Test 32-bit ORs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check OR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: or %r2, %r3
+; CHECK: br %r14
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the low end of the O range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: o %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the high end of the aligned O range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: o %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the next word up, which should use OY instead of O.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: oy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the high end of the aligned OY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: oy %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: o %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the high end of the negative aligned OY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: oy %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the low end of the OY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: oy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: o %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check that O allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: o %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
+
+; Check that OY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: oy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %or = or i32 %a, %b
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/or-02.ll b/test/CodeGen/SystemZ/or-02.ll
new file mode 100644
index 000000000000..377a3e604c60
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-02.ll
@@ -0,0 +1,66 @@
+; Test 32-bit ORs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the lowest useful OILL value.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: oill %r2, 1
+; CHECK: br %r14
+ %or = or i32 %a, 1
+ ret i32 %or
+}
+
+; Check the high end of the OILL range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: oill %r2, 65535
+; CHECK: br %r14
+ %or = or i32 %a, 65535
+ ret i32 %or
+}
+
+; Check the lowest useful OILH value, which is the next value up.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: oilh %r2, 1
+; CHECK: br %r14
+ %or = or i32 %a, 65536
+ ret i32 %or
+}
+
+; Check the lowest useful OILF value, which is the next value up again.
+define i32 @f4(i32 %a) {
+; CHECK: f4:
+; CHECK: oilf %r2, 65537
+; CHECK: br %r14
+ %or = or i32 %a, 65537
+ ret i32 %or
+}
+
+; Check the high end of the OILH range.
+define i32 @f5(i32 %a) {
+; CHECK: f5:
+; CHECK: oilh %r2, 65535
+; CHECK: br %r14
+ %or = or i32 %a, -65536
+ ret i32 %or
+}
+
+; Check the next value up, which must use OILF instead.
+define i32 @f6(i32 %a) {
+; CHECK: f6:
+; CHECK: oilf %r2, 4294901761
+; CHECK: br %r14
+ %or = or i32 %a, -65535
+ ret i32 %or
+}
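+
+; (Illustrative note, not part of the original test: -65535 is 0xffff0001,
+; which has nonzero bits in both 16-bit halves of the register, so neither
+; OILL nor OILH alone can produce it and OILF is needed.)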
+
+; Check the highest useful OILF value.
+define i32 @f7(i32 %a) {
+; CHECK: f7:
+; CHECK: oilf %r2, 4294967294
+; CHECK: br %r14
+ %or = or i32 %a, -2
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/or-03.ll b/test/CodeGen/SystemZ/or-03.ll
new file mode 100644
index 000000000000..16f84f1635a8
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-03.ll
@@ -0,0 +1,94 @@
+; Test 64-bit ORs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check OGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: ogr %r2, %r3
+; CHECK: br %r14
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check OG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: og %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check the high end of the aligned OG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: og %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: og %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check the high end of the negative aligned OG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: og %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check the low end of the OG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: og %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: og %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
+
+; Check that OG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: og %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %or = or i64 %a, %b
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/or-04.ll b/test/CodeGen/SystemZ/or-04.ll
new file mode 100644
index 000000000000..a8278423981a
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-04.ll
@@ -0,0 +1,182 @@
+; Test 64-bit ORs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the lowest useful OILL value.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: oill %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 1
+ ret i64 %or
+}
+
+; Check the high end of the OILL range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: oill %r2, 65535
+; CHECK: br %r14
+ %or = or i64 %a, 65535
+ ret i64 %or
+}
+
+; Check the lowest useful OILH value, which is the next value up.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: oilh %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 65536
+ ret i64 %or
+}
+
+; Check the lowest useful OILF value, which is the next value up again.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK: oilf %r2, 4294901759
+; CHECK: br %r14
+ %or = or i64 %a, 4294901759
+ ret i64 %or
+}
+
+; Check the high end of the OILH range.
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK: oilh %r2, 65535
+; CHECK: br %r14
+ %or = or i64 %a, 4294901760
+ ret i64 %or
+}
+
+; Check the high end of the OILF range.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK: oilf %r2, 4294967295
+; CHECK: br %r14
+ %or = or i64 %a, 4294967295
+ ret i64 %or
+}
+
+; Check the lowest useful OIHL value, which is the next value up.
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK: oihl %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 4294967296
+ ret i64 %or
+}
+
+; Check the next value up again, which must use two ORs.
+define i64 @f8(i64 %a) {
+; CHECK: f8:
+; CHECK: oihl %r2, 1
+; CHECK: oill %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 4294967297
+ ret i64 %or
+}
+
+; Check a combination of OIHL and the high end of the OILL range.
+define i64 @f9(i64 %a) {
+; CHECK: f9:
+; CHECK: oihl %r2, 1
+; CHECK: oill %r2, 65535
+; CHECK: br %r14
+ %or = or i64 %a, 4295032831
+ ret i64 %or
+}
+
+; Check the next value up, which must use OILH.
+define i64 @f10(i64 %a) {
+; CHECK: f10:
+; CHECK: oihl %r2, 1
+; CHECK: oilh %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 4295032832
+ ret i64 %or
+}
+
+; Check the next value up again, which must use OILF.
+define i64 @f11(i64 %a) {
+; CHECK: f11:
+; CHECK: oihl %r2, 1
+; CHECK: oilf %r2, 65537
+; CHECK: br %r14
+ %or = or i64 %a, 4295032833
+ ret i64 %or
+}
+
+; Check the high end of the OIHL range.
+define i64 @f12(i64 %a) {
+; CHECK: f12:
+; CHECK: oihl %r2, 65535
+; CHECK: br %r14
+ %or = or i64 %a, 281470681743360
+ ret i64 %or
+}
+
+; Check a combination of the high end of the OIHL range and the high end
+; of the OILF range.
+define i64 @f13(i64 %a) {
+; CHECK: f13:
+; CHECK: oihl %r2, 65535
+; CHECK: oilf %r2, 4294967295
+; CHECK: br %r14
+ %or = or i64 %a, 281474976710655
+ ret i64 %or
+}
+
+; Check the lowest useful OIHH value.
+define i64 @f14(i64 %a) {
+; CHECK: f14:
+; CHECK: oihh %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 281474976710656
+ ret i64 %or
+}
+
+; Check the next value up, which needs two ORs.
+define i64 @f15(i64 %a) {
+; CHECK: f15:
+; CHECK: oihh %r2, 1
+; CHECK: oill %r2, 1
+; CHECK: br %r14
+ %or = or i64 %a, 281474976710657
+ ret i64 %or
+}
+
+; Check the lowest useful OIHF value.
+define i64 @f16(i64 %a) {
+; CHECK: f16:
+; CHECK: oihf %r2, 65537
+; CHECK: br %r14
+ %or = or i64 %a, 281479271677952
+ ret i64 %or
+}
+
+; Check the high end of the OIHH range.
+define i64 @f17(i64 %a) {
+; CHECK: f17:
+; CHECK: oihh %r2, 65535
+; CHECK: br %r14
+ %or = or i64 %a, 18446462598732840960
+ ret i64 %or
+}
+
+; Check the high end of the OIHF range.
+define i64 @f18(i64 %a) {
+; CHECK: f18:
+; CHECK: oihf %r2, 4294967295
+; CHECK: br %r14
+ %or = or i64 %a, -4294967296
+ ret i64 %or
+}
+
+; Check the highest useful OR value.
+define i64 @f19(i64 %a) {
+; CHECK: f19:
+; CHECK: oihf %r2, 4294967295
+; CHECK: oilf %r2, 4294967294
+; CHECK: br %r14
+ %or = or i64 %a, -2
+ ret i64 %or
+}
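+
+; (Illustrative summary, not part of the original test: the 64-bit constant is
+; viewed as four 16-bit fields; OIHH, OIHL, OILH and OILL each set one field,
+; while OIHF and OILF set a 32-bit half, and the tests above exercise the
+; cheapest combinations that cover every nonzero field.)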
diff --git a/test/CodeGen/SystemZ/or-05.ll b/test/CodeGen/SystemZ/or-05.ll
new file mode 100644
index 000000000000..9b6c10d4b5ce
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-05.ll
@@ -0,0 +1,165 @@
+; Test ORs of a constant into a byte of memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the lowest useful constant, expressed as a signed integer.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: oi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %or = or i8 %val, -255
+ store i8 %or, i8 *%ptr
+ ret void
+}
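+
+; (Illustrative note, not part of the original test: -255 wraps to 1 in i8,
+; so f1 and f3 below OR in the same byte mask; the pair just covers the
+; signed and unsigned spellings of the constant.)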
+
+; Check the highest useful constant, expressed as a signed integer.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %or = or i8 %val, -2
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the lowest useful constant, expressed as an unsigned integer.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: oi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %or = or i8 %val, 1
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the highest useful constant, expressed as an unsigned integer.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %or = or i8 %val, 254
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the OI range.
+define void @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: oi 4095(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which should use OIY instead of OI.
+define void @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: oiy 4096(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the OIY range.
+define void @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: oiy 524287(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, 524288
+; CHECK: oi 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the negative OIY range.
+define void @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: oiy -1(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the OIY range.
+define void @f10(i8 *%src) {
+; CHECK: f10:
+; CHECK: oiy -524288(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f11(i8 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r2, -524289
+; CHECK: oi 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check that OI does not allow an index.
+define void @f12(i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: agr %r2, %r3
+; CHECK: oi 4095(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
+
+; Check that OIY does not allow an index.
+define void @f13(i64 %src, i64 %index) {
+; CHECK: f13:
+; CHECK: agr %r2, %r3
+; CHECK: oiy 4096(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %or = or i8 %val, 127
+ store i8 %or, i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/or-06.ll b/test/CodeGen/SystemZ/or-06.ll
new file mode 100644
index 000000000000..a24a18a191f1
--- /dev/null
+++ b/test/CodeGen/SystemZ/or-06.ll
@@ -0,0 +1,108 @@
+; Test that we can use OI for byte operations that are expressed as i32
+; or i64 operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Zero extension to 32 bits, negative constant.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %or = or i32 %ext, -2
+ %trunc = trunc i32 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, negative constant.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %or = or i64 %ext, -2
+ %trunc = trunc i64 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 32 bits, positive constant.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %or = or i32 %ext, 254
+ %trunc = trunc i32 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, positive constant.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %or = or i64 %ext, 254
+ %trunc = trunc i64 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, negative constant.
+define void @f5(i8 *%ptr) {
+; CHECK: f5:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %or = or i32 %ext, -2
+ %trunc = trunc i32 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, negative constant.
+define void @f6(i8 *%ptr) {
+; CHECK: f6:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %or = or i64 %ext, -2
+ %trunc = trunc i64 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, positive constant.
+define void @f7(i8 *%ptr) {
+; CHECK: f7:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %or = or i32 %ext, 254
+ %trunc = trunc i32 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, positive constant.
+define void @f8(i8 *%ptr) {
+; CHECK: f8:
+; CHECK: oi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %or = or i64 %ext, 254
+ %trunc = trunc i64 %or to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
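+
+; (Illustrative note, not part of the original test: every case above folds to
+; the same OI because only the low 8 bits of the OR result are stored back,
+; so the choice of sign or zero extension and of i32 or i64 width is dead.)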
diff --git a/test/CodeGen/SystemZ/shift-01.ll b/test/CodeGen/SystemZ/shift-01.ll
new file mode 100644
index 000000000000..e5a459aaa828
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-01.ll
@@ -0,0 +1,114 @@
+; Test 32-bit shifts left.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check the low end of the SLL range.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: sll %r2, 1
+; CHECK: br %r14
+ %shift = shl i32 %a, 1
+ ret i32 %shift
+}
+
+; Check the high end of the defined SLL range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: sll %r2, 31
+; CHECK: br %r14
+ %shift = shl i32 %a, 31
+ ret i32 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK-NOT: sll %r2, 32
+; CHECK: br %r14
+ %shift = shl i32 %a, 32
+ ret i32 %shift
+}
+
+; Make sure that we don't generate negative shift amounts.
+define i32 @f4(i32 %a, i32 %amt) {
+; CHECK: f4:
+; CHECK-NOT: sll %r2, -1{{.*}}
+; CHECK: br %r14
+ %sub = sub i32 %amt, 1
+ %shift = shl i32 %a, %sub
+ ret i32 %shift
+}
+
+; Check variable shifts.
+define i32 @f5(i32 %a, i32 %amt) {
+; CHECK: f5:
+; CHECK: sll %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = shl i32 %a, %amt
+ ret i32 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i32 @f6(i32 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: sll %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %shift = shl i32 %a, %add
+ ret i32 %shift
+}
+
+; ...and again with a truncated 64-bit shift amount.
+define i32 @f7(i32 %a, i64 %amt) {
+; CHECK: f7:
+; CHECK: sll %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %trunc = trunc i64 %add to i32
+ %shift = shl i32 %a, %trunc
+ ret i32 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i32 @f8(i32 %a, i32 %amt) {
+; CHECK: f8:
+; CHECK: sll %r2, 4095(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4095
+ %shift = shl i32 %a, %add
+ ret i32 %shift
+}
+
+; Check the next value up. Again, we could mask the amount instead.
+define i32 @f9(i32 %a, i32 %amt) {
+; CHECK: f9:
+; CHECK: ahi %r3, 4096
+; CHECK: sll %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4096
+ %shift = shl i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i32 @f10(i32 %a, i32 %b, i32 %c) {
+; CHECK: f10:
+; CHECK: ar {{%r3, %r4|%r4, %r3}}
+; CHECK: sll %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i32 %b, %c
+ %shift = shl i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i32 @f11(i32 %a, i32 *%ptr) {
+; CHECK: f11:
+; CHECK: l %r1, 0(%r3)
+; CHECK: sll %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i32 *%ptr
+ %shift = shl i32 %a, %amt
+ ret i32 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-02.ll b/test/CodeGen/SystemZ/shift-02.ll
new file mode 100644
index 000000000000..38093a8ff7a0
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-02.ll
@@ -0,0 +1,114 @@
+; Test 32-bit logical shifts right.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
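+; SRL uses the same base-plus-12-bit-displacement amount form as SLL, so
+; the same [0, 4095] folding limit should apply below.
+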
+; Check the low end of the SRL range.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: srl %r2, 1
+; CHECK: br %r14
+ %shift = lshr i32 %a, 1
+ ret i32 %shift
+}
+
+; Check the high end of the defined SRL range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: srl %r2, 31
+; CHECK: br %r14
+ %shift = lshr i32 %a, 31
+ ret i32 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK-NOT: srl %r2, 32
+; CHECK: br %r14
+ %shift = lshr i32 %a, 32
+ ret i32 %shift
+}
+
+; Make sure that we don't generate negative shift amounts.
+define i32 @f4(i32 %a, i32 %amt) {
+; CHECK: f4:
+; CHECK-NOT: srl %r2, -1{{.*}}
+; CHECK: br %r14
+ %sub = sub i32 %amt, 1
+ %shift = lshr i32 %a, %sub
+ ret i32 %shift
+}
+
+; Check variable shifts.
+define i32 @f5(i32 %a, i32 %amt) {
+; CHECK: f5:
+; CHECK: srl %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = lshr i32 %a, %amt
+ ret i32 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i32 @f6(i32 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: srl %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %shift = lshr i32 %a, %add
+ ret i32 %shift
+}
+
+; ...and again with a truncated 64-bit shift amount.
+define i32 @f7(i32 %a, i64 %amt) {
+; CHECK: f7:
+; CHECK: srl %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %trunc = trunc i64 %add to i32
+ %shift = lshr i32 %a, %trunc
+ ret i32 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i32 @f8(i32 %a, i32 %amt) {
+; CHECK: f8:
+; CHECK: srl %r2, 4095(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4095
+ %shift = lshr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check the next value up. Again, we could mask the amount instead.
+define i32 @f9(i32 %a, i32 %amt) {
+; CHECK: f9:
+; CHECK: ahi %r3, 4096
+; CHECK: srl %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4096
+ %shift = lshr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i32 @f10(i32 %a, i32 %b, i32 %c) {
+; CHECK: f10:
+; CHECK: ar {{%r3, %r4|%r4, %r3}}
+; CHECK: srl %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i32 %b, %c
+ %shift = lshr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i32 @f11(i32 %a, i32 *%ptr) {
+; CHECK: f11:
+; CHECK: l %r1, 0(%r3)
+; CHECK: srl %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i32 *%ptr
+ %shift = lshr i32 %a, %amt
+ ret i32 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-03.ll b/test/CodeGen/SystemZ/shift-03.ll
new file mode 100644
index 000000000000..ca510f3c429b
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-03.ll
@@ -0,0 +1,114 @@
+; Test 32-bit arithmetic shifts right.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
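+; SRA likewise takes a 12-bit unsigned displacement for its amount, so the
+; constant-term limits probed below match those for SLL and SRL.
+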
+; Check the low end of the SRA range.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: sra %r2, 1
+; CHECK: br %r14
+ %shift = ashr i32 %a, 1
+ ret i32 %shift
+}
+
+; Check the high end of the defined SRA range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: sra %r2, 31
+; CHECK: br %r14
+ %shift = ashr i32 %a, 31
+ ret i32 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK-NOT: sra %r2, 32
+; CHECK: br %r14
+ %shift = ashr i32 %a, 32
+ ret i32 %shift
+}
+
+; Make sure that we don't generate negative shift amounts.
+define i32 @f4(i32 %a, i32 %amt) {
+; CHECK: f4:
+; CHECK-NOT: sra %r2, -1{{.*}}
+; CHECK: br %r14
+ %sub = sub i32 %amt, 1
+ %shift = ashr i32 %a, %sub
+ ret i32 %shift
+}
+
+; Check variable shifts.
+define i32 @f5(i32 %a, i32 %amt) {
+; CHECK: f5:
+; CHECK: sra %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = ashr i32 %a, %amt
+ ret i32 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i32 @f6(i32 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: sra %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %shift = ashr i32 %a, %add
+ ret i32 %shift
+}
+
+; ...and again with a truncated 64-bit shift amount.
+define i32 @f7(i32 %a, i64 %amt) {
+; CHECK: f7:
+; CHECK: sra %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %trunc = trunc i64 %add to i32
+ %shift = ashr i32 %a, %trunc
+ ret i32 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i32 @f8(i32 %a, i32 %amt) {
+; CHECK: f8:
+; CHECK: sra %r2, 4095(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4095
+ %shift = ashr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check the next value up. Again, we could mask the amount instead.
+define i32 @f9(i32 %a, i32 %amt) {
+; CHECK: f9:
+; CHECK: ahi %r3, 4096
+; CHECK: sra %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 4096
+ %shift = ashr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i32 @f10(i32 %a, i32 %b, i32 %c) {
+; CHECK: f10:
+; CHECK: ar {{%r3, %r4|%r4, %r3}}
+; CHECK: sra %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i32 %b, %c
+ %shift = ashr i32 %a, %add
+ ret i32 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i32 @f11(i32 %a, i32 *%ptr) {
+; CHECK: f11:
+; CHECK: l %r1, 0(%r3)
+; CHECK: sra %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i32 *%ptr
+ %shift = ashr i32 %a, %amt
+ ret i32 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-04.ll b/test/CodeGen/SystemZ/shift-04.ll
new file mode 100644
index 000000000000..0146a86ee062
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-04.ll
@@ -0,0 +1,189 @@
+; Test 32-bit rotates left.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
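+; LLVM IR has no rotate operation, so each test builds the rotate from the
+; usual (shl X, N) | (lshr X, 32-N) pattern and expects it to be matched
+; to RLL. RLL takes a 20-bit signed displacement, which should explain the
+; [-524288, 524287] constant-term range probed below.
+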
+; Check the low end of the RLL range.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: rll %r2, %r2, 1
+; CHECK: br %r14
+ %parta = shl i32 %a, 1
+ %partb = lshr i32 %a, 31
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check the high end of the defined RLL range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: rll %r2, %r2, 31
+; CHECK: br %r14
+ %parta = shl i32 %a, 31
+ %partb = lshr i32 %a, 1
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; We don't generate shifts by out-of-range values.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK-NOT: rll
+; CHECK: br %r14
+ %parta = shl i32 %a, 32
+ %partb = lshr i32 %a, 0
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check variable shifts.
+define i32 @f4(i32 %a, i32 %amt) {
+; CHECK: f4:
+; CHECK: rll %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %amtb = sub i32 32, %amt
+ %parta = shl i32 %a, %amt
+ %partb = lshr i32 %a, %amtb
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check shift amounts that have a constant term.
+define i32 @f5(i32 %a, i32 %amt) {
+; CHECK: f5:
+; CHECK: rll %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %sub = sub i32 32, %add
+ %parta = shl i32 %a, %add
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; ...and again with a truncated 64-bit shift amount.
+define i32 @f6(i32 %a, i64 %amt) {
+; CHECK: f6:
+; CHECK: rll %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %addtrunc = trunc i64 %add to i32
+ %sub = sub i32 32, %addtrunc
+ %parta = shl i32 %a, %addtrunc
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; ...and again with a different truncation representation.
+define i32 @f7(i32 %a, i64 %amt) {
+; CHECK: f7:
+; CHECK: rll %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %sub = sub i64 32, %add
+ %addtrunc = trunc i64 %add to i32
+ %subtrunc = trunc i64 %sub to i32
+ %parta = shl i32 %a, %addtrunc
+ %partb = lshr i32 %a, %subtrunc
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i32 @f8(i32 %a, i32 %amt) {
+; CHECK: f8:
+; CHECK: rll %r2, %r2, 524287(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 524287
+ %sub = sub i32 32, %add
+ %parta = shl i32 %a, %add
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check the next value up, which without masking must use a separate
+; addition.
+define i32 @f9(i32 %a, i32 %amt) {
+; CHECK: f9:
+; CHECK: afi %r3, 524288
+; CHECK: rll %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 524288
+ %sub = sub i32 32, %add
+ %parta = shl i32 %a, %add
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check cases where 1 is subtracted from the shift amount.
+define i32 @f10(i32 %a, i32 %amt) {
+; CHECK: f10:
+; CHECK: rll %r2, %r2, -1(%r3)
+; CHECK: br %r14
+ %suba = sub i32 %amt, 1
+ %subb = sub i32 32, %suba
+ %parta = shl i32 %a, %suba
+ %partb = lshr i32 %a, %subb
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check the lowest value that can be subtracted from the shift amount.
+; Again, we could mask the shift amount instead.
+define i32 @f11(i32 %a, i32 %amt) {
+; CHECK: f11:
+; CHECK: rll %r2, %r2, -524288(%r3)
+; CHECK: br %r14
+ %suba = sub i32 %amt, 524288
+ %subb = sub i32 32, %suba
+ %parta = shl i32 %a, %suba
+ %partb = lshr i32 %a, %subb
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check the next value down, which without masking must use a separate
+; addition.
+define i32 @f12(i32 %a, i32 %amt) {
+; CHECK: f12:
+; CHECK: afi %r3, -524289
+; CHECK: rll %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %suba = sub i32 %amt, 524289
+ %subb = sub i32 32, %suba
+ %parta = shl i32 %a, %suba
+ %partb = lshr i32 %a, %subb
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i32 @f13(i32 %a, i32 %b, i32 %c) {
+; CHECK: f13:
+; CHECK: ar {{%r3, %r4|%r4, %r3}}
+; CHECK: rll %r2, %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i32 %b, %c
+ %sub = sub i32 32, %add
+ %parta = shl i32 %a, %add
+ %partb = lshr i32 %a, %sub
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i32 @f14(i32 %a, i32 *%ptr) {
+; CHECK: f14:
+; CHECK: l %r1, 0(%r3)
+; CHECK: rll %r2, %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i32 *%ptr
+ %amtb = sub i32 32, %amt
+ %parta = shl i32 %a, %amt
+ %partb = lshr i32 %a, %amtb
+ %or = or i32 %parta, %partb
+ ret i32 %or
+}
diff --git a/test/CodeGen/SystemZ/shift-05.ll b/test/CodeGen/SystemZ/shift-05.ll
new file mode 100644
index 000000000000..8c0ca9381bcb
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-05.ll
@@ -0,0 +1,149 @@
+; Test 64-bit shifts left.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
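+; SLLG, like RLL, takes a 20-bit signed displacement. Only the low 6 bits
+; of the amount affect a 64-bit shift, so a 32-bit amount should be usable
+; after either sign or zero extension (f6 and f7).
+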
+; Check the low end of the SLLG range.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: sllg %r2, %r2, 1
+; CHECK: br %r14
+ %shift = shl i64 %a, 1
+ ret i64 %shift
+}
+
+; Check the high end of the defined SLLG range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: sllg %r2, %r2, 63
+; CHECK: br %r14
+ %shift = shl i64 %a, 63
+ ret i64 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: sllg
+; CHECK: br %r14
+ %shift = shl i64 %a, 64
+ ret i64 %shift
+}
+
+; Check variable shifts.
+define i64 @f4(i64 %a, i64 %amt) {
+; CHECK: f4:
+; CHECK: sllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = shl i64 %a, %amt
+ ret i64 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i64 @f5(i64 %a, i64 %amt) {
+; CHECK: f5:
+; CHECK: sllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %shift = shl i64 %a, %add
+ ret i64 %shift
+}
+
+; ...and again with a sign-extended 32-bit shift amount.
+define i64 @f6(i64 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: sllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = sext i32 %add to i64
+ %shift = shl i64 %a, %addext
+ ret i64 %shift
+}
+
+; ...and now with a zero-extended 32-bit shift amount.
+define i64 @f7(i64 %a, i32 %amt) {
+; CHECK: f7:
+; CHECK: sllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = zext i32 %add to i64
+ %shift = shl i64 %a, %addext
+ ret i64 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i64 @f8(i64 %a, i64 %amt) {
+; CHECK: f8:
+; CHECK: sllg %r2, %r2, 524287(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524287
+ %shift = shl i64 %a, %add
+ ret i64 %shift
+}
+
+; Check the next value up, which without masking must use a separate
+; addition.
+define i64 @f9(i64 %a, i64 %amt) {
+; CHECK: f9:
+; CHECK: a{{g?}}fi %r3, 524288
+; CHECK: sllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524288
+ %shift = shl i64 %a, %add
+ ret i64 %shift
+}
+
+; Check cases where 1 is subtracted from the shift amount.
+define i64 @f10(i64 %a, i64 %amt) {
+; CHECK: f10:
+; CHECK: sllg %r2, %r2, -1(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 1
+ %shift = shl i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the lowest value that can be subtracted from the shift amount.
+; Again, we could mask the shift amount instead.
+define i64 @f11(i64 %a, i64 %amt) {
+; CHECK: f11:
+; CHECK: sllg %r2, %r2, -524288(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524288
+ %shift = shl i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the next value down, which without masking must use a separate
+; addition.
+define i64 @f12(i64 %a, i64 %amt) {
+; CHECK: f12:
+; CHECK: a{{g?}}fi %r3, -524289
+; CHECK: sllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524289
+ %shift = shl i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i64 @f13(i64 %a, i64 %b, i64 %c) {
+; CHECK: f13:
+; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
+; CHECK: sllg %r2, %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i64 %b, %c
+ %shift = shl i64 %a, %add
+ ret i64 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i64 @f14(i64 %a, i64 *%ptr) {
+; CHECK: f14:
+; CHECK: l %r1, 4(%r3)
+; CHECK: sllg %r2, %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i64 *%ptr
+ %shift = shl i64 %a, %amt
+ ret i64 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-06.ll b/test/CodeGen/SystemZ/shift-06.ll
new file mode 100644
index 000000000000..5f600b45a884
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-06.ll
@@ -0,0 +1,149 @@
+; Test 64-bit logical shifts right.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
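+; SRLG mirrors SLLG: a 20-bit signed displacement and the same sign- and
+; zero-extended 32-bit amount cases.
+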
+; Check the low end of the SRLG range.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: srlg %r2, %r2, 1
+; CHECK: br %r14
+ %shift = lshr i64 %a, 1
+ ret i64 %shift
+}
+
+; Check the high end of the defined SRLG range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: srlg %r2, %r2, 63
+; CHECK: br %r14
+ %shift = lshr i64 %a, 63
+ ret i64 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: srlg
+; CHECK: br %r14
+ %shift = lshr i64 %a, 64
+ ret i64 %shift
+}
+
+; Check variable shifts.
+define i64 @f4(i64 %a, i64 %amt) {
+; CHECK: f4:
+; CHECK: srlg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = lshr i64 %a, %amt
+ ret i64 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i64 @f5(i64 %a, i64 %amt) {
+; CHECK: f5:
+; CHECK: srlg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %shift = lshr i64 %a, %add
+ ret i64 %shift
+}
+
+; ...and again with a sign-extended 32-bit shift amount.
+define i64 @f6(i64 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: srlg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = sext i32 %add to i64
+ %shift = lshr i64 %a, %addext
+ ret i64 %shift
+}
+
+; ...and now with a zero-extended 32-bit shift amount.
+define i64 @f7(i64 %a, i32 %amt) {
+; CHECK: f7:
+; CHECK: srlg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = zext i32 %add to i64
+ %shift = lshr i64 %a, %addext
+ ret i64 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i64 @f8(i64 %a, i64 %amt) {
+; CHECK: f8:
+; CHECK: srlg %r2, %r2, 524287(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524287
+ %shift = lshr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check the next value up, which without masking must use a separate
+; addition.
+define i64 @f9(i64 %a, i64 %amt) {
+; CHECK: f9:
+; CHECK: a{{g?}}fi %r3, 524288
+; CHECK: srlg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524288
+ %shift = lshr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check cases where 1 is subtracted from the shift amount.
+define i64 @f10(i64 %a, i64 %amt) {
+; CHECK: f10:
+; CHECK: srlg %r2, %r2, -1(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 1
+ %shift = lshr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the lowest value that can be subtracted from the shift amount.
+; Again, we could mask the shift amount instead.
+define i64 @f11(i64 %a, i64 %amt) {
+; CHECK: f11:
+; CHECK: srlg %r2, %r2, -524288(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524288
+ %shift = lshr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the next value down, which without masking must use a separate
+; addition.
+define i64 @f12(i64 %a, i64 %amt) {
+; CHECK: f12:
+; CHECK: a{{g?}}fi %r3, -524289
+; CHECK: srlg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524289
+ %shift = lshr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i64 @f13(i64 %a, i64 %b, i64 %c) {
+; CHECK: f13:
+; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
+; CHECK: srlg %r2, %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i64 %b, %c
+ %shift = lshr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i64 @f14(i64 %a, i64 *%ptr) {
+; CHECK: f14:
+; CHECK: l %r1, 4(%r3)
+; CHECK: srlg %r2, %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i64 *%ptr
+ %shift = lshr i64 %a, %amt
+ ret i64 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-07.ll b/test/CodeGen/SystemZ/shift-07.ll
new file mode 100644
index 000000000000..ef583e8f3f0d
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-07.ll
@@ -0,0 +1,149 @@
+; Test 64-bit arithmetic shifts right.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
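+; SRAG should follow the same displacement and extension pattern as SLLG
+; and SRLG.
+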
+; Check the low end of the SRAG range.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: srag %r2, %r2, 1
+; CHECK: br %r14
+ %shift = ashr i64 %a, 1
+ ret i64 %shift
+}
+
+; Check the high end of the defined SRAG range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: srag %r2, %r2, 63
+; CHECK: br %r14
+ %shift = ashr i64 %a, 63
+ ret i64 %shift
+}
+
+; We don't generate shifts by out-of-range values.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: srag
+; CHECK: br %r14
+ %shift = ashr i64 %a, 64
+ ret i64 %shift
+}
+
+; Check variable shifts.
+define i64 @f4(i64 %a, i64 %amt) {
+; CHECK: f4:
+; CHECK: srag %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %shift = ashr i64 %a, %amt
+ ret i64 %shift
+}
+
+; Check shift amounts that have a constant term.
+define i64 @f5(i64 %a, i64 %amt) {
+; CHECK: f5:
+; CHECK: srag %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %shift = ashr i64 %a, %add
+ ret i64 %shift
+}
+
+; ...and again with a sign-extended 32-bit shift amount.
+define i64 @f6(i64 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: srag %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = sext i32 %add to i64
+ %shift = ashr i64 %a, %addext
+ ret i64 %shift
+}
+
+; ...and now with a zero-extended 32-bit shift amount.
+define i64 @f7(i64 %a, i32 %amt) {
+; CHECK: f7:
+; CHECK: srag %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %addext = zext i32 %add to i64
+ %shift = ashr i64 %a, %addext
+ ret i64 %shift
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i64 @f8(i64 %a, i64 %amt) {
+; CHECK: f8:
+; CHECK: srag %r2, %r2, 524287(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524287
+ %shift = ashr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check the next value up, which without masking must use a separate
+; addition.
+define i64 @f9(i64 %a, i64 %amt) {
+; CHECK: f9:
+; CHECK: a{{g?}}fi %r3, 524288
+; CHECK: srag %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524288
+ %shift = ashr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check cases where 1 is subtracted from the shift amount.
+define i64 @f10(i64 %a, i64 %amt) {
+; CHECK: f10:
+; CHECK: srag %r2, %r2, -1(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 1
+ %shift = ashr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the lowest value that can be subtracted from the shift amount.
+; Again, we could mask the shift amount instead.
+define i64 @f11(i64 %a, i64 %amt) {
+; CHECK: f11:
+; CHECK: srag %r2, %r2, -524288(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524288
+ %shift = ashr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check the next value down, which without masking must use a separate
+; addition.
+define i64 @f12(i64 %a, i64 %amt) {
+; CHECK: f12:
+; CHECK: a{{g?}}fi %r3, -524289
+; CHECK: srag %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %sub = sub i64 %amt, 524289
+ %shift = ashr i64 %a, %sub
+ ret i64 %shift
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i64 @f13(i64 %a, i64 %b, i64 %c) {
+; CHECK: f13:
+; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
+; CHECK: srag %r2, %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i64 %b, %c
+ %shift = ashr i64 %a, %add
+ ret i64 %shift
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i64 @f14(i64 %a, i64 *%ptr) {
+; CHECK: f14:
+; CHECK: l %r1, 4(%r3)
+; CHECK: srag %r2, %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i64 *%ptr
+ %shift = ashr i64 %a, %amt
+ ret i64 %shift
+}
diff --git a/test/CodeGen/SystemZ/shift-08.ll b/test/CodeGen/SystemZ/shift-08.ll
new file mode 100644
index 000000000000..0688a0671671
--- /dev/null
+++ b/test/CodeGen/SystemZ/shift-08.ll
@@ -0,0 +1,190 @@
+; Test 64-bit rotates left.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
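+; This combines the shift-04 and shift-05 cases: the rotate is built from
+; a (shl X, N) | (lshr X, 64-N) pattern, and RLLG should accept the same
+; 20-bit signed displacement as the other RSY-form shifts.
+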
+; Check the low end of the RLLG range.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: rllg %r2, %r2, 1
+; CHECK: br %r14
+ %parta = shl i64 %a, 1
+ %partb = lshr i64 %a, 63
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check the high end of the defined RLLG range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: rllg %r2, %r2, 63
+; CHECK: br %r14
+ %parta = shl i64 %a, 63
+ %partb = lshr i64 %a, 1
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; We don't generate shifts by out-of-range values.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK-NOT: rllg
+; CHECK: br %r14
+ %parta = shl i64 %a, 64
+ %partb = lshr i64 %a, 0
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check variable shifts.
+define i64 @f4(i64 %a, i64 %amt) {
+; CHECK: f4:
+; CHECK: rllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %amtb = sub i64 64, %amt
+ %parta = shl i64 %a, %amt
+ %partb = lshr i64 %a, %amtb
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check shift amounts that have a constant term.
+define i64 @f5(i64 %a, i64 %amt) {
+; CHECK: f5:
+; CHECK: rllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 10
+ %sub = sub i64 64, %add
+ %parta = shl i64 %a, %add
+ %partb = lshr i64 %a, %sub
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; ...and again with a sign-extended 32-bit shift amount.
+define i64 @f6(i64 %a, i32 %amt) {
+; CHECK: f6:
+; CHECK: rllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %sub = sub i32 64, %add
+ %addext = sext i32 %add to i64
+ %subext = sext i32 %sub to i64
+ %parta = shl i64 %a, %addext
+ %partb = lshr i64 %a, %subext
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; ...and now with a zero-extended 32-bit shift amount.
+define i64 @f7(i64 %a, i32 %amt) {
+; CHECK: f7:
+; CHECK: rllg %r2, %r2, 10(%r3)
+; CHECK: br %r14
+ %add = add i32 %amt, 10
+ %sub = sub i32 64, %add
+ %addext = zext i32 %add to i64
+ %subext = zext i32 %sub to i64
+ %parta = shl i64 %a, %addext
+ %partb = lshr i64 %a, %subext
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check shift amounts that have the largest in-range constant term. We could
+; mask the amount instead.
+define i64 @f8(i64 %a, i64 %amt) {
+; CHECK: f8:
+; CHECK: rllg %r2, %r2, 524287(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524287
+ %sub = sub i64 64, %add
+ %parta = shl i64 %a, %add
+ %partb = lshr i64 %a, %sub
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check the next value up, which without masking must use a separate
+; addition.
+define i64 @f9(i64 %a, i64 %amt) {
+; CHECK: f9:
+; CHECK: a{{g?}}fi %r3, 524288
+; CHECK: rllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %add = add i64 %amt, 524288
+ %sub = sub i64 64, %add
+ %parta = shl i64 %a, %add
+ %partb = lshr i64 %a, %sub
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check cases where 1 is subtracted from the shift amount.
+define i64 @f10(i64 %a, i64 %amt) {
+; CHECK: f10:
+; CHECK: rllg %r2, %r2, -1(%r3)
+; CHECK: br %r14
+ %suba = sub i64 %amt, 1
+ %subb = sub i64 64, %suba
+ %parta = shl i64 %a, %suba
+ %partb = lshr i64 %a, %subb
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check the lowest value that can be subtracted from the shift amount.
+; Again, we could mask the shift amount instead.
+define i64 @f11(i64 %a, i64 %amt) {
+; CHECK: f11:
+; CHECK: rllg %r2, %r2, -524288(%r3)
+; CHECK: br %r14
+ %suba = sub i64 %amt, 524288
+ %subb = sub i64 64, %suba
+ %parta = shl i64 %a, %suba
+ %partb = lshr i64 %a, %subb
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check the next value down, which without masking must use a separate
+; addition.
+define i64 @f12(i64 %a, i64 %amt) {
+; CHECK: f12:
+; CHECK: a{{g?}}fi %r3, -524289
+; CHECK: rllg %r2, %r2, 0(%r3)
+; CHECK: br %r14
+ %suba = sub i64 %amt, 524289
+ %subb = sub i64 64, %suba
+ %parta = shl i64 %a, %suba
+ %partb = lshr i64 %a, %subb
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check that we don't try to generate "indexed" shifts.
+define i64 @f13(i64 %a, i64 %b, i64 %c) {
+; CHECK: f13:
+; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
+; CHECK: rllg %r2, %r2, 0({{%r[34]}})
+; CHECK: br %r14
+ %add = add i64 %b, %c
+ %sub = sub i64 64, %add
+ %parta = shl i64 %a, %add
+ %partb = lshr i64 %a, %sub
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
+
+; Check that the shift amount uses an address register. It cannot be in %r0.
+define i64 @f14(i64 %a, i64 *%ptr) {
+; CHECK: f14:
+; CHECK: l %r1, 4(%r3)
+; CHECK: rllg %r2, %r2, 0(%r1)
+; CHECK: br %r14
+ %amt = load i64 *%ptr
+ %amtb = sub i64 64, %amt
+ %parta = shl i64 %a, %amt
+ %partb = lshr i64 %a, %amtb
+ %or = or i64 %parta, %partb
+ ret i64 %or
+}
diff --git a/test/CodeGen/SystemZ/tls-01.ll b/test/CodeGen/SystemZ/tls-01.ll
new file mode 100644
index 000000000000..49037ad51c69
--- /dev/null
+++ b/test/CodeGen/SystemZ/tls-01.ll
@@ -0,0 +1,22 @@
+; Test initial-exec TLS accesses.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-MAIN
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s -check-prefix=CHECK-CP
+
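+; For s390x the thread pointer is kept in the access registers, with %a0
+; holding the high 32 bits and %a1 the low 32 bits; the EAR/SLLG/EAR
+; sequence below should be rebuilding it in a GPR before adding the
+; thread-local offset.
+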
+@x = thread_local global i32 0
+
+; The offset must be loaded from the constant pool. It doesn't really
+; matter whether we use LARL/AG or LGRL/AGR for the last part.
+define i32 *@foo() {
+; CHECK-CP: .LCP{{.*}}:
+; CHECK-CP: .quad x@NTPOFF
+;
+; CHECK-MAIN: foo:
+; CHECK-MAIN: ear [[HIGH:%r[0-5]]], %a0
+; CHECK-MAIN: sllg %r2, [[HIGH]], 32
+; CHECK-MAIN: ear %r2, %a1
+; CHECK-MAIN: larl %r1, .LCP{{.*}}
+; CHECK-MAIN: ag %r2, 0(%r1)
+; CHECK-MAIN: br %r14
+ ret i32 *@x
+}
diff --git a/test/CodeGen/SystemZ/xor-01.ll b/test/CodeGen/SystemZ/xor-01.ll
new file mode 100644
index 000000000000..30bdbe7901f9
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-01.ll
@@ -0,0 +1,129 @@
+; Test 32-bit XORs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
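+; X takes a 12-bit unsigned displacement and XY a 20-bit signed one, which
+; should account for the 4092/4096 and +/-524k byte offsets probed below;
+; both forms allow an index register.
+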
+; Check XR.
+define i32 @f1(i32 %a, i32 %b) {
+; CHECK: f1:
+; CHECK: xr %r2, %r3
+; CHECK: br %r14
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the low end of the X range.
+define i32 @f2(i32 %a, i32 *%src) {
+; CHECK: f2:
+; CHECK: x %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i32 *%src
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the high end of the aligned X range.
+define i32 @f3(i32 %a, i32 *%src) {
+; CHECK: f3:
+; CHECK: x %r2, 4092(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1023
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the next word up, which should use XY instead of X.
+define i32 @f4(i32 %a, i32 *%src) {
+; CHECK: f4:
+; CHECK: xy %r2, 4096(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 1024
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the high end of the aligned XY range.
+define i32 @f5(i32 %a, i32 *%src) {
+; CHECK: f5:
+; CHECK: xy %r2, 524284(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131071
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the next word up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f6(i32 %a, i32 *%src) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: x %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 131072
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the high end of the negative aligned XY range.
+define i32 @f7(i32 %a, i32 *%src) {
+; CHECK: f7:
+; CHECK: xy %r2, -4(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -1
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the low end of the XY range.
+define i32 @f8(i32 %a, i32 *%src) {
+; CHECK: f8:
+; CHECK: xy %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131072
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i32 @f9(i32 %a, i32 *%src) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: x %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i32 *%src, i64 -131073
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check that X allows an index.
+define i32 @f10(i32 %a, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: x %r2, 4092({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4092
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
+
+; Check that XY allows an index.
+define i32 @f11(i32 %a, i64 %src, i64 %index) {
+; CHECK: f11:
+; CHECK: xy %r2, 4096({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i32 *
+ %b = load i32 *%ptr
+ %xor = xor i32 %a, %b
+ ret i32 %xor
+}
diff --git a/test/CodeGen/SystemZ/xor-02.ll b/test/CodeGen/SystemZ/xor-02.ll
new file mode 100644
index 000000000000..c2b52b9b8e20
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-02.ll
@@ -0,0 +1,40 @@
+; Test 32-bit XORs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
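+; XILF xors the low 32 bits of a register with a full 32-bit immediate, so
+; every i32 constant except the no-op 0 should be usable; note that
+; negative constants are printed in their unsigned form.
+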
+; Check the lowest useful XILF value.
+define i32 @f1(i32 %a) {
+; CHECK: f1:
+; CHECK: xilf %r2, 1
+; CHECK: br %r14
+ %xor = xor i32 %a, 1
+ ret i32 %xor
+}
+
+; Check the high end of the signed range.
+define i32 @f2(i32 %a) {
+; CHECK: f2:
+; CHECK: xilf %r2, 2147483647
+; CHECK: br %r14
+ %xor = xor i32 %a, 2147483647
+ ret i32 %xor
+}
+
+; Check the low end of the signed range, which should be treated
+; as a positive value.
+define i32 @f3(i32 %a) {
+; CHECK: f3:
+; CHECK: xilf %r2, 2147483648
+; CHECK: br %r14
+ %xor = xor i32 %a, -2147483648
+ ret i32 %xor
+}
+
+; Check the high end of the XILF range.
+define i32 @f4(i32 %a) {
+; CHECK: f4:
+; CHECK: xilf %r2, 4294967295
+; CHECK: br %r14
+ %xor = xor i32 %a, 4294967295
+ ret i32 %xor
+}
diff --git a/test/CodeGen/SystemZ/xor-03.ll b/test/CodeGen/SystemZ/xor-03.ll
new file mode 100644
index 000000000000..a4851b33090d
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-03.ll
@@ -0,0 +1,94 @@
+; Test 64-bit XORs in which the second operand is variable.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
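+; XG takes a 20-bit signed displacement and allows an index register,
+; giving the aligned [-524288, 524280] range probed below.
+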
+; Check XGR.
+define i64 @f1(i64 %a, i64 %b) {
+; CHECK: f1:
+; CHECK: xgr %r2, %r3
+; CHECK: br %r14
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check XG with no displacement.
+define i64 @f2(i64 %a, i64 *%src) {
+; CHECK: f2:
+; CHECK: xg %r2, 0(%r3)
+; CHECK: br %r14
+ %b = load i64 *%src
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check the high end of the aligned XG range.
+define i64 @f3(i64 %a, i64 *%src) {
+; CHECK: f3:
+; CHECK: xg %r2, 524280(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65535
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check the next doubleword up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f4(i64 %a, i64 *%src) {
+; CHECK: f4:
+; CHECK: agfi %r3, 524288
+; CHECK: xg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 65536
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check the high end of the negative aligned XG range.
+define i64 @f5(i64 %a, i64 *%src) {
+; CHECK: f5:
+; CHECK: xg %r2, -8(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -1
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check the low end of the XG range.
+define i64 @f6(i64 %a, i64 *%src) {
+; CHECK: f6:
+; CHECK: xg %r2, -524288(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65536
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check the next doubleword down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define i64 @f7(i64 %a, i64 *%src) {
+; CHECK: f7:
+; CHECK: agfi %r3, -524296
+; CHECK: xg %r2, 0(%r3)
+; CHECK: br %r14
+ %ptr = getelementptr i64 *%src, i64 -65537
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
+
+; Check that XG allows an index.
+define i64 @f8(i64 %a, i64 %src, i64 %index) {
+; CHECK: f8:
+; CHECK: xg %r2, 524280({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 524280
+ %ptr = inttoptr i64 %add2 to i64 *
+ %b = load i64 *%ptr
+ %xor = xor i64 %a, %b
+ ret i64 %xor
+}
diff --git a/test/CodeGen/SystemZ/xor-04.ll b/test/CodeGen/SystemZ/xor-04.ll
new file mode 100644
index 000000000000..cc141d391a85
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-04.ll
@@ -0,0 +1,69 @@
+; Test 64-bit XORs in which the second operand is constant.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
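+; A 64-bit XOR with a constant is assumed to be split into XIHF (high 32
+; bits) and XILF (low 32 bits), with one instruction dropped when its half
+; of the constant is zero.
+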
+; Check the lowest useful XILF value.
+define i64 @f1(i64 %a) {
+; CHECK: f1:
+; CHECK: xilf %r2, 1
+; CHECK: br %r14
+ %xor = xor i64 %a, 1
+ ret i64 %xor
+}
+
+; Check the high end of the XILF range.
+define i64 @f2(i64 %a) {
+; CHECK: f2:
+; CHECK: xilf %r2, 4294967295
+; CHECK: br %r14
+ %xor = xor i64 %a, 4294967295
+ ret i64 %xor
+}
+
+; Check the lowest useful XIHF value, which is one up from the above.
+define i64 @f3(i64 %a) {
+; CHECK: f3:
+; CHECK: xihf %r2, 1
+; CHECK: br %r14
+ %xor = xor i64 %a, 4294967296
+ ret i64 %xor
+}
+
+; Check the next value up again, which needs a combination of XIHF and XILF.
+define i64 @f4(i64 %a) {
+; CHECK: f4:
+; CHECK: xihf %r2, 1
+; CHECK: xilf %r2, 4294967295
+; CHECK: br %r14
+ %xor = xor i64 %a, 8589934591
+ ret i64 %xor
+}
+
+; Check the high end of the XIHF range.
+define i64 @f5(i64 %a) {
+; CHECK: f5:
+; CHECK: xihf %r2, 4294967295
+; CHECK: br %r14
+ %xor = xor i64 %a, -4294967296
+ ret i64 %xor
+}
+
+; Check the next value up, which again must use XIHF and XILF.
+define i64 @f6(i64 %a) {
+; CHECK: f6:
+; CHECK: xihf %r2, 4294967295
+; CHECK: xilf %r2, 1
+; CHECK: br %r14
+ %xor = xor i64 %a, -4294967295
+ ret i64 %xor
+}
+
+; Check full bitwise negation.
+define i64 @f7(i64 %a) {
+; CHECK: f7:
+; CHECK: xihf %r2, 4294967295
+; CHECK: xilf %r2, 4294967295
+; CHECK: br %r14
+ %xor = xor i64 %a, -1
+ ret i64 %xor
+}
diff --git a/test/CodeGen/SystemZ/xor-05.ll b/test/CodeGen/SystemZ/xor-05.ll
new file mode 100644
index 000000000000..9ef0d20ca52b
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-05.ll
@@ -0,0 +1,165 @@
+; Test XORs of a constant into a byte of memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
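+; XI xors a byte in memory with an 8-bit immediate through a 12-bit
+; unsigned displacement; XIY is the long-displacement form. Neither should
+; allow an index register, which f12 and f13 check.
+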
+; Check the lowest useful constant, expressed as a signed integer.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: xi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, -255
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the highest useful constant, expressed as a signed integer.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, -2
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the lowest useful constant, expressed as an unsigned integer.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: xi 0(%r2), 1
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 1
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the highest useful constant, expressed as an unsigned integer.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 254
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the XI range.
+define void @f5(i8 *%src) {
+; CHECK: f5:
+; CHECK: xi 4095(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4095
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which should use XIY instead of XI.
+define void @f6(i8 *%src) {
+; CHECK: f6:
+; CHECK: xiy 4096(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 4096
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the XIY range.
+define void @f7(i8 *%src) {
+; CHECK: f7:
+; CHECK: xiy 524287(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524287
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the next byte up, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f8(i8 *%src) {
+; CHECK: f8:
+; CHECK: agfi %r2, 524288
+; CHECK: xi 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 524288
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the high end of the negative XIY range.
+define void @f9(i8 *%src) {
+; CHECK: f9:
+; CHECK: xiy -1(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -1
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the low end of the XIY range.
+define void @f10(i8 *%src) {
+; CHECK: f10:
+; CHECK: xiy -524288(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524288
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check the next byte down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f11(i8 *%src) {
+; CHECK: f11:
+; CHECK: agfi %r2, -524289
+; CHECK: xi 0(%r2), 127
+; CHECK: br %r14
+ %ptr = getelementptr i8 *%src, i64 -524289
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check that XI does not allow an index.
+define void @f12(i64 %src, i64 %index) {
+; CHECK: f12:
+; CHECK: agr %r2, %r3
+; CHECK: xi 4095(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
+
+; Check that XIY does not allow an index.
+define void @f13(i64 %src, i64 %index) {
+; CHECK: f13:
+; CHECK: agr %r2, %r3
+; CHECK: xiy 4096(%r2), 127
+; CHECK: br %r14
+ %add1 = add i64 %src, %index
+ %add2 = add i64 %add1, 4096
+ %ptr = inttoptr i64 %add2 to i8 *
+ %val = load i8 *%ptr
+ %xor = xor i8 %val, 127
+ store i8 %xor, i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/SystemZ/xor-06.ll b/test/CodeGen/SystemZ/xor-06.ll
new file mode 100644
index 000000000000..0ffff47c2b5a
--- /dev/null
+++ b/test/CodeGen/SystemZ/xor-06.ll
@@ -0,0 +1,108 @@
+; Test that we can use XI for byte operations that are expressed as i32
+; or i64 operations.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
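+; Since only the low byte of the result is stored back, the extension,
+; XOR and truncation should all fold into a single XI regardless of the
+; extension type or how the constant is written.
+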
+; Zero extension to 32 bits, negative constant.
+define void @f1(i8 *%ptr) {
+; CHECK: f1:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %xor = xor i32 %ext, -2
+ %trunc = trunc i32 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, negative constant.
+define void @f2(i8 *%ptr) {
+; CHECK: f2:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %xor = xor i64 %ext, -2
+ %trunc = trunc i64 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 32 bits, positive constant.
+define void @f3(i8 *%ptr) {
+; CHECK: f3:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i32
+ %xor = xor i32 %ext, 254
+ %trunc = trunc i32 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Zero extension to 64 bits, positive constant.
+define void @f4(i8 *%ptr) {
+; CHECK: f4:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = zext i8 %val to i64
+ %xor = xor i64 %ext, 254
+ %trunc = trunc i64 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, negative constant.
+define void @f5(i8 *%ptr) {
+; CHECK: f5:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %xor = xor i32 %ext, -2
+ %trunc = trunc i32 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, negative constant.
+define void @f6(i8 *%ptr) {
+; CHECK: f6:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %xor = xor i64 %ext, -2
+ %trunc = trunc i64 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 32 bits, positive constant.
+define void @f7(i8 *%ptr) {
+; CHECK: f7:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i32
+ %xor = xor i32 %ext, 254
+ %trunc = trunc i32 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
+
+; Sign extension to 64 bits, positive constant.
+define void @f8(i8 *%ptr) {
+; CHECK: f8:
+; CHECK: xi 0(%r2), 254
+; CHECK: br %r14
+ %val = load i8 *%ptr
+ %ext = sext i8 %val to i64
+ %xor = xor i64 %ext, 254
+ %trunc = trunc i64 %xor to i8
+ store i8 %trunc, i8 *%ptr
+ ret void
+}
diff --git a/test/CodeGen/Thumb/large-stack.ll b/test/CodeGen/Thumb/large-stack.ll
index f8c438c6e0a4..680976e74fba 100644
--- a/test/CodeGen/Thumb/large-stack.ll
+++ b/test/CodeGen/Thumb/large-stack.ll
@@ -20,8 +20,8 @@ define void @test2() {
define i32 @test3() {
; CHECK: test3:
-; CHECK: ldr.n r2, LCPI
-; CHECK: add sp, r2
+; CHECK: ldr.n r1, LCPI
+; CHECK: add sp, r1
; CHECK: ldr.n r1, LCPI
; CHECK: add r1, sp
; CHECK: subs r4, r7, #4
diff --git a/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll b/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
index 502b138f65c8..e905cb9114c2 100644
--- a/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
+++ b/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
@@ -18,13 +18,13 @@ define hidden void @func(i8* %Data) nounwind ssp {
tail call void @def(%"myclass"* %2) nounwind
%3 = getelementptr inbounds i8* %Data, i32 8
%4 = bitcast i8* %3 to i8**
- %5 = load i8** %4, align 4, !tbaa !0
+ %5 = load i8** %4, align 4
tail call void @ghi(i8* %5) nounwind
%6 = bitcast i8* %Data to void (i8*)**
- %7 = load void (i8*)** %6, align 4, !tbaa !0
+ %7 = load void (i8*)** %6, align 4
%8 = getelementptr inbounds i8* %Data, i32 4
%9 = bitcast i8* %8 to i8**
- %10 = load i8** %9, align 4, !tbaa !0
+ %10 = load i8** %9, align 4
%11 = icmp eq i8* %Data, null
br i1 %11, label %14, label %12
@@ -47,7 +47,3 @@ declare void @abc(%"myclass"*)
declare void @ghi(i8*)
declare %"myclass"* @jkl(%"myclass"*) nounwind
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll b/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll
index 2e4cb1fe7eda..cb90bf644d5f 100644
--- a/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll
+++ b/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll
@@ -4,7 +4,9 @@
; it makes a ton of annoying overlapping live ranges. This code should not
; cause spills!
;
-; RUN: llc < %s -march=x86 -stats 2>&1 | not grep spilled
+; RUN: llc < %s -march=x86 -stats 2>&1 | FileCheck %s
+
+; CHECK-NOT: spilled
target datalayout = "e-p:32:32"
diff --git a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
index c5c74d104863..c4b08a3be283 100644
--- a/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
+++ b/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
@@ -1,7 +1,8 @@
; PR850
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att > %t
-; RUN: grep "movl 4(%eax),%ebp" %t
-; RUN: grep "movl 0(%eax), %ebx" %t
+; RUN: llc < %s -march=x86 -x86-asm-syntax=att | FileCheck %s
+
+; CHECK: {{movl 4[(]%eax[)],%ebp}}
+; CHECK: {{movl 0[(]%eax[)], %ebx}}
define i32 @foo(i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i) {
%tmp9.i.i = call i32 asm sideeffect "push %ebp\0Apush %ebx\0Amovl 4($2),%ebp\0Amovl 0($2), %ebx\0Amovl $1,%eax\0Aint $$0x80\0Apop %ebx\0Apop %ebp", "={ax},i,0,{cx},{dx},{si},{di}"( i32 192, i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i ) ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/2006-11-27-SelectLegalize.ll b/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
index ea2e6db61e1a..ba83a8db8399 100644
--- a/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
+++ b/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=x86 | grep test.*1
+; RUN: llc < %s -march=x86 | FileCheck %s
; PR1016
+; CHECK: {{test.*1}}
+
define i32 @test(i32 %A, i32 %B, i32 %C) {
%a = trunc i32 %A to i1 ; <i1> [#uses=1]
%D = select i1 %a, i32 %B, i32 %C ; <i32> [#uses=1]
diff --git a/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll b/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
index 18b06dc0857c..366f5830392d 100644
--- a/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
+++ b/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -mcpu=yonah -march=x86 | \
-; RUN: grep "cmpltsd %xmm0, %xmm0"
+; RUN: llc < %s -mcpu=yonah -march=x86 | FileCheck %s
+
target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin9"
+; CHECK: {{cmpltsd %xmm0, %xmm0}}
define void @acoshf() {
%tmp19 = tail call <2 x double> asm sideeffect "pcmpeqd $0, $0 \0A\09 cmpltsd $0, $0", "=x,0,~{dirflag},~{fpsr},~{flags}"( <2 x double> zeroinitializer ) ; <<2 x double>> [#uses=0]
diff --git a/test/CodeGen/X86/2007-04-24-Huge-Stack.ll b/test/CodeGen/X86/2007-04-24-Huge-Stack.ll
index 7528129971ab..648718cc7223 100644
--- a/test/CodeGen/X86/2007-04-24-Huge-Stack.ll
+++ b/test/CodeGen/X86/2007-04-24-Huge-Stack.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=x86-64 | not grep 4294967112
+; RUN: llc < %s -march=x86-64 | FileCheck %s
; PR1348
+; CHECK-NOT: 4294967112
+
%struct.md5_ctx = type { i32, i32, i32, i32, [2 x i32], i32, [128 x i8], [4294967288 x i8] }
define i8* @md5_buffer(i8* %buffer, i64 %len, i8* %resblock) {
diff --git a/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll b/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll
index b27ef836960f..38fc5e18fe28 100644
--- a/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll
+++ b/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep punpckhwd
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+
+; CHECK-NOT: punpckhwd
declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>)
diff --git a/test/CodeGen/X86/2007-06-15-IntToMMX.ll b/test/CodeGen/X86/2007-06-15-IntToMMX.ll
index 660d4fe7b19e..5612d9eb282c 100644
--- a/test/CodeGen/X86/2007-06-15-IntToMMX.ll
+++ b/test/CodeGen/X86/2007-06-15-IntToMMX.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep paddusw
+; RUN: llc < %s -march=x86-64 -mattr=+mmx | FileCheck %s
+
+; CHECK: paddusw
+
@R = external global x86_mmx ; <x86_mmx*> [#uses=1]
define void @foo(<1 x i64> %A, <1 x i64> %B) {
diff --git a/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll b/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll
index 62624a7e3447..4f7ae327d1fd 100644
--- a/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll
+++ b/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 | not grep movl
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; CHECK-NOT: movl
define zeroext i8 @t(i8 zeroext %x, i8 zeroext %y) {
%tmp2 = add i8 %x, 2
diff --git a/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll b/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
index d3120f3e0ef7..82052b13e18d 100644
--- a/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
+++ b/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
@@ -1,4 +1,8 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | grep inc | not grep PTR
+; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | FileCheck %s
+
+; CHECK: inc
+; CHECK-NOT: PTR
+; CHECK: {{$}}
define signext i16 @t(i32* %bitptr, i32* %source, i8** %byteptr, i32 %scale, i32 %round) {
entry:
diff --git a/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll b/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
index 56a109acfc79..c4670242b531 100644
--- a/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
+++ b/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
@@ -1,9 +1,11 @@
-; RUN: llc < %s -relocation-model=static | grep "foo str$"
+; RUN: llc < %s -relocation-model=static | FileCheck %s
; PR1761
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-pc-linux"
@str = internal constant [12 x i8] c"init/main.c\00" ; <[12 x i8]*> [#uses=1]
+; CHECK: {{foo str$}}
+
define i32 @unknown_bootoption() {
entry:
tail call void asm sideeffect "foo ${0:c}\0A", "i,~{dirflag},~{fpsr},~{flags}"( i8* getelementptr ([12 x i8]* @str, i32 0, i64 0) )
diff --git a/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll b/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll
index 6997d535ff92..e8c957b1ff64 100644
--- a/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll
+++ b/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -o - | grep sinl
+; RUN: llc < %s -o - | FileCheck %s
+
+; CHECK: sinl
target triple = "i686-pc-linux-gnu"
diff --git a/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll b/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
index a52b36588a36..b06b249a6326 100644
--- a/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
+++ b/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s | grep "a:" | not grep ax
-; RUN: llc < %s | grep "b:" | not grep ax
+; RUN: llc < %s | FileCheck %s
; PR2078
; The clobber list says that "ax" is clobbered. Make sure that eax isn't
; allocated to the input/output register.
@@ -15,6 +14,10 @@ entry:
ret void
}
+; CHECK: a:
+; CHECK-NOT: ax
+; CHECK: {{$}}
+
define void @test2(i16* %block, i8* %pixels, i32 %line_size) nounwind {
entry:
%tmp1 = getelementptr i16* %block, i32 64 ; <i16*> [#uses=1]
@@ -22,3 +25,6 @@ entry:
ret void
}
+; CHECK: b:
+; CHECK-NOT: ax
+; CHECK: {{$}}
diff --git a/test/CodeGen/X86/2008-11-06-testb.ll b/test/CodeGen/X86/2008-11-06-testb.ll
index f8f317c2dd46..e7caa7a10670 100644
--- a/test/CodeGen/X86/2008-11-06-testb.ll
+++ b/test/CodeGen/X86/2008-11-06-testb.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep testb
+; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
+
+; CHECK: testb
; ModuleID = '<stdin>'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
diff --git a/test/CodeGen/X86/2009-02-25-CommuteBug.ll b/test/CodeGen/X86/2009-02-25-CommuteBug.ll
index 9ea34e27a17e..5bec179534b9 100644
--- a/test/CodeGen/X86/2009-02-25-CommuteBug.ll
+++ b/test/CodeGen/X86/2009-02-25-CommuteBug.ll
@@ -1,7 +1,9 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats 2>&1 | not grep commuted
+; RUN: llc < %s -march=x86 -mattr=+sse2 -stats 2>&1 | FileCheck %s
; rdar://6608609
+; CHECK-NOT: commuted
+
define <2 x double> @t(<2 x double> %A, <2 x double> %B, <2 x double> %C) nounwind readnone {
entry:
%tmp.i2 = bitcast <2 x double> %B to <2 x i64> ; <<2 x i64>> [#uses=1]
diff --git a/test/CodeGen/X86/2009-03-25-TestBug.ll b/test/CodeGen/X86/2009-03-25-TestBug.ll
index f40fddc5a36d..cc1d73da05c5 100644
--- a/test/CodeGen/X86/2009-03-25-TestBug.ll
+++ b/test/CodeGen/X86/2009-03-25-TestBug.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -march=x86 -o %t
-; RUN: not grep and %t
-; RUN: not grep shr %t
+; RUN: llc < %s -march=x86 | FileCheck %s
; rdar://6661955
+; CHECK-NOT: and
+; CHECK-NOT: shr
+
@hello = internal constant [7 x i8] c"hello\0A\00"
@world = internal constant [7 x i8] c"world\0A\00"
diff --git a/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll b/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
index 0607eda271af..679a65d93d09 100644
--- a/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
+++ b/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
@@ -1,8 +1,10 @@
; REQUIRES: asserts
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats 2>&1 | grep "Number of modref unfolded"
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats 2>&1 | FileCheck %s
; XFAIL: *
; 69408 removed the opportunity for this optimization to work
+; CHECK: {{Number of modref unfolded}}
+
%struct.SHA512_CTX = type { [8 x i64], i64, i64, %struct.anon, i32, i32 }
%struct.anon = type { [16 x i64] }
@K512 = external constant [80 x i64], align 32 ; <[80 x i64]*> [#uses=2]
diff --git a/test/CodeGen/X86/2009-04-24.ll b/test/CodeGen/X86/2009-04-24.ll
index 08bf9e3f9f36..d104c875760a 100644
--- a/test/CodeGen/X86/2009-04-24.ll
+++ b/test/CodeGen/X86/2009-04-24.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -regalloc=fast -optimize-regalloc=0 -relocation-model=pic > %t2
-; RUN: grep "leaq.*TLSGD" %t2
-; RUN: grep "__tls_get_addr" %t2
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -regalloc=fast -optimize-regalloc=0 -relocation-model=pic | FileCheck %s
; PR4004
+; CHECK: {{leaq.*TLSGD}}
+; CHECK: {{__tls_get_addr}}
+
@i = thread_local global i32 15
define i32 @f() {
diff --git a/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll b/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
index 738b5fbb7048..7468acb95f11 100644
--- a/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
+++ b/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -relocation-model=static > %t
-; RUN: grep "1: ._pv_cpu_ops+8" %t
-; RUN: grep "2: ._G" %t
+; RUN: llc < %s -relocation-model=static | FileCheck %s
; PR4152
+; CHECK: {{1: ._pv_cpu_ops[+]8}}
+; CHECK: {{2: ._G}}
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin9.6"
%struct.pv_cpu_ops = type { i32, [2 x i32] }
diff --git a/test/CodeGen/X86/2009-05-23-available_externally.ll b/test/CodeGen/X86/2009-05-23-available_externally.ll
index 94773d91ea17..c990108a21c2 100644
--- a/test/CodeGen/X86/2009-05-23-available_externally.ll
+++ b/test/CodeGen/X86/2009-05-23-available_externally.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic | grep atoi | grep PLT
+; RUN: llc < %s -relocation-model=pic | FileCheck %s
; PR4253
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
@@ -9,6 +9,9 @@ entry:
ret i32 %call
}
+; CHECK: foo
+; CHECK: {{atoi.+PLT}}
+
define available_externally fastcc i32 @atoi(i8* %__nptr) nounwind readonly {
entry:
%call = tail call i64 @strtol(i8* nocapture %__nptr, i8** null, i32 10) nounwind readonly ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
index 30763225a53d..3061dc2c8275 100644
--- a/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
+++ b/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+mmx,+sse2 | not grep movl
+; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -mattr=+mmx,+sse2 | FileCheck %s
+
+; CHECK-NOT: movl
define <8 x i8> @a(i8 zeroext %x) nounwind {
%r = insertelement <8 x i8> undef, i8 %x, i32 0
diff --git a/test/CodeGen/X86/2009-08-08-CastError.ll b/test/CodeGen/X86/2009-08-08-CastError.ll
index 2dc812dbc62d..748c5a8cc196 100644
--- a/test/CodeGen/X86/2009-08-08-CastError.ll
+++ b/test/CodeGen/X86/2009-08-08-CastError.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mtriple=x86_64-pc-mingw64 | grep movabsq
+; RUN: llc < %s -mtriple=x86_64-pc-mingw64 | FileCheck %s
+
+; CHECK: movabsq
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
diff --git a/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll b/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
index 8ab93fcb978f..7650a5c3be88 100644
--- a/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
+++ b/test/CodeGen/X86/2010-05-25-DotDebugLoc.ll
@@ -203,7 +203,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786689, metadata !1, metadata !"a", metadata !2, i32 1921, metadata !9, i32 0, null} ; [ DW_TAG_arg_variable ]
!1 = metadata !{i32 786478, metadata !2, metadata !"__divsc3", metadata !"__divsc3", metadata !"__divsc3", metadata !2, i32 1922, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true, %0 (float, float, float, float)* @__divsc3, null, null, metadata !43, i32 1922} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !45} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !44, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !44, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{i32 786453, metadata !45, metadata !2, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
!5 = metadata !{metadata !6, metadata !9, metadata !9, metadata !9, metadata !9}
!6 = metadata !{i32 786454, metadata !46, metadata !7, metadata !"SCtype", i32 170, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_typedef ]
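The metadata edit above is another recurring change in this patch: every DW_TAG_compile_unit node (tag value 786449) gains one extra null operand ahead of its trailing operands, e.g.

  old: ..., metadata !44, null, metadata !""}   ; [ DW_TAG_compile_unit ]
  new: ..., metadata !44, null, null, metadata !""}

presumably reserving a slot for a newly added compile-unit field; the debug-info tests below are updated the same way so they keep parsing.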
diff --git a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
index 6519ca063a7c..6510ff17f7bb 100644
--- a/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
+++ b/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
@@ -25,7 +25,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786484, i32 0, metadata !1, metadata !"ret", metadata !"ret", metadata !"", metadata !1, i32 7, metadata !3, i1 false, i1 true, null} ; [ DW_TAG_variable ]
!1 = metadata !{i32 786473, metadata !36} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !36, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !32, metadata !31, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !36, i32 1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !32, metadata !31, metadata !31, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786468, metadata !1, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
!4 = metadata !{i32 786689, metadata !5, metadata !"x", metadata !1, i32 12, metadata !3, i32 0, null} ; [ DW_TAG_arg_variable ]
!5 = metadata !{i32 786478, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", metadata !1, i32 13, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true, void (i32)* @foo, null, null, metadata !33, i32 13} ; [ DW_TAG_subprogram ]
diff --git a/test/CodeGen/X86/2010-05-28-Crash.ll b/test/CodeGen/X86/2010-05-28-Crash.ll
index 4ea3bf077841..ee00dbacbf60 100644
--- a/test/CodeGen/X86/2010-05-28-Crash.ll
+++ b/test/CodeGen/X86/2010-05-28-Crash.ll
@@ -27,7 +27,7 @@ entry:
!0 = metadata !{i32 786689, metadata !1, metadata !"y", metadata !2, i32 2, metadata !6, i32 0, null} ; [ DW_TAG_arg_variable ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 2, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true, i32 (i32)* @foo, null, null, metadata !15, i32 2} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !18} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !17, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !17, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
!5 = metadata !{metadata !6, metadata !6}
!6 = metadata !{i32 786468, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll b/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
index b22a391ef358..b5679e665696 100644
--- a/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
+++ b/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
@@ -1,4 +1,5 @@
-; RUN: llc -fast-isel -march=x86 < %s | grep %fs:
+; RUN: llc -fast-isel -march=x86 < %s | FileCheck %s
+; CHECK: %fs:
define i32 @test1(i32 addrspace(257)* %arg) nounwind {
%tmp = load i32 addrspace(257)* %arg
diff --git a/test/CodeGen/X86/2010-08-04-StackVariable.ll b/test/CodeGen/X86/2010-08-04-StackVariable.ll
index aaa562a439d5..91711bb758c3 100644
--- a/test/CodeGen/X86/2010-08-04-StackVariable.ll
+++ b/test/CodeGen/X86/2010-08-04-StackVariable.ll
@@ -80,7 +80,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"SVal", metadata !"SVal", metadata !"", metadata !2, i32 11, metadata !14, i1 false, i1 false, i32 0, i32 0, null, i1 false, i1 false, null, null, null, null, i32 11} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786451, metadata !2, metadata !"SVal", metadata !2, i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_structure_type ]
!2 = metadata !{i32 786473, metadata !"small.cc", metadata !"/Users/manav/R8248330", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 4, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, null, null, metadata !46, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, i32 4, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, null, null, metadata !46, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{metadata !5, metadata !7, metadata !0, metadata !9}
!5 = metadata !{i32 786445, metadata !1, metadata !"Data", metadata !2, i32 7, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
!6 = metadata !{i32 786447, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, null} ; [ DW_TAG_pointer_type ]
diff --git a/test/CodeGen/X86/2010-11-02-DbgParameter.ll b/test/CodeGen/X86/2010-11-02-DbgParameter.ll
index 31a6822b34b8..8719f738b7cc 100644
--- a/test/CodeGen/X86/2010-11-02-DbgParameter.ll
+++ b/test/CodeGen/X86/2010-11-02-DbgParameter.ll
@@ -19,7 +19,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"foo", metadata !"foo", metadata !"", metadata !1, i32 3, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 true, i32 (%struct.bar*)* @foo, null, null, metadata !16, i32 3} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !17} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 2.9 (trunk 117922)", i1 true, metadata !"", i32 0, null, null, metadata !15, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 2.9 (trunk 117922)", i1 true, metadata !"", i32 0, null, null, metadata !15, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, metadata !2, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
index 2355528a81e8..14fb3e493231 100644
--- a/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
+++ b/test/CodeGen/X86/2011-01-24-DbgValue-Before-Use.ll
@@ -73,7 +73,7 @@ declare i32 @puts(i8* nocapture) nounwind
!0 = metadata !{i32 786478, metadata !1, metadata !"gcd", metadata !"gcd", metadata !"", metadata !1, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i64 (i64, i64)* @gcd, null, null, metadata !29, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !31} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !31, i32 12, metadata !"clang version 2.9 (trunk 124117)", i1 true, metadata !"", i32 0, null, null, metadata !28, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !31, i32 12, metadata !"clang version 2.9 (trunk 124117)", i1 true, metadata !"", i32 0, null, null, metadata !28, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, null, metadata !2, metadata !"long int", i32 0, i64 64, i64 64, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
index 54d2b403509d..6d91109daafb 100644
--- a/test/CodeGen/X86/2011-09-14-valcoalesce.ll
+++ b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -96,7 +96,7 @@ while.body.i188: ; preds = %for.end173.i, %if.e
while.body85.i: ; preds = %while.body85.i, %while.body.i188
%aFreq.0518.i = phi i32 [ %add93.i, %while.body85.i ], [ 0, %while.body.i188 ]
%inc87.i = add nsw i32 0, 1
- %tmp91.i = load i32* undef, align 4, !tbaa !0
+ %tmp91.i = load i32* undef, align 4
%add93.i = add nsw i32 %tmp91.i, %aFreq.0518.i
%or.cond514.i = and i1 undef, false
br i1 %or.cond514.i, label %while.body85.i, label %while.end.i
@@ -168,7 +168,3 @@ if.end85: ; preds = %entry
}
declare void @fprintf(...) nounwind
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
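The other mechanical change: loads and stores shed their !tbaa tags, and the TBAA nodes left without users (like the three lines deleted above) go with them, e.g.

  before: %tmp91.i = load i32* undef, align 4, !tbaa !0
  after:  %tmp91.i = load i32* undef, align 4

None of the affected tests check aliasing behavior, so dropping the tags does not weaken what they verify.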
diff --git a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 832a8eb8144c..501a8101a3fe 100644
--- a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -109,7 +109,7 @@ bb49: ; preds = %bb49, %bb48
%tmp51 = add i32 %tmp50, undef
%tmp52 = add i32 %tmp50, undef
%tmp53 = getelementptr i32* %tmp13, i32 %tmp52
- %tmp54 = load i32* %tmp53, align 4, !tbaa !0
+ %tmp54 = load i32* %tmp53, align 4
%tmp55 = add i32 %tmp50, 1
%tmp56 = icmp eq i32 %tmp55, %tmp8
br i1 %tmp56, label %bb57, label %bb49
@@ -127,7 +127,7 @@ bb61: ; preds = %bb61, %bb59
%tmp62 = phi i32 [ %tmp65, %bb61 ], [ 0, %bb59 ]
%tmp63 = add i32 %tmp62, %tmp14
%tmp64 = getelementptr i32* %tmp13, i32 %tmp63
- store i32 0, i32* %tmp64, align 4, !tbaa !0
+ store i32 0, i32* %tmp64, align 4
%tmp65 = add i32 %tmp62, 1
%tmp66 = icmp eq i32 %tmp65, %tmp8
br i1 %tmp66, label %bb67, label %bb61
@@ -149,7 +149,3 @@ declare void @Pjii(i32*, i32, i32) optsize
declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
declare void @OnOverFlow() noreturn optsize ssp align 2
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
index 9525653f3fff..9164eb9c6912 100644
--- a/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
+++ b/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
@@ -18,7 +18,7 @@ define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double
entry:
call void @llvm.dbg.declare(metadata !{%struct.hgstruct.2.29* %hg}, metadata !4)
%type = getelementptr inbounds %struct.node.0.27* %p, i64 0, i32 0
- %0 = load i16* %type, align 2, !tbaa !8
+ %0 = load i16* %type, align 2
%cmp = icmp eq i16 %0, 1
br i1 %cmp, label %return, label %for.cond.preheader
@@ -45,7 +45,4 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!5 = metadata !{i32 786473, metadata !11} ; [ DW_TAG_file_type ]
!6 = metadata !{i32 786454, metadata !11, null, metadata !"hgstruct", i32 492, i64 0, i64 0, i64 0, i32 0, metadata !7} ; [ DW_TAG_typedef ] [hgstruct] [line 492, size 0, align 0, offset 0] [from ]
!7 = metadata !{i32 786451, metadata !11, null, metadata !"", i32 487, i64 512, i64 64, i32 0, i32 0, null, null, i32 0, i32 0, i32 0} ; [ DW_TAG_structure_type ] [line 487, size 512, align 64, offset 0] [from ]
-!8 = metadata !{metadata !"short", metadata !9}
-!9 = metadata !{metadata !"omnipotent char", metadata !10}
-!10 = metadata !{metadata !"Simple C/C++ TBAA"}
!11 = metadata !{metadata !"MultiSource/Benchmarks/Olden/bh/newbh.c", metadata !"MultiSource/Benchmarks/Olden/bh"}
diff --git a/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll b/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
index 03b6bdeafa87..f0c7781fafe9 100644
--- a/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
+++ b/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @main() #0 {
entry:
- %0 = load <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32, !tbaa !0
+ %0 = load <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32
%bitcast.i = extractelement <8 x float> %0, i32 0
%vecinit.i.i = insertelement <4 x float> undef, float %bitcast.i, i32 0
%vecinit2.i.i = insertelement <4 x float> %vecinit.i.i, float 0.000000e+00, i32 1
@@ -17,7 +17,7 @@ entry:
%vecinit4.i.i = insertelement <4 x float> %vecinit3.i.i, float 0.000000e+00, i32 3
%1 = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %vecinit4.i.i) #2
%vecext.i.i = extractelement <4 x float> %1, i32 0
- store float %vecext.i.i, float* getelementptr inbounds ([8 x float]* @e, i64 0, i64 0), align 16, !tbaa !0
+ store float %vecext.i.i, float* getelementptr inbounds ([8 x float]* @e, i64 0, i64 0), align 16
unreachable
}
@@ -26,6 +26,3 @@ declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) #1
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/2013-05-06-ConactVectorCrash.ll b/test/CodeGen/X86/2013-05-06-ConactVectorCrash.ll
new file mode 100644
index 000000000000..920341799d63
--- /dev/null
+++ b/test/CodeGen/X86/2013-05-06-ConactVectorCrash.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -march=x86
+
+; Make sure this doesn't crash
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-win32"
+
+define void @foo() {
+ %1 = shufflevector <3 x i8> undef, <3 x i8> undef, <2 x i32> <i32 0, i32 1>
+ %2 = shufflevector <2 x i8> %1, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %3 = shufflevector <4 x i8> undef, <4 x i8> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ store <4 x i8> %3, <4 x i8>* undef
+ ret void
+}
diff --git a/test/CodeGen/X86/MachineSink-DbgValue.ll b/test/CodeGen/X86/MachineSink-DbgValue.ll
index 227ef3466e0a..13a6444a496b 100644
--- a/test/CodeGen/X86/MachineSink-DbgValue.ll
+++ b/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -27,7 +27,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, metadata !20, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, null, null, metadata !18, null, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, metadata !20, i32 12, metadata !"Apple clang version 3.0 (tags/Apple/clang-211.10.1) (based on LLVM 3.0svn)", i1 true, metadata !"", i32 0, null, null, metadata !18, null, null, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i32*)* @foo, null, null, metadata !19, i32 0} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !20} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 03d2e472cba6..5fe08ed305f1 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -119,8 +119,8 @@ entry:
; X64: test8:
; X64: addq
-; X64-NEXT: sbbq
-; X64-NEXT: testb
+; X64-NEXT: setb
+; X64: ret
define i32 @test9(i32 %x, i32 %y) nounwind readnone {
%cmp = icmp eq i32 %x, 10
diff --git a/test/CodeGen/X86/asm-invalid-register-class-crasher.ll b/test/CodeGen/X86/asm-invalid-register-class-crasher.ll
new file mode 100644
index 000000000000..24e2284c8c8b
--- /dev/null
+++ b/test/CodeGen/X86/asm-invalid-register-class-crasher.ll
@@ -0,0 +1,9 @@
+; RUN: not llc < %s -mtriple=i386-apple-darwin > %t 2>&1
+
+; Previously, this would assert in an asserts build but crash in a release build.
+; No FileCheck; just make sure we handle this gracefully.
+define i64 @t1(i64* %p, i64 %val) #0 {
+entry:
+ %0 = tail call i64 asm sideeffect "xaddq $0, $1", "=q,*m,0,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %p, i64 %val)
+ ret i64 %0
+}
diff --git a/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll b/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
index 2a34e0298f30..6237b66a5ea6 100644
--- a/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
+++ b/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -mtriple=i386-linux-gnu -mcpu=atom 2>&1 | \
-; RUN: grep "calll" | not grep "("
-; RUN: llc < %s -mtriple=i386-linux-gnu -mcpu=core2 2>&1 | \
-; RUN: grep "calll" | grep "*funcp"
+; RUN: llc < %s -mtriple=i386-linux-gnu -mcpu=atom | \
+; RUN: FileCheck --check-prefix=ATOM %s
+; RUN: llc < %s -mtriple=i386-linux-gnu -mcpu=core2 | \
+; RUN: FileCheck --check-prefix=CORE2 %s
+; ATOM: calll *{{%[a-z]+}}
+; CORE2: calll *funcp
;
; Original source code built with clang -S -emit-llvm -m32 test32.c:
;
@@ -18,10 +20,6 @@
; }
; }
;
-; ModuleID = 'test32.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
-target triple = "i386-unknown-linux-gnu"
-
@sum = external global i32
@a = common global i32 0, align 4
@i = common global i32 0, align 4
@@ -74,4 +72,3 @@ for.end: ; preds = %for.cond
ret void
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll b/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
index bcfbd6107a56..a196d8175aa9 100644
--- a/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
+++ b/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=atom 2>&1 | \
-; RUN: grep "callq" | not grep "("
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=core2 2>&1 | \
-; RUN: grep "callq" | grep "*funcp"
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=atom | \
+; RUN: FileCheck --check-prefix=ATOM %s
+; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=core2 | \
+; RUN: FileCheck --check-prefix=CORE2 %s
+; ATOM: callq *{{%[a-z]+[0-9]*}}
+; CORE2: callq *funcp
;
; Original source code built with clang -S -emit-llvm -m64 test64.c:
; int a, b, c, d, e, f, g, h, i, j, k, l, m, n;
@@ -19,9 +21,6 @@
; }
; }
;
-; ModuleID = 'test64.c'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
@sum = external global i32
@a = common global i32 0, align 4
@@ -88,4 +87,3 @@ for.end: ; preds = %for.cond
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/X86/atom-fixup-lea1.ll b/test/CodeGen/X86/atom-fixup-lea1.ll
new file mode 100644
index 000000000000..4651bf257fd4
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea1.ll
@@ -0,0 +1,38 @@
+; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
+; CHECK: addl
+; CHECK-NEXT: leal
+; CHECK-NEXT: decl
+; CHECK-NEXT: jne
+
+; Test for the FixupLEAs pre-emit pass. An LEA should be substituted for the ADD
+; that increments the array pointer because it is within 5 instructions of the
+; corresponding load. The ADD precedes the load by following the loop back edge.
+
+; Original C code
+;int test(int n, int * array)
+;{
+; int sum = 0;
+; for(int i = 0; i < n; i++)
+; sum += array[i];
+; return sum;
+;}
+
+define i32 @test(i32 %n, i32* nocapture %array) {
+entry:
+ %cmp4 = icmp sgt i32 %n, 0
+ br i1 %cmp4, label %for.body, label %for.end
+
+for.body:
+ %i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %array, i32 %i.06
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.05
+ %inc = add nsw i32 %i.06, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ ret i32 %sum.0.lcssa
+}
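A sketch of the substitution this test pins down, with illustrative operands rather than the exact output: the flag-setting ALU add that advances the pointer is rewritten as an address computation,

  addl $4, %esi          ; ALU op, clobbers EFLAGS
  ; -- becomes --
  leal 4(%esi), %esi     ; same sum via address generation, no flags

which on Atom makes the result available earlier to a load that follows within the pass's 5-instruction window, hence the addl/leal pair the CHECK lines expect.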
diff --git a/test/CodeGen/X86/atom-fixup-lea2.ll b/test/CodeGen/X86/atom-fixup-lea2.ll
new file mode 100644
index 000000000000..1855ea1d024d
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea2.ll
@@ -0,0 +1,84 @@
+; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
+; CHECK: BB#5
+; CHECK-NEXT: leal
+; CHECK-NEXT: leal
+; CHECK-NEXT: leal
+; CHECK-NEXT: movl
+
+
+; Test for the FixupLEAs pre-emit pass. LEA instructions should be substituted
+; for ADD instructions that compute the address and index of the load, because
+; they precede the load within 5 instructions. An LEA should also be substituted
+; for an ADD that computes part of the index, because it precedes the index LEA
+; within 5 instructions; this substitution is referred to as backwards chaining.
+
+; Original C code
+;struct node_t
+;{
+; int k, m, n, p;
+; int * array;
+;};
+
+;extern struct node_t getnode();
+
+;int test()
+;{
+; int sum = 0;
+; struct node_t n = getnode();
+; if(n.array != 0 && n.p > 0 && n.k > 0 && n.n > 0 && n.m > 0) {
+; sum = ((int*)((int)n.array + n.p) )[ n.k + n.m + n.n ];
+; }
+; return sum;
+;}
+
+%struct.node_t = type { i32, i32, i32, i32, i32* }
+
+define i32 @test() {
+entry:
+ %n = alloca %struct.node_t, align 4
+ call void bitcast (void (%struct.node_t*, ...)* @getnode to void (%struct.node_t*)*)(%struct.node_t* sret %n)
+ %array = getelementptr inbounds %struct.node_t* %n, i32 0, i32 4
+ %0 = load i32** %array, align 4
+ %cmp = icmp eq i32* %0, null
+ br i1 %cmp, label %if.end, label %land.lhs.true
+
+land.lhs.true:
+ %p = getelementptr inbounds %struct.node_t* %n, i32 0, i32 3
+ %1 = load i32* %p, align 4
+ %cmp1 = icmp sgt i32 %1, 0
+ br i1 %cmp1, label %land.lhs.true2, label %if.end
+
+land.lhs.true2:
+ %k = getelementptr inbounds %struct.node_t* %n, i32 0, i32 0
+ %2 = load i32* %k, align 4
+ %cmp3 = icmp sgt i32 %2, 0
+ br i1 %cmp3, label %land.lhs.true4, label %if.end
+
+land.lhs.true4:
+ %n5 = getelementptr inbounds %struct.node_t* %n, i32 0, i32 2
+ %3 = load i32* %n5, align 4
+ %cmp6 = icmp sgt i32 %3, 0
+ br i1 %cmp6, label %land.lhs.true7, label %if.end
+
+land.lhs.true7:
+ %m = getelementptr inbounds %struct.node_t* %n, i32 0, i32 1
+ %4 = load i32* %m, align 4
+ %cmp8 = icmp sgt i32 %4, 0
+ br i1 %cmp8, label %if.then, label %if.end
+
+if.then:
+ %add = add i32 %3, %2
+ %add12 = add i32 %add, %4
+ %5 = ptrtoint i32* %0 to i32
+ %add15 = add nsw i32 %1, %5
+ %6 = inttoptr i32 %add15 to i32*
+ %arrayidx = getelementptr inbounds i32* %6, i32 %add12
+ %7 = load i32* %arrayidx, align 4
+ br label %if.end
+
+if.end:
+ %sum.0 = phi i32 [ %7, %if.then ], [ 0, %land.lhs.true7 ], [ 0, %land.lhs.true4 ], [ 0, %land.lhs.true2 ], [ 0, %land.lhs.true ], [ 0, %entry ]
+ ret i32 %sum.0
+}
+
+declare void @getnode(%struct.node_t* sret, ...)
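Backwards chaining, sketched with hypothetical registers: once the ADD that produces a load address becomes an LEA, an earlier ADD feeding its index is eligible too,

  addl %ecx, %edx        ; computes part of the index
  addl %edx, %eax        ; computes the address
  movl (%eax), %ebx
  ; -- roughly becomes --
  leal (%ecx,%edx), %edx
  leal (%edx,%eax), %eax
  movl (%eax), %ebx

which is why the CHECK lines above expect a run of three leal instructions before the movl in this test.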
diff --git a/test/CodeGen/X86/atom-fixup-lea3.ll b/test/CodeGen/X86/atom-fixup-lea3.ll
new file mode 100644
index 000000000000..311b0b302163
--- /dev/null
+++ b/test/CodeGen/X86/atom-fixup-lea3.ll
@@ -0,0 +1,51 @@
+; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
+; CHECK: addl ([[reg:%[a-z]+]])
+; CHECK-NEXT: addl $4, [[reg]]
+
+; Test for the FixupLEAs pre-emit pass.
+; An LEA should NOT be substituted for the ADD instruction
+; that increments the array pointer if it is greater than 5 instructions
+; away from the memory reference that uses it.
+
+; Original C code: clang -m32 -S -O2
+;int test(int n, int * array, int * m, int * array2)
+;{
+; int i, j = 0;
+; int sum = 0;
+; for (i = 0, j = 0; i < n;) {
+; ++i;
+; *m += array2[j++];
+; sum += array[i];
+; }
+; return sum;
+;}
+
+define i32 @test(i32 %n, i32* nocapture %array, i32* nocapture %m, i32* nocapture %array2) #0 {
+entry:
+ %cmp7 = icmp sgt i32 %n, 0
+ br i1 %cmp7, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+ %.pre = load i32* %m, align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+ %0 = phi i32 [ %.pre, %for.body.lr.ph ], [ %add, %for.body ]
+ %sum.010 = phi i32 [ 0, %for.body.lr.ph ], [ %add3, %for.body ]
+ %j.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc1, %for.body ]
+ %inc1 = add nsw i32 %j.09, 1
+ %arrayidx = getelementptr inbounds i32* %array2, i32 %j.09
+ %1 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %1
+ store i32 %add, i32* %m, align 4
+ %arrayidx2 = getelementptr inbounds i32* %array, i32 %inc1
+ %2 = load i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %2, %sum.010
+ %exitcond = icmp eq i32 %inc1, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+ ret i32 %sum.0.lcssa
+}
+
diff --git a/test/CodeGen/X86/atomic-dagsched.ll b/test/CodeGen/X86/atomic-dagsched.ll
index 0e7cf8c09668..05e630be153c 100644
--- a/test/CodeGen/X86/atomic-dagsched.ll
+++ b/test/CodeGen/X86/atomic-dagsched.ll
@@ -18,8 +18,8 @@ loop.cond: ; preds = %test.exit, %entry
br i1 %3, label %return, label %loop
loop: ; preds = %loop.cond
- %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8, !tbaa !0
- %5 = load i64* %4, align 8, !tbaa !3
+ %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %5 = load i64* %4, align 8
%vector.size.i = ashr i64 %5, 3
%num.vector.wi.i = shl i64 %vector.size.i, 3
%6 = icmp eq i64 %vector.size.i, 0
@@ -65,8 +65,8 @@ scalarIf.i: ; preds = %vector_kernel_entry
br i1 %18, label %test.exit, label %dim_0_pre_head.i
dim_0_pre_head.i: ; preds = %scalarIf.i
- %19 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8, !tbaa !0
- %20 = load i64* %19, align 8, !tbaa !3
+ %19 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %20 = load i64* %19, align 8
%21 = trunc i64 %20 to i32
%22 = mul i64 %vector.size.i, 8
br label %scalar_kernel_entry.i
@@ -76,10 +76,10 @@ scalar_kernel_entry.i: ; preds = %scalar_kernel_entry
%23 = bitcast i8* %asr.iv6 to i32 addrspace(1)*
%24 = bitcast i8* %ptrtoarg4 to i32 addrspace(1)*
%scevgep16 = getelementptr i32 addrspace(1)* %23, i64 %asr.iv12
- %25 = load i32 addrspace(1)* %scevgep16, align 4, !tbaa !4
+ %25 = load i32 addrspace(1)* %scevgep16, align 4
%26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
%scevgep15 = getelementptr i32 addrspace(1)* %23, i64 %asr.iv12
- store i32 %21, i32 addrspace(1)* %scevgep15, align 4, !tbaa !4
+ store i32 %21, i32 addrspace(1)* %scevgep15, align 4
%asr.iv.next13 = add i64 %asr.iv12, 1
%dim_0_cmp.to.max.i = icmp eq i64 %5, %asr.iv.next13
br i1 %dim_0_cmp.to.max.i, label %test.exit, label %scalar_kernel_entry.i
@@ -97,12 +97,6 @@ return: ; preds = %loop.cond
ret void
}
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"long", metadata !1}
-!4 = metadata !{metadata !"int", metadata !1}
-
; CHECK: test
; CHECK: decq
; CHECK-NOT: cmpxchgl
diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll
index 95854c7960e7..64c4627c47c3 100644
--- a/test/CodeGen/X86/avx-basic.ll
+++ b/test/CodeGen/X86/avx-basic.ll
@@ -121,3 +121,13 @@ define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly {
%res = insertelement <16 x i16> <i16 undef, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, i16 %a, i32 0
ret <16 x i16> %res
}
+
+;;; Check that VMOVPQIto64rr generates the assembly string "vmovd". Previously
+;;; an incorrect mnemonic of "movd" was printed for this instruction.
+; CHECK: VMOVPQIto64rr
+; CHECK: vmovd
+define i64 @VMOVPQIto64rr(<2 x i64> %a) {
+entry:
+ %vecext.i = extractelement <2 x i64> %a, i32 0
+ ret i64 %vecext.i
+}
diff --git a/test/CodeGen/X86/avx-brcond.ll b/test/CodeGen/X86/avx-brcond.ll
new file mode 100644
index 000000000000..d52ae52e0b98
--- /dev/null
+++ b/test/CodeGen/X86/avx-brcond.ll
@@ -0,0 +1,150 @@
+; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+declare i32 @llvm.x86.avx.ptestz.256(<4 x i64> %p1, <4 x i64> %p2) nounwind
+declare i32 @llvm.x86.avx.ptestc.256(<4 x i64> %p1, <4 x i64> %p2) nounwind
+
+define <4 x float> @test1(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test1:
+; CHECK: vptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = icmp ne i32 %res, 0
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test3(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test3:
+; CHECK: vptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = trunc i32 %res to i1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test4(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test4:
+; CHECK: vptest
+; CHECK-NEXT: jae
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = icmp ne i32 %res, 0
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test6(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test6:
+; CHECK: vptest
+; CHECK-NEXT: jae
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = trunc i32 %res to i1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test7(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test7:
+; CHECK: vptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = icmp eq i32 %res, 1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test8(<4 x i64> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test8:
+; CHECK: vptest
+; CHECK-NEXT: je
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %a, <4 x i64> %a) nounwind
+ %one = icmp ne i32 %res, 1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+
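A summary of the six cases above: vptest sets ZF when the AND of its operands is all zeros and CF when the AND-NOT is all zeros; llvm.x86.avx.ptestz.256 returns ZF and llvm.x86.avx.ptestc.256 returns CF. Branching on the intrinsic result therefore folds to vptest plus a bare flag branch, roughly:

  ptestz result branched on  ->  vptest ; je / jne    (tests ZF)
  ptestc result branched on  ->  vptest ; jb / jae    (tests CF)

Whether the direct or inverted jump appears (jne vs. je, jae vs. jb) depends on the comparison and block layout, which is exactly what each CHECK-NEXT pins down.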
diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll
index 5534712af832..271fb4250517 100644
--- a/test/CodeGen/X86/block-placement.ll
+++ b/test/CodeGen/X86/block-placement.ll
@@ -524,7 +524,7 @@ entry:
br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1
entry.if.then_crit_edge:
- %.pre14 = load i8* undef, align 1, !tbaa !0
+ %.pre14 = load i8* undef, align 1
br label %if.then
lor.lhs.false:
@@ -537,7 +537,7 @@ exit:
if.then:
%0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
%1 = and i8 %0, 1
- store i8 %1, i8* undef, align 4, !tbaa !0
+ store i8 %1, i8* undef, align 4
br label %if.end
if.end:
diff --git a/test/CodeGen/X86/brcond.ll b/test/CodeGen/X86/brcond.ll
index 44670c802b41..bc4032b13cc0 100644
--- a/test/CodeGen/X86/brcond.ll
+++ b/test/CodeGen/X86/brcond.ll
@@ -108,3 +108,150 @@ bb2: ; preds = %entry, %bb1
ret float %.0
}
+declare i32 @llvm.x86.sse41.ptestz(<4 x float> %p1, <4 x float> %p2) nounwind
+declare i32 @llvm.x86.sse41.ptestc(<4 x float> %p1, <4 x float> %p2) nounwind
+
+define <4 x float> @test5(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test5:
+; CHECK: ptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestz(<4 x float> %a, <4 x float> %a) nounwind
+ %one = icmp ne i32 %res, 0
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test7(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test7:
+; CHECK: ptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestz(<4 x float> %a, <4 x float> %a) nounwind
+ %one = trunc i32 %res to i1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test8(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test8:
+; CHECK: ptest
+; CHECK-NEXT: jae
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestc(<4 x float> %a, <4 x float> %a) nounwind
+ %one = icmp ne i32 %res, 0
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test10(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test10:
+; CHECK: ptest
+; CHECK-NEXT: jae
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestc(<4 x float> %a, <4 x float> %a) nounwind
+ %one = trunc i32 %res to i1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test11(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test11:
+; CHECK: ptest
+; CHECK-NEXT: jne
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestz(<4 x float> %a, <4 x float> %a) nounwind
+ %one = icmp eq i32 %res, 1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
+define <4 x float> @test12(<4 x float> %a, <4 x float> %b) nounwind {
+entry:
+; CHECK: test12:
+; CHECK: ptest
+; CHECK-NEXT: je
+; CHECK: ret
+
+ %res = call i32 @llvm.x86.sse41.ptestz(<4 x float> %a, <4 x float> %a) nounwind
+ %one = icmp ne i32 %res, 1
+ br i1 %one, label %bb1, label %bb2
+
+bb1:
+ %c = fadd <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+bb2:
+ %d = fdiv <4 x float> %b, < float 1.000000e+002, float 2.000000e+002, float 3.000000e+002, float 4.000000e+002 >
+ br label %return
+
+return:
+ %e = phi <4 x float> [%c, %bb1], [%d, %bb2]
+ ret <4 x float> %e
+}
+
diff --git a/test/CodeGen/X86/bswap-inline-asm.ll b/test/CodeGen/X86/bswap-inline-asm.ll
index 3bb9124633d6..d69bfa6e7eb7 100644
--- a/test/CodeGen/X86/bswap-inline-asm.ll
+++ b/test/CodeGen/X86/bswap-inline-asm.ll
@@ -1,6 +1,7 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin > %t
-; RUN: not grep InlineAsm %t
-; RUN: FileCheck %s < %t
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck -check-prefix CHK %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+
+; CHK-NOT: InlineAsm
; CHECK: foo:
; CHECK: bswapq
diff --git a/test/CodeGen/X86/bt.ll b/test/CodeGen/X86/bt.ll
index 39a784dec37d..e28923bb21d2 100644
--- a/test/CodeGen/X86/bt.ll
+++ b/test/CodeGen/X86/bt.ll
@@ -522,11 +522,8 @@ UnifiedReturnBlock: ; preds = %entry
declare void @foo()
-; rdar://12755626
define zeroext i1 @invert(i32 %flags, i32 %flag) nounwind {
-; CHECK: invert
-; CHECK: btl %eax, %ecx
-; CHECK: setae
+; CHECK: btl
entry:
%neg = xor i32 %flags, -1
%shl = shl i32 1, %flag
diff --git a/test/CodeGen/X86/call-imm.ll b/test/CodeGen/X86/call-imm.ll
index 38cda4d14040..8753594df10a 100644
--- a/test/CodeGen/X86/call-imm.ll
+++ b/test/CodeGen/X86/call-imm.ll
@@ -1,11 +1,11 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=static | grep "call.*12345678"
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic | not grep "call.*12345678"
-; RUN: llc < %s -mtriple=i386-pc-linux -relocation-model=dynamic-no-pic | grep "call.*12345678"
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=static | FileCheck -check-prefix X86STA %s
+; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic | FileCheck -check-prefix X86PIC %s
+; RUN: llc < %s -mtriple=i386-pc-linux -relocation-model=dynamic-no-pic | FileCheck -check-prefix X86DYN %s
; Call to immediate is not safe on x86-64 unless we *know* that the
; call target is within a 32-bit pc-relative displacement of the call site.
-; RUN: llc < %s -march=x86-64 | grep "call.*\*%rax"
+; RUN: llc < %s -march=x86-64 | FileCheck -check-prefix X64 %s
; PR3666
; PR3773
@@ -16,3 +16,8 @@ entry:
%0 = call i32 inttoptr (i32 12345678 to i32 (i32)*)(i32 0) nounwind ; <i32> [#uses=1]
ret i32 %0
}
+
+; X86STA: {{call.*12345678}}
+; X86PIC-NOT: {{call.*12345678}}
+; X86DYN: {{call.*12345678}}
+; X64: {{call.*[*]%rax}}
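The X64 expectation follows from the instruction encoding: an x86-64 direct call is call rel32, reaching only +/-2 GiB from the call site, so a call to an arbitrary absolute immediate has to go through a register, something like:

  movl $12345678, %eax
  callq *%rax

On 32-bit targets the immediate covers the whole address space, so the static and dynamic-no-pic runs expect the direct form, while PIC avoids baking in an absolute target.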
diff --git a/test/CodeGen/X86/coalescer-identity.ll b/test/CodeGen/X86/coalescer-identity.ll
index 9c72ee6296bd..1aac09594c43 100644
--- a/test/CodeGen/X86/coalescer-identity.ll
+++ b/test/CodeGen/X86/coalescer-identity.ll
@@ -12,10 +12,10 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @func() nounwind uwtable ssp {
for.body.lr.ph:
- %0 = load i32* @g2, align 4, !tbaa !0
+ %0 = load i32* @g2, align 4
%tobool6 = icmp eq i32 %0, 0
%s.promoted = load i16* @s, align 2
- %.pre = load i32* @g1, align 4, !tbaa !0
+ %.pre = load i32* @g1, align 4
br i1 %tobool6, label %for.body.us, label %for.body
for.body.us: ; preds = %for.body.lr.ph, %for.inc.us
@@ -43,11 +43,11 @@ for.inc.us: ; preds = %cond.end.us, %for.b
cond.end.us: ; preds = %if.then7.us, %cond.false.us
%4 = phi i32 [ 0, %cond.false.us ], [ %1, %if.then7.us ]
%cond.us = phi i32 [ 0, %cond.false.us ], [ %v.010.us, %if.then7.us ]
- store i32 %cond.us, i32* @g0, align 4, !tbaa !0
+ store i32 %cond.us, i32* @g0, align 4
br label %for.inc.us
cond.false.us: ; preds = %if.then7.us
- store i32 0, i32* @g1, align 4, !tbaa !0
+ store i32 0, i32* @g1, align 4
br label %cond.end.us
if.then7.us: ; preds = %for.body.us
@@ -76,7 +76,3 @@ for.end: ; preds = %for.inc.us, %for.bo
store i16 %dec12.lcssa, i16* @s, align 2
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/code_placement_align_all.ll b/test/CodeGen/X86/code_placement_align_all.ll
new file mode 100644
index 000000000000..1e5e8f780b70
--- /dev/null
+++ b/test/CodeGen/X86/code_placement_align_all.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mcpu=corei7 -mtriple=x86_64-linux -align-all-blocks=16 < %s | FileCheck %s
+
+;CHECK: foo
+;CHECK: .align 65536, 0x90
+;CHECK: .align 65536, 0x90
+;CHECK: .align 65536, 0x90
+;CHECK: ret
+define i32 @foo(i32 %t, i32 %l) nounwind readnone ssp uwtable {
+ %1 = icmp eq i32 %t, 0
+ br i1 %1, label %4, label %2
+
+; <label>:2 ; preds = %0
+ %3 = add nsw i32 %t, 2
+ ret i32 %3
+
+; <label>:4 ; preds = %0
+ %5 = icmp eq i32 %l, 0
+ %. = select i1 %5, i32 0, i32 5
+ ret i32 %.
+}
+
+
diff --git a/test/CodeGen/X86/codegen-prepare.ll b/test/CodeGen/X86/codegen-prepare.ll
new file mode 100644
index 000000000000..e8ee07063531
--- /dev/null
+++ b/test/CodeGen/X86/codegen-prepare.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -mtriple=x86_64-pc-linux | FileCheck %s
+
+; Check that the CodeGenPrepare pass
+; does not wrongly rewrite the address computed by instruction %4
+; as [12 + Base:%this].
+
+; This test makes sure that:
+; - both the store and the first load instructions
+; within the basic block labeled 'if.then' are not removed.
+; - the store instruction stores a value at address [60 + %this]
+; - the first load instruction loads a value at address [12 + %this]
+
+%class.A = type { %struct.B }
+%struct.B = type { %class.C, %class.D, %class.C, %class.D }
+%class.C = type { float, float, float }
+%class.D = type { [3 x %class.C] }
+
+define linkonce_odr void @foo(%class.A* nocapture %this, i32 %BoolValue) nounwind uwtable {
+entry:
+ %cmp = icmp eq i32 %BoolValue, 0
+ %address1 = getelementptr inbounds %class.A* %this, i64 0, i32 0, i32 3
+ %address2 = getelementptr inbounds %class.A* %this, i64 0, i32 0, i32 1
+ br i1 %cmp, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %0 = getelementptr inbounds %class.D* %address2, i64 0, i32 0, i64 0, i32 0
+ %1 = load float* %0, align 4
+ %2 = getelementptr inbounds float* %0, i64 3
+ %3 = load float* %2, align 4
+ %4 = getelementptr inbounds %class.D* %address1, i64 0, i32 0, i64 0, i32 0
+ store float %1, float* %4, align 4
+ br label %if.end
+
+if.else: ; preds = %entry
+ br label %if.end
+
+if.end: ; preds = %if.then, %if.else, %entry
+ ret void
+}
+
+; CHECK: foo:
+; CHECK: movss 12([[THIS:%[a-zA-Z0-9]+]]), [[REGISTER:%[a-zA-Z0-9]+]]
+; CHECK-NEXT: movss [[REGISTER]], 60([[THIS]])
+
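The offsets in those CHECK lines follow from the declared layout: %class.C is three floats (12 bytes) and %class.D is [3 x %class.C] (36 bytes), so inside %struct.B field 1 (%address2) sits at byte 12 and field 3 (%address1) at 12 + 36 + 12 = 60. The regression being guarded against would have folded both accesses onto one base and read and written [12 + %this].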
diff --git a/test/CodeGen/X86/commute-intrinsic.ll b/test/CodeGen/X86/commute-intrinsic.ll
index d810cb1eff78..7d5ca4766892 100644
--- a/test/CodeGen/X86/commute-intrinsic.ll
+++ b/test/CodeGen/X86/commute-intrinsic.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -relocation-model=static | not grep movaps
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -relocation-model=static | FileCheck %s
+
+; CHECK-NOT: movaps
@a = external global <2 x i64> ; <<2 x i64>*> [#uses=1]
diff --git a/test/CodeGen/X86/compact-unwind.ll b/test/CodeGen/X86/compact-unwind.ll
new file mode 100644
index 000000000000..8c4fa27da560
--- /dev/null
+++ b/test/CodeGen/X86/compact-unwind.ll
@@ -0,0 +1,30 @@
+; RUN: llc < %s -disable-cfi -disable-fp-elim -mtriple x86_64-apple-darwin11 | FileCheck %s
+
+%ty = type { i8* }
+
+@gv = external global i32
+
+; This is aligning the stack with a push of a random register.
+; CHECK: pushq %rax
+
+; Even though we can't encode %rax into the compact unwind, we still want to be
+; able to generate a compact unwind encoding in this particular case.
+;
+; CHECK: __LD,__compact_unwind
+; CHECK: _foo ## Range Start
+; CHECK: 16842753 ## Compact Unwind Encoding: 0x1010001
+
+define i8* @foo(i64 %size) {
+ %addr = alloca i64, align 8
+ %tmp20 = load i32* @gv, align 4
+ %tmp21 = call i32 @bar()
+ %tmp25 = load i64* %addr, align 8
+ %tmp26 = inttoptr i64 %tmp25 to %ty*
+ %tmp29 = getelementptr inbounds %ty* %tmp26, i64 0, i32 0
+ %tmp34 = load i8** %tmp29, align 8
+ %tmp35 = getelementptr inbounds i8* %tmp34, i64 %size
+ store i8* %tmp35, i8** %tmp29, align 8
+ ret i8* null
+}
+
+declare i32 @bar()
diff --git a/test/CodeGen/X86/compiler_used.ll b/test/CodeGen/X86/compiler_used.ll
index be8de5e09f8a..d38ce91310b1 100644
--- a/test/CodeGen/X86/compiler_used.ll
+++ b/test/CodeGen/X86/compiler_used.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | grep no_dead_strip | count 1
-; We should have a .no_dead_strip directive for Z but not for X/Y.
+; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s
@X = internal global i8 4
@Y = internal global i32 123
@@ -7,3 +6,7 @@
@llvm.used = appending global [1 x i8*] [ i8* @Z ], section "llvm.metadata"
@llvm.compiler_used = appending global [2 x i8*] [ i8* @X, i8* bitcast (i32* @Y to i8*)], section "llvm.metadata"
+
+; CHECK-NOT: .no_dead_strip
+; CHECK: .no_dead_strip _Z
+; CHECK-NOT: .no_dead_strip
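The CHECK-NOT / CHECK / CHECK-NOT sandwich reproduces the old grep ... | count 1: the leading CHECK-NOT forbids any .no_dead_strip before the expected one, the CHECK consumes the single directive for _Z, and the trailing CHECK-NOT forbids any after it, so exactly one occurrence passes. The same idiom expresses any appears-exactly-once expectation, e.g.

  ; CHECK-NOT: somedirective
  ; CHECK: somedirective expected_operand
  ; CHECK-NOT: somedirective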
diff --git a/test/CodeGen/X86/crash.ll b/test/CodeGen/X86/crash.ll
index 6d2196206e7c..852b642de68d 100644
--- a/test/CodeGen/X86/crash.ll
+++ b/test/CodeGen/X86/crash.ll
@@ -238,7 +238,7 @@ declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
define void @_ZNK4llvm17MipsFrameLowering12emitPrologueERNS_15MachineFunctionE() ssp align 2 {
bb:
- %tmp = load %t9** undef, align 4, !tbaa !0
+ %tmp = load %t9** undef, align 4
%tmp2 = getelementptr inbounds %t9* %tmp, i32 0, i32 0
%tmp3 = getelementptr inbounds %t9* %tmp, i32 0, i32 0, i32 0, i32 0, i32 1
br label %bb4
diff --git a/test/CodeGen/X86/dbg-byval-parameter.ll b/test/CodeGen/X86/dbg-byval-parameter.ll
index aca06a27a1df..719a526cc892 100644
--- a/test/CodeGen/X86/dbg-byval-parameter.ll
+++ b/test/CodeGen/X86/dbg-byval-parameter.ll
@@ -30,7 +30,7 @@ declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
!0 = metadata !{i32 786689, metadata !1, metadata !"my_r0", metadata !2, i32 11, metadata !7, i32 0, null} ; [ DW_TAG_arg_variable ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 11, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i32 0, i1 false, double (%struct.Rect*)* @foo, null, null, null, i32 0} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, null, null, metadata !18, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!3 = metadata !{i32 786449, i32 1, metadata !2, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 false, metadata !"", i32 0, null, null, metadata !18, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!4 = metadata !{i32 786453, metadata !19, metadata !2, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
!5 = metadata !{metadata !6, metadata !7}
!6 = metadata !{i32 786468, metadata !19, metadata !2, metadata !"double", i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/X86/dbg-const-int.ll b/test/CodeGen/X86/dbg-const-int.ll
index aabc2068068d..f72729c5fee1 100644
--- a/test/CodeGen/X86/dbg-const-int.ll
+++ b/test/CodeGen/X86/dbg-const-int.ll
@@ -14,7 +14,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, i32 12, metadata !2, metadata !"clang version 3.0 (trunk 132191)", i1 true, metadata !"", i32 0, null, null, metadata !11, null, null} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, i32 12, metadata !2, metadata !"clang version 3.0 (trunk 132191)", i1 true, metadata !"", i32 0, null, null, metadata !11, null, null, null} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 1, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 0, i1 true, i32 ()* @foo, null, null, metadata !12, i32 0} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !13} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/X86/dbg-const.ll b/test/CodeGen/X86/dbg-const.ll
index a9b8f1fdc4f3..5c2e62bc9168 100644
--- a/test/CodeGen/X86/dbg-const.ll
+++ b/test/CodeGen/X86/dbg-const.ll
@@ -20,7 +20,7 @@ declare i32 @bar() nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"foobar", metadata !"foobar", metadata !"foobar", metadata !1, i32 12, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true, i32 ()* @foobar, null, null, metadata !14, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !15} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 2.9 (trunk 114183)", i1 true, metadata !"", i32 0, null, null, metadata !13, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 12, metadata !1, metadata !"clang version 2.9 (trunk 114183)", i1 true, metadata !"", i32 0, null, null, metadata !13, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null}
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, metadata !1, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
diff --git a/test/CodeGen/X86/dbg-i128-const.ll b/test/CodeGen/X86/dbg-i128-const.ll
index 17d645757d99..cc612b2ca53e 100644
--- a/test/CodeGen/X86/dbg-i128-const.ll
+++ b/test/CodeGen/X86/dbg-i128-const.ll
@@ -19,7 +19,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!2 = metadata !{i32 786443, metadata !4, metadata !3, i32 26, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
!3 = metadata !{i32 786478, metadata !4, metadata !"__foo", metadata !"__foo", metadata !"__foo", metadata !4, i32 26, metadata !6, i1 false, i1 true, i32 0, i32 0, null, i1 false, i128 (i128, i128)* @__foo, null, null, null, i32 26} ; [ DW_TAG_subprogram ]
!4 = metadata !{i32 786473, metadata !13} ; [ DW_TAG_file_type ]
-!5 = metadata !{i32 786449, i32 1, metadata !4, metadata !"clang", i1 true, metadata !"", i32 0, null, null, metadata !12, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!5 = metadata !{i32 786449, i32 1, metadata !4, metadata !"clang", i1 true, metadata !"", i32 0, null, null, metadata !12, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!6 = metadata !{i32 786453, metadata !13, metadata !4, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !7, i32 0, null} ; [ DW_TAG_subroutine_type ]
!7 = metadata !{metadata !8, metadata !8, metadata !8}
!8 = metadata !{i32 786454, metadata !14, metadata !4, metadata !"ti_int", i32 78, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_typedef ]
diff --git a/test/CodeGen/X86/dbg-large-unsigned-const.ll b/test/CodeGen/X86/dbg-large-unsigned-const.ll
index ff16318efcec..c381cd754cfe 100644
--- a/test/CodeGen/X86/dbg-large-unsigned-const.ll
+++ b/test/CodeGen/X86/dbg-large-unsigned-const.ll
@@ -7,8 +7,8 @@ define zeroext i1 @_Z3iseRKxS0_(i64* nocapture %LHS, i64* nocapture %RHS) nounwi
entry:
tail call void @llvm.dbg.value(metadata !{i64* %LHS}, i64 0, metadata !7), !dbg !13
tail call void @llvm.dbg.value(metadata !{i64* %RHS}, i64 0, metadata !11), !dbg !14
- %tmp1 = load i64* %LHS, align 4, !dbg !15, !tbaa !17
- %tmp3 = load i64* %RHS, align 4, !dbg !15, !tbaa !17
+ %tmp1 = load i64* %LHS, align 4, !dbg !15
+ %tmp3 = load i64* %RHS, align 4, !dbg !15
%cmp = icmp eq i64 %tmp1, %tmp3, !dbg !15
ret i1 %cmp, !dbg !15
}
@@ -47,9 +47,6 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!14 = metadata !{i32 2, i32 49, metadata !1, null}
!15 = metadata !{i32 3, i32 3, metadata !16, null}
!16 = metadata !{i32 786443, metadata !2, metadata !1, i32 2, i32 54, i32 0} ; [ DW_TAG_lexical_block ]
-!17 = metadata !{metadata !"long long", metadata !18}
-!18 = metadata !{metadata !"omnipotent char", metadata !19}
-!19 = metadata !{metadata !"Simple C/C++ TBAA", null}
!20 = metadata !{i32 6, i32 19, metadata !6, null}
!21 = metadata !{i32 786689, metadata !1, metadata !"LHS", metadata !2, i32 16777218, metadata !8, i32 0, metadata !22} ; [ DW_TAG_arg_variable ]
!22 = metadata !{i32 7, i32 10, metadata !23, null}
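
A recurring change in this series, first visible in the hunk above, strips old-format !tbaa tags from loads and stores and deletes the now-unreferenced metadata that spelled out the aliasing hierarchy ("long long" / "omnipotent char" / "Simple C/C++ TBAA"). The aliasing tags are apparently incidental to what these tests check, so the result is the same instruction minus one operand, e.g.:

    %tmp1 = load i64* %LHS, align 4, !dbg !15, !tbaa !17   ; before
    %tmp1 = load i64* %LHS, align 4, !dbg !15              ; after
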
diff --git a/test/CodeGen/X86/dbg-merge-loc-entry.ll b/test/CodeGen/X86/dbg-merge-loc-entry.ll
index baad6c0b60e6..30d03054a104 100644
--- a/test/CodeGen/X86/dbg-merge-loc-entry.ll
+++ b/test/CodeGen/X86/dbg-merge-loc-entry.ll
@@ -47,7 +47,7 @@ declare %0 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"__udivmodti4", metadata !"__udivmodti4", metadata !"", metadata !1, i32 879, metadata !3, i1 true, i1 true, i32 0, i32 0, null, i32 256, i1 true, null, null, null, null, i32 879} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !29} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !28, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, metadata !"", i32 0, null, null, metadata !28, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !29, metadata !1, metadata !"", i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5, metadata !5, metadata !5, metadata !8}
!5 = metadata !{i32 786454, metadata !30, metadata !6, metadata !"UTItype", i32 166, i64 0, i64 0, i64 0, i32 0, metadata !7} ; [ DW_TAG_typedef ]
diff --git a/test/CodeGen/X86/dbg-prolog-end.ll b/test/CodeGen/X86/dbg-prolog-end.ll
index 26bac2e08286..d1774cc7bcaf 100644
--- a/test/CodeGen/X86/dbg-prolog-end.ll
+++ b/test/CodeGen/X86/dbg-prolog-end.ll
@@ -35,7 +35,7 @@ entry:
!llvm.dbg.cu = !{!0}
!18 = metadata !{metadata !1, metadata !6}
-!0 = metadata !{i32 786449, i32 12, metadata !2, metadata !"clang version 3.0 (trunk 131100)", i1 false, metadata !"", i32 0, null, null, metadata !18, null, metadata !""} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, i32 12, metadata !2, metadata !"clang version 3.0 (trunk 131100)", i1 false, metadata !"", i32 0, null, null, metadata !18, null, null, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 786478, metadata !2, metadata !"foo", metadata !"foo", metadata !"", metadata !2, i32 1, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 false, i32 (i32)* @foo, null, null, null, i32 1} ; [ DW_TAG_subprogram ]
!2 = metadata !{i32 786473, metadata !"/tmp/a.c", metadata !"/private/tmp", metadata !0} ; [ DW_TAG_file_type ]
!3 = metadata !{i32 786453, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
diff --git a/test/CodeGen/X86/dbg-subrange.ll b/test/CodeGen/X86/dbg-subrange.ll
index 6090185dc10e..b08d68a6643d 100644
--- a/test/CodeGen/X86/dbg-subrange.ll
+++ b/test/CodeGen/X86/dbg-subrange.ll
@@ -14,7 +14,7 @@ entry:
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, i32 12, metadata !6, metadata !"clang version 3.1 (trunk 144833)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !11, metadata !""} ; [ DW_TAG_compile_unit ]
+!0 = metadata !{i32 786449, i32 12, metadata !6, metadata !"clang version 3.1 (trunk 144833)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !11, metadata !11, metadata !""} ; [ DW_TAG_compile_unit ]
!1 = metadata !{i32 0}
!3 = metadata !{metadata !5}
!5 = metadata !{i32 720942, metadata !6, metadata !"bar", metadata !"bar", metadata !"", metadata !6, i32 4, metadata !7, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 false, void ()* @bar, null, null, metadata !9} ; [ DW_TAG_subprogram ]
diff --git a/test/CodeGen/X86/dbg-value-dag-combine.ll b/test/CodeGen/X86/dbg-value-dag-combine.ll
index fcbf64f42378..c63235e7ad65 100644
--- a/test/CodeGen/X86/dbg-value-dag-combine.ll
+++ b/test/CodeGen/X86/dbg-value-dag-combine.ll
@@ -27,7 +27,7 @@ entry:
!0 = metadata !{i32 786478, metadata !1, metadata !"__OpenCL_test_kernel", metadata !"__OpenCL_test_kernel", metadata !"__OpenCL_test_kernel", metadata !1, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 0, i1 false, null} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !19} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"clc", i1 false, metadata !"", i32 0, null, null, metadata !18, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, i32 1, metadata !1, metadata !"clc", i1 false, metadata !"", i32 0, null, null, metadata !18, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{null, metadata !5}
!5 = metadata !{i32 786447, metadata !2, metadata !"", null, i32 0, i64 32, i64 32, i64 0, i32 0, metadata !6} ; [ DW_TAG_pointer_type ]
diff --git a/test/CodeGen/X86/dbg-value-isel.ll b/test/CodeGen/X86/dbg-value-isel.ll
index 55be3b1f222b..acc360e90cd2 100644
--- a/test/CodeGen/X86/dbg-value-isel.ll
+++ b/test/CodeGen/X86/dbg-value-isel.ll
@@ -82,7 +82,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"__OpenCL_nbt02_kernel", metadata !"__OpenCL_nbt02_kernel", metadata !"__OpenCL_nbt02_kernel", metadata !1, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 0, i1 false, null} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !20} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !20, i32 1, metadata !"clc", i1 false, metadata !"", i32 0, null, null, metadata !19, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !20, i32 1, metadata !"clc", i1 false, metadata !"", i32 0, null, null, metadata !19, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !20, metadata !1, metadata !"", i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{null, metadata !5}
!5 = metadata !{i32 786447, null, metadata !2, metadata !"", i32 0, i64 32, i64 32, i64 0, i32 0, metadata !6} ; [ DW_TAG_pointer_type ]
diff --git a/test/CodeGen/X86/dbg-value-location.ll b/test/CodeGen/X86/dbg-value-location.ll
index 2a1916f26c97..a6c3e13621c9 100644
--- a/test/CodeGen/X86/dbg-value-location.ll
+++ b/test/CodeGen/X86/dbg-value-location.ll
@@ -49,7 +49,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !1, metadata !"foo", metadata !"foo", metadata !"", i32 19510, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (i32, i64, i8*, i32)* @foo, null, null, null, i32 19510} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !26} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !27, i32 12, metadata !"clang version 2.9 (trunk 124753)", i1 true, metadata !"", i32 0, null, null, metadata !24, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !27, i32 12, metadata !"clang version 2.9 (trunk 124753)", i1 true, metadata !"", i32 0, null, null, metadata !24, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, metadata !2, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
diff --git a/test/CodeGen/X86/dbg-value-range.ll b/test/CodeGen/X86/dbg-value-range.ll
index 6766dbe9edb0..b068bbbe784a 100644
--- a/test/CodeGen/X86/dbg-value-range.ll
+++ b/test/CodeGen/X86/dbg-value-range.ll
@@ -6,7 +6,7 @@ define i32 @bar(%struct.a* nocapture %b) nounwind ssp {
entry:
tail call void @llvm.dbg.value(metadata !{%struct.a* %b}, i64 0, metadata !6), !dbg !13
%tmp1 = getelementptr inbounds %struct.a* %b, i64 0, i32 0, !dbg !14
- %tmp2 = load i32* %tmp1, align 4, !dbg !14, !tbaa !15
+ %tmp2 = load i32* %tmp1, align 4, !dbg !14
tail call void @llvm.dbg.value(metadata !{i32 %tmp2}, i64 0, metadata !11), !dbg !14
%call = tail call i32 (...)* @foo(i32 %tmp2) nounwind , !dbg !18
%add = add nsw i32 %tmp2, 1, !dbg !19
@@ -21,7 +21,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!0 = metadata !{i32 786478, metadata !1, metadata !"bar", metadata !"bar", metadata !"", metadata !1, i32 5, metadata !3, i1 false, i1 true, i32 0, i32 0, i32 0, i32 256, i1 true, i32 (%struct.a*)* @bar, null, null, metadata !21, i32 0} ; [ DW_TAG_subprogram ]
!1 = metadata !{i32 786473, metadata !22} ; [ DW_TAG_file_type ]
-!2 = metadata !{i32 786449, metadata !22, i32 12, metadata !"clang version 2.9 (trunk 122997)", i1 true, metadata !"", i32 0, null, null, metadata !20, null, null} ; [ DW_TAG_compile_unit ]
+!2 = metadata !{i32 786449, metadata !22, i32 12, metadata !"clang version 2.9 (trunk 122997)", i1 true, metadata !"", i32 0, null, null, metadata !20, null, null, null} ; [ DW_TAG_compile_unit ]
!3 = metadata !{i32 786453, metadata !1, metadata !"", metadata !1, i32 0, i64 0, i64 0, i32 0, i32 0, i32 0, metadata !4, i32 0, i32 0} ; [ DW_TAG_subroutine_type ]
!4 = metadata !{metadata !5}
!5 = metadata !{i32 786468, metadata !2, metadata !"int", null, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
@@ -34,9 +34,6 @@ declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
!12 = metadata !{i32 786443, metadata !22, metadata !0, i32 5, i32 22, i32 0} ; [ DW_TAG_lexical_block ]
!13 = metadata !{i32 5, i32 19, metadata !0, null}
!14 = metadata !{i32 6, i32 14, metadata !12, null}
-!15 = metadata !{metadata !"int", metadata !16}
-!16 = metadata !{metadata !"omnipotent char", metadata !17}
-!17 = metadata !{metadata !"Simple C/C++ TBAA", null}
!18 = metadata !{i32 7, i32 2, metadata !12, null}
!19 = metadata !{i32 8, i32 2, metadata !12, null}
!20 = metadata !{metadata !0}
diff --git a/test/CodeGen/X86/fast-cc-merge-stack-adj.ll b/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
index d591f9408b14..5121ed13228d 100644
--- a/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
+++ b/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=generic -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep "add ESP, 8"
+; RUN: llc < %s -mcpu=generic -march=x86 -x86-asm-syntax=intel | FileCheck %s
+; CHECK: add ESP, 8
target triple = "i686-pc-linux-gnu"
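
The other recurring change converts grep-based RUN lines to FileCheck: "| grep PATTERN" becomes "| FileCheck %s" with the pattern moved into a CHECK: directive, "not grep" becomes CHECK-NOT:, and "| count N" becomes N CHECK: lines followed by a CHECK-NOT: on the same pattern. A minimal sketch of the converted shape, on a hypothetical test rather than one from this patch:

    ; RUN: llc < %s -march=x86 | FileCheck %s
    define i32 @bump(i32 %x) nounwind {
    entry:
      %r = add i32 %x, 1
      ret i32 %r
    }
    ; CHECK: inc
    ; CHECK-NOT: inc
    ; CHECK: ret
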
diff --git a/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll b/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
index 9233d3f7c1a0..21fae4a82051 100644
--- a/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
+++ b/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -relocation-model=pic < %s | not grep call
+; RUN: llc -O0 -relocation-model=pic < %s | FileCheck %s
+; CHECK-NOT: call
; rdar://8396318
; Don't emit a PIC base register if no addresses are needed.
diff --git a/test/CodeGen/X86/fast-isel-constpool.ll b/test/CodeGen/X86/fast-isel-constpool.ll
index b3adb802a8c5..bbbaeb233919 100644
--- a/test/CodeGen/X86/fast-isel-constpool.ll
+++ b/test/CodeGen/X86/fast-isel-constpool.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -fast-isel | grep "LCPI0_0(%rip)"
+; RUN: llc < %s -fast-isel | FileCheck %s
+; CHECK: LCPI0_0(%rip)
+
; Make sure fast isel uses rip-relative addressing when required.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin9.0"
diff --git a/test/CodeGen/X86/fast-isel-divrem-x86-64.ll b/test/CodeGen/X86/fast-isel-divrem-x86-64.ll
new file mode 100644
index 000000000000..45494f139e24
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-divrem-x86-64.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+define i64 @test_sdiv64(i64 %dividend, i64 %divisor) nounwind {
+entry:
+ %result = sdiv i64 %dividend, %divisor
+ ret i64 %result
+}
+
+; CHECK: test_sdiv64:
+; CHECK: cqto
+; CHECK: idivq
+
+define i64 @test_srem64(i64 %dividend, i64 %divisor) nounwind {
+entry:
+ %result = srem i64 %dividend, %divisor
+ ret i64 %result
+}
+
+; CHECK: test_srem64:
+; CHECK: cqto
+; CHECK: idivq
+
+define i64 @test_udiv64(i64 %dividend, i64 %divisor) nounwind {
+entry:
+ %result = udiv i64 %dividend, %divisor
+ ret i64 %result
+}
+
+; CHECK: test_udiv64:
+; CHECK: xorl
+; CHECK: divq
+
+define i64 @test_urem64(i64 %dividend, i64 %divisor) nounwind {
+entry:
+ %result = urem i64 %dividend, %divisor
+ ret i64 %result
+}
+
+; CHECK: test_urem64:
+; CHECK: xorl
+; CHECK: divq
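
The CHECK lines above encode the x86-64 division idiom: idivq/divq divide the 128-bit value in RDX:RAX, so the signed tests expect cqto (sign-extend RAX into RDX) while the unsigned tests expect an xorl that zeroes EDX, and with it RDX. The quotient lands in RAX and the remainder in RDX, which is why each div/rem pair checks for the same instruction sequence. Roughly, for test_sdiv64 (a hand sketch of the expected output, not copied from the patch):

    ; movq  %rdi, %rax     ; dividend
    ; cqto                 ; sign-extend RAX into RDX
    ; idivq %rsi           ; quotient -> RAX, remainder -> RDX
    ; retq
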
diff --git a/test/CodeGen/X86/fast-isel-divrem.ll b/test/CodeGen/X86/fast-isel-divrem.ll
new file mode 100644
index 000000000000..7aba7f7b7953
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-divrem.ll
@@ -0,0 +1,122 @@
+; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+define i8 @test_sdiv8(i8 %dividend, i8 %divisor) nounwind {
+entry:
+ %result = sdiv i8 %dividend, %divisor
+ ret i8 %result
+}
+
+; CHECK: test_sdiv8:
+; CHECK: movsbw
+; CHECK: idivb
+
+define i8 @test_srem8(i8 %dividend, i8 %divisor) nounwind {
+entry:
+ %result = srem i8 %dividend, %divisor
+ ret i8 %result
+}
+
+; CHECK: test_srem8:
+; CHECK: movsbw
+; CHECK: idivb
+
+define i8 @test_udiv8(i8 %dividend, i8 %divisor) nounwind {
+entry:
+ %result = udiv i8 %dividend, %divisor
+ ret i8 %result
+}
+
+; CHECK: test_udiv8:
+; CHECK: movzbw
+; CHECK: divb
+
+define i8 @test_urem8(i8 %dividend, i8 %divisor) nounwind {
+entry:
+ %result = urem i8 %dividend, %divisor
+ ret i8 %result
+}
+
+; CHECK: test_urem8:
+; CHECK: movzbw
+; CHECK: divb
+
+define i16 @test_sdiv16(i16 %dividend, i16 %divisor) nounwind {
+entry:
+ %result = sdiv i16 %dividend, %divisor
+ ret i16 %result
+}
+
+; CHECK: test_sdiv16:
+; CHECK: cwtd
+; CHECK: idivw
+
+define i16 @test_srem16(i16 %dividend, i16 %divisor) nounwind {
+entry:
+ %result = srem i16 %dividend, %divisor
+ ret i16 %result
+}
+
+; CHECK: test_srem16:
+; CHECK: cwtd
+; CHECK: idivw
+
+define i16 @test_udiv16(i16 %dividend, i16 %divisor) nounwind {
+entry:
+ %result = udiv i16 %dividend, %divisor
+ ret i16 %result
+}
+
+; CHECK: test_udiv16:
+; CHECK: xorl
+; CHECK: divw
+
+define i16 @test_urem16(i16 %dividend, i16 %divisor) nounwind {
+entry:
+ %result = urem i16 %dividend, %divisor
+ ret i16 %result
+}
+
+; CHECK: test_urem16:
+; CHECK: xorl
+; CHECK: divw
+
+define i32 @test_sdiv32(i32 %dividend, i32 %divisor) nounwind {
+entry:
+ %result = sdiv i32 %dividend, %divisor
+ ret i32 %result
+}
+
+; CHECK: test_sdiv32:
+; CHECK: cltd
+; CHECK: idivl
+
+define i32 @test_srem32(i32 %dividend, i32 %divisor) nounwind {
+entry:
+ %result = srem i32 %dividend, %divisor
+ ret i32 %result
+}
+
+; CHECK: test_srem32:
+; CHECK: cltd
+; CHECK: idivl
+
+define i32 @test_udiv32(i32 %dividend, i32 %divisor) nounwind {
+entry:
+ %result = udiv i32 %dividend, %divisor
+ ret i32 %result
+}
+
+; CHECK: test_udiv32:
+; CHECK: xorl
+; CHECK: divl
+
+define i32 @test_urem32(i32 %dividend, i32 %divisor) nounwind {
+entry:
+ %result = urem i32 %dividend, %divisor
+ ret i32 %result
+}
+
+; CHECK: test_urem32:
+; CHECK: xorl
+; CHECK: divl
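
The 8-bit cases differ from the wider ones because idivb/divb take their 16-bit dividend entirely in AX (quotient in AL, remainder in AH), so the dividend is widened with movsbw/movzbw instead of by preparing the DX/EDX half; from 16 bits up, the cwtd/cltd or xorl setup above applies. For instance (a sketch, assuming the x86-64 run where the i8 arguments arrive in DIL/SIL):

    ; movsbw %dil, %ax     ; sign-extend the dividend into AX
    ; idivb  %sil          ; quotient -> AL, remainder -> AH
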
diff --git a/test/CodeGen/X86/fast-isel-fneg.ll b/test/CodeGen/X86/fast-isel-fneg.ll
index f42a4a245bc8..67fdad299369 100644
--- a/test/CodeGen/X86/fast-isel-fneg.ll
+++ b/test/CodeGen/X86/fast-isel-fneg.ll
@@ -1,5 +1,9 @@
; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
-; RUN: llc < %s -fast-isel -march=x86 -mattr=+sse2 | grep xor | count 2
+; RUN: llc < %s -fast-isel -march=x86 -mattr=+sse2 | FileCheck --check-prefix=SSE2 %s
+
+; SSE2: xor
+; SSE2: xor
+; SSE2-NOT: xor
; CHECK: doo:
; CHECK: xor
diff --git a/test/CodeGen/X86/fast-isel-gv.ll b/test/CodeGen/X86/fast-isel-gv.ll
index cb2464e746b1..de7509568907 100644
--- a/test/CodeGen/X86/fast-isel-gv.ll
+++ b/test/CodeGen/X86/fast-isel-gv.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -fast-isel | grep "_kill@GOTPCREL(%rip)"
+; RUN: llc < %s -fast-isel | FileCheck %s
+; CHECK: _kill@GOTPCREL(%rip)
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin10.0"
@f = global i8 (...)* @kill ; <i8 (...)**> [#uses=1]
diff --git a/test/CodeGen/X86/fast-isel-tailcall.ll b/test/CodeGen/X86/fast-isel-tailcall.ll
index c3e527c4e5b4..79ff79d4bca5 100644
--- a/test/CodeGen/X86/fast-isel-tailcall.ll
+++ b/test/CodeGen/X86/fast-isel-tailcall.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -fast-isel -tailcallopt -march=x86 | not grep add
+; RUN: llc < %s -fast-isel -tailcallopt -march=x86 | FileCheck %s
+; CHECK-NOT: add
; PR4154
; On x86, -tailcallopt changes the ABI so the caller shouldn't readjust
diff --git a/test/CodeGen/X86/fast-isel-unaligned-store.ll b/test/CodeGen/X86/fast-isel-unaligned-store.ll
new file mode 100644
index 000000000000..7ce7f676add0
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-unaligned-store.ll
@@ -0,0 +1,18 @@
+; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+
+define i32 @test_store_32(i32* nocapture %addr, i32 %value) {
+entry:
+ store i32 %value, i32* %addr, align 1
+ ret i32 %value
+}
+
+; CHECK: ret
+
+define i16 @test_store_16(i16* nocapture %addr, i16 %value) {
+entry:
+ store i16 %value, i16* %addr, align 1
+ ret i16 %value
+}
+
+; CHECK: ret
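
This new test only checks that a ret is reached: x86 stores integers to unaligned addresses natively, so an align 1 store must be handled by fast-isel itself, and -fast-isel-abort turns any fall-back to the generic selector into a hard llc failure. The expected code is just the plain store (sketch for the 64-bit triple):

    ; movl %esi, (%rdi)
    ; retq
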
diff --git a/test/CodeGen/X86/fastcall-correct-mangling.ll b/test/CodeGen/X86/fastcall-correct-mangling.ll
index 33b18bb8cc6e..3569d36541f7 100644
--- a/test/CodeGen/X86/fastcall-correct-mangling.ll
+++ b/test/CodeGen/X86/fastcall-correct-mangling.ll
@@ -7,3 +7,8 @@ define x86_fastcallcc void @func(i64 %X, i8 %Y, i8 %G, i16 %Z) {
ret void
}
+define x86_fastcallcc i32 @"\01DoNotMangle"(i32 %a) {
+; CHECK: DoNotMangle:
+entry:
+ ret i32 %a
+}
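
The \01 prefix is LLVM's escape from name decoration: a symbol whose IR name begins with \01 is emitted verbatim, so the usual calling-convention mangling (such as the _MyFunc@0 form checked in the stdcall.ll hunk further down) is suppressed and the test can expect the bare DoNotMangle label. The same pattern recurs below for stdcall (PR14410); a hypothetical example of the mechanism:

    define x86_stdcallcc i32 @"\01KeepMyName"(i32 %a) {
    entry:
      ret i32 %a
    }
    ; emitted as KeepMyName:, not _KeepMyName@4
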
diff --git a/test/CodeGen/X86/fastcc-2.ll b/test/CodeGen/X86/fastcc-2.ll
index d044a2ad9e84..e11cdd19723a 100644
--- a/test/CodeGen/X86/fastcc-2.ll
+++ b/test/CodeGen/X86/fastcc-2.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep movsd
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep mov | count 1
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | FileCheck %s
+; CHECK: movsd
+; CHECK-NOT: mov
define i32 @foo() nounwind {
entry:
diff --git a/test/CodeGen/X86/fastcc-byval.ll b/test/CodeGen/X86/fastcc-byval.ll
index f1204d677a55..e6828e42827c 100644
--- a/test/CodeGen/X86/fastcc-byval.ll
+++ b/test/CodeGen/X86/fastcc-byval.ll
@@ -1,4 +1,8 @@
-; RUN: llc < %s -tailcallopt=false | grep "movl[[:space:]]*8(%esp), %eax" | count 2
+; RUN: llc < %s -tailcallopt=false | FileCheck %s
+; CHECK: movl 8(%esp), %eax
+; CHECK: movl 8(%esp), %eax
+; CHECK-NOT: movl 8(%esp), %eax
+
; PR3122
; rdar://6400815
diff --git a/test/CodeGen/X86/fastcc-sret.ll b/test/CodeGen/X86/fastcc-sret.ll
index d45741884c7d..97814dbabdcc 100644
--- a/test/CodeGen/X86/fastcc-sret.ll
+++ b/test/CodeGen/X86/fastcc-sret.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -tailcallopt=false | grep ret | not grep 4
+; RUN: llc < %s -march=x86 -tailcallopt=false | FileCheck %s
%struct.foo = type { [4 x i32] }
@@ -9,6 +9,8 @@ entry:
store i32 1, i32* %tmp3, align 8
ret void
}
+; CHECK: bar
+; CHECK: ret{{[^4]*$}}
@dst = external global i32
@@ -21,3 +23,5 @@ define void @foo() nounwind {
store i32 %tmp6, i32* @dst
ret void
}
+; CHECK: foo
+; CHECK: ret{{[^4]*$}}
diff --git a/test/CodeGen/X86/fastcc3struct.ll b/test/CodeGen/X86/fastcc3struct.ll
index 84f8ef6cf360..98dc2f5a1c78 100644
--- a/test/CodeGen/X86/fastcc3struct.ll
+++ b/test/CodeGen/X86/fastcc3struct.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=x86 -o %t
-; RUN: grep "movl .48, %ecx" %t
-; RUN: grep "movl .24, %edx" %t
-; RUN: grep "movl .12, %eax" %t
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; CHECK: movl {{.}}12, %eax
+; CHECK: movl {{.}}24, %edx
+; CHECK: movl {{.}}48, %ecx
%0 = type { i32, i32, i32 }
diff --git a/test/CodeGen/X86/fold-imm.ll b/test/CodeGen/X86/fold-imm.ll
index f1fcbcfd13b4..16e4786979b9 100644
--- a/test/CodeGen/X86/fold-imm.ll
+++ b/test/CodeGen/X86/fold-imm.ll
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=x86 | grep inc
-; RUN: llc < %s -march=x86 | grep add | grep 4
+; RUN: llc < %s -march=x86 | FileCheck %s
define i32 @test(i32 %X) nounwind {
entry:
@@ -7,8 +6,16 @@ entry:
ret i32 %0
}
+; CHECK: test
+; CHECK: inc
+; CHECK: ret
+
define i32 @test2(i32 %X) nounwind {
entry:
%0 = add i32 %X, 4
ret i32 %0
}
+
+; CHECK: test2
+; CHECK: {{add.*4.*$}}
+; CHECK: ret
diff --git a/test/CodeGen/X86/fp-elim-and-no-fp-elim.ll b/test/CodeGen/X86/fp-elim-and-no-fp-elim.ll
new file mode 100644
index 000000000000..3468a457e95f
--- /dev/null
+++ b/test/CodeGen/X86/fp-elim-and-no-fp-elim.ll
@@ -0,0 +1,32 @@
+; RUN: llc < %s -mtriple x86_64-apple-darwin | FileCheck %s
+
+define void @bar(i32 %argc) #0 {
+; CHECK: bar:
+; CHECK: pushq %rbp
+entry:
+ %conv = sitofp i32 %argc to double
+ %mul = fmul double %conv, 3.792700e+01
+ %conv1 = fptrunc double %mul to float
+ %div = fdiv double 9.273700e+02, %conv
+ %conv3 = fptrunc double %div to float
+ tail call void @foo(float %conv1, float %conv3)
+ ret void
+}
+
+define void @qux(i32 %argc) #1 {
+; CHECK: qux:
+; CHECK-NOT: pushq %rbp
+entry:
+ %conv = sitofp i32 %argc to double
+ %mul = fmul double %conv, 3.792700e+01
+ %conv1 = fptrunc double %mul to float
+ %div = fdiv double 9.273700e+02, %conv
+ %conv3 = fptrunc double %div to float
+ tail call void @foo(float %conv1, float %conv3)
+ ret void
+}
+
+declare void @foo(float, float)
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
+attributes #1 = { "no-frame-pointer-elim"="false" }
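
This new test exercises the per-function "no-frame-pointer-elim" attribute, the function-level counterpart of the global frame-pointer-elimination switch: two otherwise-identical bodies check that the prologue keeps RBP when the attribute is "true" and may drop it when it is "false". In assembly terms the only difference is the prologue (sketch):

    ; bar (attributes #0):  pushq %rbp / movq %rsp, %rbp
    ; qux (attributes #1):  no frame-pointer setup required
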
diff --git a/test/CodeGen/X86/fp-immediate-shorten.ll b/test/CodeGen/X86/fp-immediate-shorten.ll
index 62d81003a62d..dc59c5a44b4e 100644
--- a/test/CodeGen/X86/fp-immediate-shorten.ll
+++ b/test/CodeGen/X86/fp-immediate-shorten.ll
@@ -1,7 +1,8 @@
;; Test that this FP immediate is stored in the constant pool as a float.
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | \
-; RUN: grep ".long.1123418112"
+; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | FileCheck %s
+
+; CHECK: {{.long.1123418112}}
define double @D() {
ret double 1.230000e+02
diff --git a/test/CodeGen/X86/fp_load_cast_fold.ll b/test/CodeGen/X86/fp_load_cast_fold.ll
index a160ac694429..72ea12f9430e 100644
--- a/test/CodeGen/X86/fp_load_cast_fold.ll
+++ b/test/CodeGen/X86/fp_load_cast_fold.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | grep fild | not grep ESP
+; RUN: llc < %s -march=x86 | FileCheck %s
define double @short(i16* %P) {
%V = load i16* %P ; <i16> [#uses=1]
@@ -18,3 +18,9 @@ define double @long(i64* %P) {
ret double %V2
}
+; CHECK: long
+; CHECK: fild
+; CHECK-NOT: ESP
+; CHECK-NOT: esp
+; CHECK: {{$}}
+; CHECK: ret
diff --git a/test/CodeGen/X86/long-setcc.ll b/test/CodeGen/X86/long-setcc.ll
index e0165fb01b53..13046d8b3dec 100644
--- a/test/CodeGen/X86/long-setcc.ll
+++ b/test/CodeGen/X86/long-setcc.ll
@@ -1,18 +1,31 @@
-; RUN: llc < %s -march=x86 | grep cmp | count 1
-; RUN: llc < %s -march=x86 | grep shr | count 1
-; RUN: llc < %s -march=x86 | grep xor | count 1
+; RUN: llc < %s -march=x86 | FileCheck %s
define i1 @t1(i64 %x) nounwind {
%B = icmp slt i64 %x, 0
ret i1 %B
}
+; CHECK: t1
+; CHECK: shrl
+; CHECK-NOT: shrl
+; CHECK: ret
+
define i1 @t2(i64 %x) nounwind {
%tmp = icmp ult i64 %x, 4294967296
ret i1 %tmp
}
+; CHECK: t2
+; CHECK: cmp
+; CHECK-NOT: cmp
+; CHECK: ret
+
define i1 @t3(i32 %x) nounwind {
%tmp = icmp ugt i32 %x, -1
ret i1 %tmp
}
+
+; CHECK: t3
+; CHECK: xor
+; CHECK-NOT: xor
+; CHECK: ret
diff --git a/test/CodeGen/X86/lsr-normalization.ll b/test/CodeGen/X86/lsr-normalization.ll
index 932141d0448e..bbf8f010efde 100644
--- a/test/CodeGen/X86/lsr-normalization.ll
+++ b/test/CodeGen/X86/lsr-normalization.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 | grep div | count 1
+; RUN: llc < %s -march=x86-64 | FileCheck %s
; rdar://8168938
; This testcase involves SCEV normalization with the exit value from
@@ -6,6 +6,9 @@
; loop. The expression should be properly normalized and simplified,
; and require only a single division.
+; CHECK: div
+; CHECK-NOT: div
+
%0 = type { %0*, %0* }
@0 = private constant [13 x i8] c"Result: %lu\0A\00" ; <[13 x i8]*> [#uses=1]
diff --git a/test/CodeGen/X86/lsr-static-addr.ll b/test/CodeGen/X86/lsr-static-addr.ll
index 6566f563784d..b2aea90500c4 100644
--- a/test/CodeGen/X86/lsr-static-addr.ll
+++ b/test/CodeGen/X86/lsr-static-addr.ll
@@ -17,7 +17,7 @@
; ATOM-NEXT: movsd A(,%rax,8)
; ATOM-NEXT: mulsd
; ATOM-NEXT: movsd
-; ATOM-NEXT: incq %rax
+; ATOM-NEXT: leaq 1(%rax), %rax
@A = external global [0 x double]
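
The loosened ATOM-NEXT expectation reflects a change in the Atom scheduling model: the induction-variable bump is now emitted as leaq 1(%rax), %rax rather than incq %rax, presumably because the LEA form is cheaper on that in-order core. The two are equivalent apart from flags:

    ; incq %rax             ; rax += 1, updates EFLAGS
    ; leaq 1(%rax), %rax    ; rax += 1, leaves EFLAGS untouched
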
diff --git a/test/CodeGen/X86/misched-copy.ll b/test/CodeGen/X86/misched-copy.ll
new file mode 100644
index 000000000000..0450cfb53908
--- /dev/null
+++ b/test/CodeGen/X86/misched-copy.ll
@@ -0,0 +1,49 @@
+; REQUIRES: asserts
+; RUN: llc < %s -march=x86 -mcpu=core2 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+;
+; Test scheduling of copy instructions.
+;
+; Argument copies should be hoisted to the top of the block.
+; Return copies should be sunk to the end.
+; MUL_HiLo PhysReg use copies should be just above the mul.
+; MUL_HiLo PhysReg def copies should be just below the mul.
+;
+; CHECK: *** Final schedule for BB#1 ***
+; CHECK-NEXT: %EAX<def> = COPY
+; CHECK: MUL32r %vreg{{[0-9]+}}, %EAX<imp-def>, %EDX<imp-def>, %EFLAGS<imp-def,dead>, %EAX<imp-use>;
+; CHECK-NEXT: COPY %E{{[AD]}}X;
+; CHECK-NEXT: COPY %E{{[AD]}}X;
+; CHECK: DIVSSrm
+define i64 @mulhoist(i32 %a, i32 %b) #0 {
+entry:
+ br label %body
+
+body:
+ %convb = sitofp i32 %b to float
+ ; Generates an iMUL64r to legalize types.
+ %aa = zext i32 %a to i64
+ %mul = mul i64 %aa, 74383
+ ; Do some dependent long latency stuff.
+ %trunc = trunc i64 %mul to i32
+ %convm = sitofp i32 %trunc to float
+ %divm = fdiv float %convm, 0.75
+ ;%addmb = fadd float %divm, %convb
+ ;%divmb = fdiv float %addmb, 0.125
+ ; Do some independent long latency stuff.
+ %conva = sitofp i32 %a to float
+ %diva = fdiv float %conva, 0.75
+ %addab = fadd float %diva, %convb
+ %divab = fdiv float %addab, 0.125
+ br label %end
+
+end:
+ %val = fptosi float %divab to i64
+ %add = add i64 %mul, %val
+ ret i64 %add
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = metadata !{metadata !"float", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/misched-matmul.ll b/test/CodeGen/X86/misched-matmul.ll
index 0f6e442b1a8d..15e8a0ad6f4b 100644
--- a/test/CodeGen/X86/misched-matmul.ll
+++ b/test/CodeGen/X86/misched-matmul.ll
@@ -12,86 +12,86 @@
define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture %B) #0 {
entry:
%arrayidx1.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 0
- %0 = load double* %arrayidx1.i, align 8, !tbaa !0
+ %0 = load double* %arrayidx1.i, align 8
%arrayidx3.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 0
- %1 = load double* %arrayidx3.i, align 8, !tbaa !0
+ %1 = load double* %arrayidx3.i, align 8
%mul.i = fmul double %0, %1
%arrayidx5.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 1
- %2 = load double* %arrayidx5.i, align 8, !tbaa !0
+ %2 = load double* %arrayidx5.i, align 8
%arrayidx7.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 0
- %3 = load double* %arrayidx7.i, align 8, !tbaa !0
+ %3 = load double* %arrayidx7.i, align 8
%mul8.i = fmul double %2, %3
%add.i = fadd double %mul.i, %mul8.i
%arrayidx10.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 2
- %4 = load double* %arrayidx10.i, align 8, !tbaa !0
+ %4 = load double* %arrayidx10.i, align 8
%arrayidx12.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 0
- %5 = load double* %arrayidx12.i, align 8, !tbaa !0
+ %5 = load double* %arrayidx12.i, align 8
%mul13.i = fmul double %4, %5
%add14.i = fadd double %add.i, %mul13.i
%arrayidx16.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 3
- %6 = load double* %arrayidx16.i, align 8, !tbaa !0
+ %6 = load double* %arrayidx16.i, align 8
%arrayidx18.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 0
- %7 = load double* %arrayidx18.i, align 8, !tbaa !0
+ %7 = load double* %arrayidx18.i, align 8
%mul19.i = fmul double %6, %7
%add20.i = fadd double %add14.i, %mul19.i
%arrayidx25.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 1
- %8 = load double* %arrayidx25.i, align 8, !tbaa !0
+ %8 = load double* %arrayidx25.i, align 8
%mul26.i = fmul double %0, %8
%arrayidx30.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 1
- %9 = load double* %arrayidx30.i, align 8, !tbaa !0
+ %9 = load double* %arrayidx30.i, align 8
%mul31.i = fmul double %2, %9
%add32.i = fadd double %mul26.i, %mul31.i
%arrayidx36.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 1
- %10 = load double* %arrayidx36.i, align 8, !tbaa !0
+ %10 = load double* %arrayidx36.i, align 8
%mul37.i = fmul double %4, %10
%add38.i = fadd double %add32.i, %mul37.i
%arrayidx42.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 1
- %11 = load double* %arrayidx42.i, align 8, !tbaa !0
+ %11 = load double* %arrayidx42.i, align 8
%mul43.i = fmul double %6, %11
%add44.i = fadd double %add38.i, %mul43.i
%arrayidx49.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 2
- %12 = load double* %arrayidx49.i, align 8, !tbaa !0
+ %12 = load double* %arrayidx49.i, align 8
%mul50.i = fmul double %0, %12
%arrayidx54.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 2
- %13 = load double* %arrayidx54.i, align 8, !tbaa !0
+ %13 = load double* %arrayidx54.i, align 8
%mul55.i = fmul double %2, %13
%add56.i = fadd double %mul50.i, %mul55.i
%arrayidx60.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 2
- %14 = load double* %arrayidx60.i, align 8, !tbaa !0
+ %14 = load double* %arrayidx60.i, align 8
%mul61.i = fmul double %4, %14
%add62.i = fadd double %add56.i, %mul61.i
%arrayidx66.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 2
- %15 = load double* %arrayidx66.i, align 8, !tbaa !0
+ %15 = load double* %arrayidx66.i, align 8
%mul67.i = fmul double %6, %15
%add68.i = fadd double %add62.i, %mul67.i
%arrayidx73.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 3
- %16 = load double* %arrayidx73.i, align 8, !tbaa !0
+ %16 = load double* %arrayidx73.i, align 8
%mul74.i = fmul double %0, %16
%arrayidx78.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 3
- %17 = load double* %arrayidx78.i, align 8, !tbaa !0
+ %17 = load double* %arrayidx78.i, align 8
%mul79.i = fmul double %2, %17
%add80.i = fadd double %mul74.i, %mul79.i
%arrayidx84.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 3
- %18 = load double* %arrayidx84.i, align 8, !tbaa !0
+ %18 = load double* %arrayidx84.i, align 8
%mul85.i = fmul double %4, %18
%add86.i = fadd double %add80.i, %mul85.i
%arrayidx90.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 3
- %19 = load double* %arrayidx90.i, align 8, !tbaa !0
+ %19 = load double* %arrayidx90.i, align 8
%mul91.i = fmul double %6, %19
%add92.i = fadd double %add86.i, %mul91.i
%arrayidx95.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 0
- %20 = load double* %arrayidx95.i, align 8, !tbaa !0
+ %20 = load double* %arrayidx95.i, align 8
%mul98.i = fmul double %1, %20
%arrayidx100.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 1
- %21 = load double* %arrayidx100.i, align 8, !tbaa !0
+ %21 = load double* %arrayidx100.i, align 8
%mul103.i = fmul double %3, %21
%add104.i = fadd double %mul98.i, %mul103.i
%arrayidx106.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 2
- %22 = load double* %arrayidx106.i, align 8, !tbaa !0
+ %22 = load double* %arrayidx106.i, align 8
%mul109.i = fmul double %5, %22
%add110.i = fadd double %add104.i, %mul109.i
%arrayidx112.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 3
- %23 = load double* %arrayidx112.i, align 8, !tbaa !0
+ %23 = load double* %arrayidx112.i, align 8
%mul115.i = fmul double %7, %23
%add116.i = fadd double %add110.i, %mul115.i
%mul122.i = fmul double %8, %20
@@ -116,18 +116,18 @@ entry:
%mul187.i = fmul double %19, %23
%add188.i = fadd double %add182.i, %mul187.i
%arrayidx191.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 0
- %24 = load double* %arrayidx191.i, align 8, !tbaa !0
+ %24 = load double* %arrayidx191.i, align 8
%mul194.i = fmul double %1, %24
%arrayidx196.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 1
- %25 = load double* %arrayidx196.i, align 8, !tbaa !0
+ %25 = load double* %arrayidx196.i, align 8
%mul199.i = fmul double %3, %25
%add200.i = fadd double %mul194.i, %mul199.i
%arrayidx202.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 2
- %26 = load double* %arrayidx202.i, align 8, !tbaa !0
+ %26 = load double* %arrayidx202.i, align 8
%mul205.i = fmul double %5, %26
%add206.i = fadd double %add200.i, %mul205.i
%arrayidx208.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 3
- %27 = load double* %arrayidx208.i, align 8, !tbaa !0
+ %27 = load double* %arrayidx208.i, align 8
%mul211.i = fmul double %7, %27
%add212.i = fadd double %add206.i, %mul211.i
%mul218.i = fmul double %8, %24
@@ -152,18 +152,18 @@ entry:
%mul283.i = fmul double %19, %27
%add284.i = fadd double %add278.i, %mul283.i
%arrayidx287.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 0
- %28 = load double* %arrayidx287.i, align 8, !tbaa !0
+ %28 = load double* %arrayidx287.i, align 8
%mul290.i = fmul double %1, %28
%arrayidx292.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 1
- %29 = load double* %arrayidx292.i, align 8, !tbaa !0
+ %29 = load double* %arrayidx292.i, align 8
%mul295.i = fmul double %3, %29
%add296.i = fadd double %mul290.i, %mul295.i
%arrayidx298.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 2
- %30 = load double* %arrayidx298.i, align 8, !tbaa !0
+ %30 = load double* %arrayidx298.i, align 8
%mul301.i = fmul double %5, %30
%add302.i = fadd double %add296.i, %mul301.i
%arrayidx304.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 3
- %31 = load double* %arrayidx304.i, align 8, !tbaa !0
+ %31 = load double* %arrayidx304.i, align 8
%mul307.i = fmul double %7, %31
%add308.i = fadd double %add302.i, %mul307.i
%mul314.i = fmul double %8, %28
@@ -222,7 +222,3 @@ entry:
}
attributes #0 = { noinline nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!0 = metadata !{metadata !"double", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/misched-matrix.ll b/test/CodeGen/X86/misched-matrix.ll
index f5566e5e5de9..4dc95c5e9326 100644
--- a/test/CodeGen/X86/misched-matrix.ll
+++ b/test/CodeGen/X86/misched-matrix.ll
@@ -94,57 +94,57 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx8 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 0
- %tmp = load i32* %arrayidx8, align 4, !tbaa !0
+ %tmp = load i32* %arrayidx8, align 4
%arrayidx12 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 0
- %tmp1 = load i32* %arrayidx12, align 4, !tbaa !0
+ %tmp1 = load i32* %arrayidx12, align 4
%arrayidx8.1 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 1
- %tmp2 = load i32* %arrayidx8.1, align 4, !tbaa !0
+ %tmp2 = load i32* %arrayidx8.1, align 4
%arrayidx12.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 0
- %tmp3 = load i32* %arrayidx12.1, align 4, !tbaa !0
+ %tmp3 = load i32* %arrayidx12.1, align 4
%arrayidx8.2 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 2
- %tmp4 = load i32* %arrayidx8.2, align 4, !tbaa !0
+ %tmp4 = load i32* %arrayidx8.2, align 4
%arrayidx12.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 0
- %tmp5 = load i32* %arrayidx12.2, align 4, !tbaa !0
+ %tmp5 = load i32* %arrayidx12.2, align 4
%arrayidx8.3 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 3
- %tmp6 = load i32* %arrayidx8.3, align 4, !tbaa !0
+ %tmp6 = load i32* %arrayidx8.3, align 4
%arrayidx12.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 0
- %tmp8 = load i32* %arrayidx8, align 4, !tbaa !0
+ %tmp8 = load i32* %arrayidx8, align 4
%arrayidx12.137 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 1
- %tmp9 = load i32* %arrayidx12.137, align 4, !tbaa !0
- %tmp10 = load i32* %arrayidx8.1, align 4, !tbaa !0
+ %tmp9 = load i32* %arrayidx12.137, align 4
+ %tmp10 = load i32* %arrayidx8.1, align 4
%arrayidx12.1.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 1
- %tmp11 = load i32* %arrayidx12.1.1, align 4, !tbaa !0
- %tmp12 = load i32* %arrayidx8.2, align 4, !tbaa !0
+ %tmp11 = load i32* %arrayidx12.1.1, align 4
+ %tmp12 = load i32* %arrayidx8.2, align 4
%arrayidx12.2.1 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 1
- %tmp13 = load i32* %arrayidx12.2.1, align 4, !tbaa !0
- %tmp14 = load i32* %arrayidx8.3, align 4, !tbaa !0
+ %tmp13 = load i32* %arrayidx12.2.1, align 4
+ %tmp14 = load i32* %arrayidx8.3, align 4
%arrayidx12.3.1 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 1
- %tmp15 = load i32* %arrayidx12.3.1, align 4, !tbaa !0
- %tmp16 = load i32* %arrayidx8, align 4, !tbaa !0
+ %tmp15 = load i32* %arrayidx12.3.1, align 4
+ %tmp16 = load i32* %arrayidx8, align 4
%arrayidx12.239 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 2
- %tmp17 = load i32* %arrayidx12.239, align 4, !tbaa !0
- %tmp18 = load i32* %arrayidx8.1, align 4, !tbaa !0
+ %tmp17 = load i32* %arrayidx12.239, align 4
+ %tmp18 = load i32* %arrayidx8.1, align 4
%arrayidx12.1.2 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 2
- %tmp19 = load i32* %arrayidx12.1.2, align 4, !tbaa !0
- %tmp20 = load i32* %arrayidx8.2, align 4, !tbaa !0
+ %tmp19 = load i32* %arrayidx12.1.2, align 4
+ %tmp20 = load i32* %arrayidx8.2, align 4
%arrayidx12.2.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 2
- %tmp21 = load i32* %arrayidx12.2.2, align 4, !tbaa !0
- %tmp22 = load i32* %arrayidx8.3, align 4, !tbaa !0
+ %tmp21 = load i32* %arrayidx12.2.2, align 4
+ %tmp22 = load i32* %arrayidx8.3, align 4
%arrayidx12.3.2 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 2
- %tmp23 = load i32* %arrayidx12.3.2, align 4, !tbaa !0
- %tmp24 = load i32* %arrayidx8, align 4, !tbaa !0
+ %tmp23 = load i32* %arrayidx12.3.2, align 4
+ %tmp24 = load i32* %arrayidx8, align 4
%arrayidx12.341 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 3
- %tmp25 = load i32* %arrayidx12.341, align 4, !tbaa !0
- %tmp26 = load i32* %arrayidx8.1, align 4, !tbaa !0
+ %tmp25 = load i32* %arrayidx12.341, align 4
+ %tmp26 = load i32* %arrayidx8.1, align 4
%arrayidx12.1.3 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 3
- %tmp27 = load i32* %arrayidx12.1.3, align 4, !tbaa !0
- %tmp28 = load i32* %arrayidx8.2, align 4, !tbaa !0
+ %tmp27 = load i32* %arrayidx12.1.3, align 4
+ %tmp28 = load i32* %arrayidx8.2, align 4
%arrayidx12.2.3 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 3
- %tmp29 = load i32* %arrayidx12.2.3, align 4, !tbaa !0
- %tmp30 = load i32* %arrayidx8.3, align 4, !tbaa !0
+ %tmp29 = load i32* %arrayidx12.2.3, align 4
+ %tmp30 = load i32* %arrayidx8.3, align 4
%arrayidx12.3.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 3
- %tmp31 = load i32* %arrayidx12.3.3, align 4, !tbaa !0
- %tmp7 = load i32* %arrayidx12.3, align 4, !tbaa !0
+ %tmp31 = load i32* %arrayidx12.3.3, align 4
+ %tmp7 = load i32* %arrayidx12.3, align 4
%mul = mul nsw i32 %tmp1, %tmp
%mul.1 = mul nsw i32 %tmp3, %tmp2
%mul.2 = mul nsw i32 %tmp5, %tmp4
@@ -174,13 +174,13 @@ for.body: ; preds = %for.body, %entry
%add.2.3 = add nsw i32 %mul.2.3, %add.1.3
%add.3.3 = add nsw i32 %mul.3.3, %add.2.3
%arrayidx16 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 0
- store i32 %add.3, i32* %arrayidx16, align 4, !tbaa !0
+ store i32 %add.3, i32* %arrayidx16, align 4
%arrayidx16.1 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 1
- store i32 %add.3.1, i32* %arrayidx16.1, align 4, !tbaa !0
+ store i32 %add.3.1, i32* %arrayidx16.1, align 4
%arrayidx16.2 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 2
- store i32 %add.3.2, i32* %arrayidx16.2, align 4, !tbaa !0
+ store i32 %add.3.2, i32* %arrayidx16.2, align 4
%arrayidx16.3 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 3
- store i32 %add.3.3, i32* %arrayidx16.3, align 4, !tbaa !0
+ store i32 %add.3.3, i32* %arrayidx16.3, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 4
@@ -189,7 +189,3 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/mmx-pinsrw.ll b/test/CodeGen/X86/mmx-pinsrw.ll
index d9c7c678d1b2..33dd2eb81cfa 100644
--- a/test/CodeGen/X86/mmx-pinsrw.ll
+++ b/test/CodeGen/X86/mmx-pinsrw.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | grep pinsr
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
; PR2562
+; CHECK: pinsr
+
external global i16 ; <i16*>:0 [#uses=1]
external global <4 x i16> ; <<4 x i16>*>:1 [#uses=2]
diff --git a/test/CodeGen/X86/mul-legalize.ll b/test/CodeGen/X86/mul-legalize.ll
index 069737d4d10d..339de3104335 100644
--- a/test/CodeGen/X86/mul-legalize.ll
+++ b/test/CodeGen/X86/mul-legalize.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=x86 | grep 24576
+; RUN: llc < %s -march=x86 | FileCheck %s
; PR2135
+; CHECK: 24576
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
@.str = constant [13 x i8] c"c45531m.adb\00\00"
diff --git a/test/CodeGen/X86/negative_zero.ll b/test/CodeGen/X86/negative_zero.ll
index 29474c21f244..c8c2cd753e08 100644
--- a/test/CodeGen/X86/negative_zero.ll
+++ b/test/CodeGen/X86/negative_zero.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | grep fchs
+; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | FileCheck %s
+
+; CHECK: fchs
define double @T() {
diff --git a/test/CodeGen/X86/no-compact-unwind.ll b/test/CodeGen/X86/no-compact-unwind.ll
new file mode 100644
index 000000000000..627f7da9f707
--- /dev/null
+++ b/test/CodeGen/X86/no-compact-unwind.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple x86_64-apple-macosx10.8.0 -disable-cfi | FileCheck %s
+
+%"struct.dyld::MappedRanges" = type { [400 x %struct.anon], %"struct.dyld::MappedRanges"* }
+%struct.anon = type { %class.ImageLoader*, i64, i64 }
+%class.ImageLoader = type { i32 (...)**, i8*, i8*, i32, i64, i64, i32, i32, %"struct.ImageLoader::recursive_lock"*, i16, i16, [4 x i8] }
+%"struct.ImageLoader::recursive_lock" = type { i32, i32 }
+
+@G1 = external hidden global %"struct.dyld::MappedRanges", align 8
+
+declare void @OSMemoryBarrier() optsize
+
+; This compact unwind encoding indicates that we could not generate correct
+; compact unwind encodings for this function. This then defaults to using the
+; DWARF EH frame.
+;
+; CHECK: .section __LD,__compact_unwind,regular,debug
+; CHECK: .quad _func
+; CHECK: .long 67108864 ## Compact Unwind Encoding: 0x4000000
+; CHECK: .quad 0 ## Personality Function
+; CHECK: .quad 0 ## LSDA
+;
+define void @func(%class.ImageLoader* %image) optsize ssp uwtable {
+entry:
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc10, %entry
+ %p.019 = phi %"struct.dyld::MappedRanges"* [ @G1, %entry ], [ %1, %for.inc10 ]
+ br label %for.body3
+
+for.body3: ; preds = %for.inc, %for.cond1.preheader
+ %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
+ %image4 = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
+ %0 = load %class.ImageLoader** %image4, align 8
+ %cmp5 = icmp eq %class.ImageLoader* %0, %image
+ br i1 %cmp5, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body3
+ tail call void @OSMemoryBarrier() optsize
+ store %class.ImageLoader* null, %class.ImageLoader** %image4, align 8
+ br label %for.inc
+
+for.inc: ; preds = %if.then, %for.body3
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 400
+ br i1 %exitcond, label %for.inc10, label %for.body3
+
+for.inc10: ; preds = %for.inc
+ %next = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
+ %1 = load %"struct.dyld::MappedRanges"** %next, align 8
+ %cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
+ br i1 %cmp, label %for.end11, label %for.cond1.preheader
+
+for.end11: ; preds = %for.inc10
+ ret void
+}
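
The expected value 67108864 is 0x4000000, the x86-64 "DWARF mode" compact-unwind encoding (mode 4 in bits 24-27 under Apple's compact_unwind_encoding.h scheme): it records that no compact encoding could describe this frame, directing the unwinder to the function's __eh_frame entry, exactly as the comment above the function states. The emitted entry, annotated:

    ; .quad _func          ; function start
    ; .long 67108864       ; UNWIND_X86_64_MODE_DWARF -> use __eh_frame
    ; .quad 0              ; no personality function
    ; .quad 0              ; no LSDA
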
diff --git a/test/CodeGen/X86/nosse-error1.ll b/test/CodeGen/X86/nosse-error1.ll
index 16cbb732af0e..cddff3f2753b 100644
--- a/test/CodeGen/X86/nosse-error1.ll
+++ b/test/CodeGen/X86/nosse-error1.ll
@@ -1,7 +1,10 @@
-; RUN: llvm-as < %s > %t1
-; RUN: not llc -march=x86-64 -mattr=-sse < %t1 2> %t2
-; RUN: grep "SSE register return with SSE disabled" %t2
-; RUN: llc -march=x86-64 < %t1 | grep xmm
+; RUN: llc < %s -march=x86-64 -mattr=-sse 2>&1 | FileCheck --check-prefix NOSSE %s
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; NOSSE: {{SSE register return with SSE disabled}}
+
+; CHECK: xmm
+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"
@f = external global float ; <float*> [#uses=4]
diff --git a/test/CodeGen/X86/nosse-error2.ll b/test/CodeGen/X86/nosse-error2.ll
index 45a5eaf3a415..fc9ba010e19d 100644
--- a/test/CodeGen/X86/nosse-error2.ll
+++ b/test/CodeGen/X86/nosse-error2.ll
@@ -1,7 +1,10 @@
-; RUN: llvm-as < %s > %t1
-; RUN: not llc -march=x86 -mcpu=i686 -mattr=-sse < %t1 2> %t2
-; RUN: grep "SSE register return with SSE disabled" %t2
-; RUN: llc -march=x86 -mcpu=i686 -mattr=+sse < %t1 | grep xmm
+; RUN: llc < %s -march=x86 -mcpu=i686 -mattr=-sse 2>&1 | FileCheck --check-prefix NOSSE %s
+; RUN: llc < %s -march=x86 -mcpu=i686 -mattr=+sse | FileCheck %s
+
+; NOSSE: {{SSE register return with SSE disabled}}
+
+; CHECK: xmm
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-unknown-linux-gnu"
@f = external global float ; <float*> [#uses=4]
diff --git a/test/CodeGen/X86/optimize-max-2.ll b/test/CodeGen/X86/optimize-max-2.ll
index 8851c5b1a305..10ab831c1e3d 100644
--- a/test/CodeGen/X86/optimize-max-2.ll
+++ b/test/CodeGen/X86/optimize-max-2.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep cmov %t | count 2
-; RUN: grep jne %t | count 1
+; RUN: llc < %s -march=x86-64 | grep cmov | count 2
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; CHECK: jne
+; CHECK-NOT: jne
; LSR's OptimizeMax function shouldn't try to eliminate this max, because
; it has three operands.
diff --git a/test/CodeGen/X86/peep-test-2.ll b/test/CodeGen/X86/peep-test-2.ll
index 274517297592..e4bafbb6ffab 100644
--- a/test/CodeGen/X86/peep-test-2.ll
+++ b/test/CodeGen/X86/peep-test-2.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 | grep testl
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; CHECK: testl
; It's tempting to eliminate the testl instruction here and just use the
; EFLAGS value from the incl, however it can't be known whether the add
diff --git a/test/CodeGen/X86/phys_subreg_coalesce.ll b/test/CodeGen/X86/phys_subreg_coalesce.ll
index 2c855ce8da63..8b2f61e1e2d2 100644
--- a/test/CodeGen/X86/phys_subreg_coalesce.ll
+++ b/test/CodeGen/X86/phys_subreg_coalesce.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+sse2 | not grep movl
+; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+sse2 | FileCheck %s
+
+; CHECK-NOT: movl
%struct.dpoint = type { double, double }
diff --git a/test/CodeGen/X86/pr12889.ll b/test/CodeGen/X86/pr12889.ll
index 331d8f907d58..428e9b760b70 100644
--- a/test/CodeGen/X86/pr12889.ll
+++ b/test/CodeGen/X86/pr12889.ll
@@ -6,13 +6,10 @@ target triple = "x86_64-unknown-linux-gnu"
define void @func() nounwind uwtable {
entry:
- %0 = load i8* @c0, align 1, !tbaa !0
+ %0 = load i8* @c0, align 1
%tobool = icmp ne i8 %0, 0
%conv = zext i1 %tobool to i8
%storemerge = shl nuw nsw i8 %conv, %conv
store i8 %storemerge, i8* @c0, align 1
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/pr2656.ll b/test/CodeGen/X86/pr2656.ll
index f0e31f7f5fdc..1122d2d57114 100644
--- a/test/CodeGen/X86/pr2656.ll
+++ b/test/CodeGen/X86/pr2656.ll
@@ -1,6 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep "xorps.*sp" | count 1
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
; PR2656
+; CHECK: {{xorps.*sp}}
+; CHECK-NOT: {{xorps.*sp}}
+
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9.4.0"
%struct.anon = type <{ float, float }>
diff --git a/test/CodeGen/X86/private-2.ll b/test/CodeGen/X86/private-2.ll
index 8aa744ead8ca..4413cee23b33 100644
--- a/test/CodeGen/X86/private-2.ll
+++ b/test/CodeGen/X86/private-2.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | grep L__ZZ20
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
; Quote should be outside of private prefix.
; rdar://6855766x
+; CHECK: L__ZZ20
+
%struct.A = type { i32*, i32 }
@"_ZZ20-[Example1 whatever]E4C.91" = private constant %struct.A { i32* null, i32 1 } ; <%struct.A*> [#uses=1]
diff --git a/test/CodeGen/X86/rd-mod-wr-eflags.ll b/test/CodeGen/X86/rd-mod-wr-eflags.ll
index 8ef9b5dec0d5..0bf601bc1c42 100644
--- a/test/CodeGen/X86/rd-mod-wr-eflags.ll
+++ b/test/CodeGen/X86/rd-mod-wr-eflags.ll
@@ -8,9 +8,9 @@ entry:
; CHECK: decq (%{{rdi|rcx}})
; CHECK-NEXT: je
%refcnt = getelementptr inbounds %struct.obj* %o, i64 0, i32 0
- %0 = load i64* %refcnt, align 8, !tbaa !0
+ %0 = load i64* %refcnt, align 8
%dec = add i64 %0, -1
- store i64 %dec, i64* %refcnt, align 8, !tbaa !0
+ store i64 %dec, i64* %refcnt, align 8
%tobool = icmp eq i64 %dec, 0
br i1 %tobool, label %if.end, label %return
@@ -33,12 +33,12 @@ define i32 @test() nounwind uwtable ssp {
entry:
; CHECK: decq
; CHECK-NOT: decq
-%0 = load i64* @c, align 8, !tbaa !0
+%0 = load i64* @c, align 8
%dec.i = add nsw i64 %0, -1
-store i64 %dec.i, i64* @c, align 8, !tbaa !0
+store i64 %dec.i, i64* @c, align 8
%tobool.i = icmp ne i64 %dec.i, 0
%lor.ext.i = zext i1 %tobool.i to i32
-store i32 %lor.ext.i, i32* @a, align 4, !tbaa !3
+store i32 %lor.ext.i, i32* @a, align 4
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i64 0, i64 0), i64 %dec.i) nounwind
ret i32 0
}
@@ -47,12 +47,12 @@ ret i32 0
define i32 @test2() nounwind uwtable ssp {
entry:
; CHECK-NOT: decq ({{.*}})
-%0 = load i64* @c, align 8, !tbaa !0
+%0 = load i64* @c, align 8
%dec.i = add nsw i64 %0, -1
-store i64 %dec.i, i64* @c, align 8, !tbaa !0
+store i64 %dec.i, i64* @c, align 8
%tobool.i = icmp ne i64 %0, 0
%lor.ext.i = zext i1 %tobool.i to i32
-store i32 %lor.ext.i, i32* @a, align 4, !tbaa !3
+store i32 %lor.ext.i, i32* @a, align 4
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i64 0, i64 0), i64 %dec.i) nounwind
ret i32 0
}
@@ -61,11 +61,6 @@ declare i32 @printf(i8* nocapture, ...) nounwind
declare void @free(i8* nocapture) nounwind
-!0 = metadata !{metadata !"long", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"int", metadata !1}
-
%struct.obj2 = type { i64, i32, i16, i8 }
declare void @other(%struct.obj2* ) nounwind;
diff --git a/test/CodeGen/X86/select-with-and-or.ll b/test/CodeGen/X86/select-with-and-or.ll
new file mode 100644
index 000000000000..1ccf30bf2083
--- /dev/null
+++ b/test/CodeGen/X86/select-with-and-or.ll
@@ -0,0 +1,72 @@
+; RUN: opt < %s -O3 | \
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+define <4 x i32> @test1(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> %c, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+; CHECK: test1
+; CHECK: cmpnle
+; CHECK-NEXT: andps
+; CHECK: ret
+}
+
+define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c
+ ret <4 x i32> %r
+; CHECK: test2
+; CHECK: cmpnle
+; CHECK-NEXT: orps
+; CHECK: ret
+}
+
+define <4 x i32> @test3(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> zeroinitializer, <4 x i32> %c
+ ret <4 x i32> %r
+; CHECK: test3
+; CHECK: cmple
+; CHECK-NEXT: andps
+; CHECK: ret
+}
+
+define <4 x i32> @test4(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %r
+; CHECK: test4
+; CHECK: cmple
+; CHECK-NEXT: orps
+; CHECK: ret
+}
+
+define <4 x i32> @test5(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer
+ ret <4 x i32> %r
+; CHECK: test5
+; CHECK: cmpnle
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
+ %f = fcmp ult <4 x float> %a, %b
+ %r = select <4 x i1> %f, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %r
+; CHECK: test6
+; CHECK: cmple
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
+ %f = fcmp ult <4 x float> %a, %b
+ %s = sext <4 x i1> %f to <4 x i32>
+ %l = load <4 x i32>* %p
+ %r = and <4 x i32> %l, %s
+ ret <4 x i32> %r
+; CHECK: test7
+; CHECK: cmpnle
+; CHECK-NEXT: andps
+; CHECK: ret
+}
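
These tests pin down how a vector select against all-zeros or all-ones lowers: the vector fcmp already produces a 0/-1 lane mask, so selecting %c versus zero is a single andps of the mask with %c, selecting all-ones versus %c is an orps, and when both arms are constants the compare itself is the answer (test5/test6 expect no logic op after the cmp). The cmple/cmpnle flip comes from inverting the condition rather than the mask. test7 states the underlying equivalence directly in IR:

    %f = fcmp ult <4 x float> %a, %b
    %s = sext <4 x i1> %f to <4 x i32>   ; the mask the compare already produced
    %r = and <4 x i32> %s, %c            ; == select %f, <4 x i32> %c, zeroinitializer
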
diff --git a/test/CodeGen/X86/sincos-opt.ll b/test/CodeGen/X86/sincos-opt.ll
index f364d1fc2dc8..333c4663eb0b 100644
--- a/test/CodeGen/X86/sincos-opt.ll
+++ b/test/CodeGen/X86/sincos-opt.ll
@@ -4,6 +4,7 @@
; Combine sin / cos into a single call.
; rdar://13087969
+; rdar://13599493
define float @test1(float %x) nounwind {
entry:
@@ -14,7 +15,8 @@ entry:
; OSX_SINCOS: test1:
; OSX_SINCOS: callq ___sincosf_stret
-; OSX_SINCOS: addss %xmm1, %xmm0
+; OSX_SINCOS: pshufd $1, %xmm0, %xmm1
+; OSX_SINCOS: addss %xmm0, %xmm1
; OSX_NOOPT: test1
; OSX_NOOPT: callq _cosf
diff --git a/test/CodeGen/X86/stdcall.ll b/test/CodeGen/X86/stdcall.ll
index a7c2517e7dbe..73826ed0b29d 100644
--- a/test/CodeGen/X86/stdcall.ll
+++ b/test/CodeGen/X86/stdcall.ll
@@ -1,16 +1,24 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -mtriple="i386-pc-mingw32" < %s | FileCheck %s
; PR5851
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-mingw32"
-
%0 = type { void (...)* }
-@B = global %0 { void (...)* bitcast (void ()* @MyFunc to void (...)*) }, align 4
-; CHECK: _B:
-; CHECK: .long _MyFunc@0
-
define internal x86_stdcallcc void @MyFunc() nounwind {
entry:
+; CHECK: MyFunc@0:
+; CHECK: ret
ret void
}
+
+; PR14410
+define x86_stdcallcc i32 @"\01DoNotMangle"(i32 %a) {
+; CHECK: DoNotMangle:
+; CHECK: ret $4
+entry:
+ ret i32 %a
+}
+
+@B = global %0 { void (...)* bitcast (void ()* @MyFunc to void (...)*) }, align 4
+; CHECK: _B:
+; CHECK: .long _MyFunc@0
+
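
The PR14410 case relies on LLVM's verbatim-name convention: a symbol whose IR name begins with \01 is emitted exactly as written, bypassing both the platform underscore prefix and the stdcall @<bytes-of-args> decoration that turns @MyFunc into _MyFunc@0 here. A hypothetical side-by-side:

  define x86_stdcallcc void @Decorated() nounwind { ret void }
  ; emitted on i386-pc-mingw32 as _Decorated@0

  define x86_stdcallcc void @"\01_Verbatim"() nounwind { ret void }
  ; emitted as _Verbatim, exactly the characters after the \01 marker
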
diff --git a/test/CodeGen/X86/store-fp-constant.ll b/test/CodeGen/X86/store-fp-constant.ll
index 206886bb608f..71df8d3109e6 100644
--- a/test/CodeGen/X86/store-fp-constant.ll
+++ b/test/CodeGen/X86/store-fp-constant.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=x86 | not grep rodata
-; RUN: llc < %s -march=x86 | not grep literal
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; CHECK-NOT: rodata
+; CHECK-NOT: literal
+
;
; Check that no FP constants in this testcase end up in the
; constant pool.
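
On a 32-bit x86 target the FP-constant stores in this test can be lowered as immediate integer stores of the constant's bit pattern, so no constant-pool (rodata/literal) entry is needed. A sketch of the kind of code being checked, under that assumption:

  define void @store_const(double* %p) nounwind {
    ; expected to become two 32-bit immediate moves, not a constant-pool load
    store double 1.250000e+00, double* %p
    ret void
  }
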
diff --git a/test/CodeGen/X86/subreg-to-reg-1.ll b/test/CodeGen/X86/subreg-to-reg-1.ll
index 4f31ab5a9229..2931bab0cdd1 100644
--- a/test/CodeGen/X86/subreg-to-reg-1.ll
+++ b/test/CodeGen/X86/subreg-to-reg-1.ll
@@ -1,4 +1,7 @@
-; RUN: llc < %s -march=x86-64 | grep "leal .*), %e.*" | count 1
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; CHECK: {{leal .*[)], %e.*}}
+; CHECK-NOT: {{leal .*[)], %e.*}}
; Don't eliminate or coalesce away the explicit zero-extension!
; This is currently using a leal because of a 3-addressification detail,
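
The zero-extension referred to here presumably comes from IR of roughly this shape (assumed, since the function body is below the fold):

  define i64 @sketch(i32 %a, i32 %b) nounwind {
    %sum = add i32 %a, %b
    %ext = zext i32 %sum to i64   ; free on x86-64: writing %e-registers clears bits 63:32
    ret i64 %ext
  }

The add becomes a three-operand leal writing a 32-bit register, and the CHECK/CHECK-NOT pair above preserves the old grep's requirement of exactly one such leal.
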
diff --git a/test/CodeGen/X86/subreg-to-reg-3.ll b/test/CodeGen/X86/subreg-to-reg-3.ll
index 931ae758ac5c..80ab1a2e2494 100644
--- a/test/CodeGen/X86/subreg-to-reg-3.ll
+++ b/test/CodeGen/X86/subreg-to-reg-3.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86-64 | grep imull
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; CHECK: imull
; Don't eliminate or coalesce away the explicit zero-extension!
diff --git a/test/CodeGen/X86/subtarget-feature-change.ll b/test/CodeGen/X86/subtarget-feature-change.ll
index cd677294c669..04d4a7199632 100644
--- a/test/CodeGen/X86/subtarget-feature-change.ll
+++ b/test/CodeGen/X86/subtarget-feature-change.ll
@@ -14,12 +14,12 @@ entry:
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float* %c, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %1 = load float* %arrayidx2, align 4
%mul = fmul float %0, %1
%arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
- store float %mul, float* %arrayidx4, align 4, !tbaa !0
+ store float %mul, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -43,12 +43,12 @@ entry:
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float* %c, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %1 = load float* %arrayidx2, align 4
%mul = fmul float %0, %1
%arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
- store float %mul, float* %arrayidx4, align 4, !tbaa !0
+ store float %mul, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -60,7 +60,3 @@ for.end:
attributes #0 = { nounwind optsize ssp uwtable "target-cpu"="core2" "target-features"="-sse4a,-avx2,-xop,-fma4,-bmi2,-3dnow,-3dnowa,-pclmul,-sse,-avx,-sse41,-ssse3,+mmx,-rtm,-sse42,-lzcnt,-f16c,-popcnt,-bmi,-aes,-fma,-rdrand,-sse2,-sse3" }
attributes #1 = { nounwind optsize ssp uwtable "target-cpu"="core2" "target-features"="-sse4a,-avx2,-xop,-fma4,-bmi2,-3dnow,-3dnowa,-pclmul,+sse,-avx,-sse41,+ssse3,+mmx,-rtm,-sse42,-lzcnt,-f16c,-popcnt,-bmi,-aes,-fma,-rdrand,+sse2,+sse3" }
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/X86/switch-crit-edge-constant.ll b/test/CodeGen/X86/switch-crit-edge-constant.ll
index 1f2ab0dbcbe9..18f987e72213 100644
--- a/test/CodeGen/X86/switch-crit-edge-constant.ll
+++ b/test/CodeGen/X86/switch-crit-edge-constant.ll
@@ -1,6 +1,8 @@
; PR925
-; RUN: llc < %s -march=x86 | \
-; RUN: grep mov.*str1 | count 1
+; RUN: llc < %s -march=x86 | FileCheck %s
+
+; CHECK: {{mov.*str1}}
+; CHECK-NOT: {{mov.*str1}}
target datalayout = "e-p:32:32"
target triple = "i686-apple-darwin8.7.2"
diff --git a/test/CodeGen/X86/tailcall-64.ll b/test/CodeGen/X86/tailcall-64.ll
index ecc253ba587e..60fe77661797 100644
--- a/test/CodeGen/X86/tailcall-64.ll
+++ b/test/CodeGen/X86/tailcall-64.ll
@@ -50,9 +50,18 @@ define {i64, i64} @test_pair_trivial() {
; CHECK: test_pair_trivial:
; CHECK: jmp _testp ## TAILCALL
+define {i64, i64} @test_pair_notail() {
+ %A = tail call i64 @testi()
+
+ %b = insertvalue {i64, i64} undef, i64 %A, 0
+ %c = insertvalue {i64, i64} %b, i64 %A, 1
+ ret { i64, i64} %c
+}
+; CHECK: test_pair_notail:
+; CHECK-NOT: jmp _testi
-define {i64, i64} @test_pair_trivial_extract() {
+define {i64, i64} @test_pair_extract_trivial() {
%A = tail call { i64, i64} @testp()
%x = extractvalue { i64, i64} %A, 0
%y = extractvalue { i64, i64} %A, 1
@@ -63,10 +72,24 @@ define {i64, i64} @test_pair_trivial_extract() {
ret { i64, i64} %c
}
-; CHECK: test_pair_trivial_extract:
+; CHECK: test_pair_extract_trivial:
; CHECK: jmp _testp ## TAILCALL
-define {i8*, i64} @test_pair_conv_extract() {
+define {i64, i64} @test_pair_extract_notail() {
+ %A = tail call { i64, i64} @testp()
+ %x = extractvalue { i64, i64} %A, 0
+ %y = extractvalue { i64, i64} %A, 1
+
+ %b = insertvalue {i64, i64} undef, i64 %y, 0
+ %c = insertvalue {i64, i64} %b, i64 %x, 1
+
+ ret { i64, i64} %c
+}
+
+; CHECK: test_pair_extract_notail:
+; CHECK-NOT: jmp _testp
+
+define {i8*, i64} @test_pair_extract_conv() {
%A = tail call { i64, i64} @testp()
%x = extractvalue { i64, i64} %A, 0
%y = extractvalue { i64, i64} %A, 1
@@ -79,10 +102,75 @@ define {i8*, i64} @test_pair_conv_extract() {
ret { i8*, i64} %c
}
-; CHECK: test_pair_conv_extract:
+; CHECK: test_pair_extract_conv:
+; CHECK: jmp _testp ## TAILCALL
+
+define {i64, i64} @test_pair_extract_multiple() {
+ %A = tail call { i64, i64} @testp()
+ %x = extractvalue { i64, i64} %A, 0
+ %y = extractvalue { i64, i64} %A, 1
+
+ %b = insertvalue {i64, i64} undef, i64 %x, 0
+ %c = insertvalue {i64, i64} %b, i64 %y, 1
+
+ %x1 = extractvalue { i64, i64} %b, 0
+ %y1 = extractvalue { i64, i64} %c, 1
+
+ %d = insertvalue {i64, i64} undef, i64 %x1, 0
+ %e = insertvalue {i64, i64} %b, i64 %y1, 1
+
+ ret { i64, i64} %e
+}
+
+; CHECK: test_pair_extract_multiple:
+; CHECK: jmp _testp ## TAILCALL
+
+define {i64, i64} @test_pair_extract_undef() {
+ %A = tail call { i64, i64} @testp()
+ %x = extractvalue { i64, i64} %A, 0
+
+ %b = insertvalue {i64, i64} undef, i64 %x, 0
+
+ ret { i64, i64} %b
+}
+
+; CHECK: test_pair_extract_undef:
; CHECK: jmp _testp ## TAILCALL
+declare { i64, { i32, i32 } } @testn()
+
+define {i64, {i32, i32}} @test_nest() {
+ %A = tail call { i64, { i32, i32 } } @testn()
+ %x = extractvalue { i64, { i32, i32}} %A, 0
+ %y = extractvalue { i64, { i32, i32}} %A, 1
+ %y1 = extractvalue { i32, i32} %y, 0
+ %y2 = extractvalue { i32, i32} %y, 1
+
+ %b = insertvalue {i64, {i32, i32}} undef, i64 %x, 0
+ %c1 = insertvalue {i32, i32} undef, i32 %y1, 0
+ %c2 = insertvalue {i32, i32} %c1, i32 %y2, 1
+ %c = insertvalue {i64, {i32, i32}} %b, {i32, i32} %c2, 1
+
+ ret { i64, { i32, i32}} %c
+}
+
+; CHECK: test_nest:
+; CHECK: jmp _testn ## TAILCALL
+
+%struct.A = type { i32 }
+%struct.B = type { %struct.A, i32 }
+
+declare %struct.B* @testu()
+
+define %struct.A* @test_upcast() {
+entry:
+ %A = tail call %struct.B* @testu()
+ %x = getelementptr inbounds %struct.B* %A, i32 0, i32 0
+ ret %struct.A* %x
+}
+; CHECK: test_upcast:
+; CHECK: jmp _testu ## TAILCALL
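+
+; A note on the rule these cases probe (a sketch, not the implementation):
+; the call becomes a jmp only when the function returns the callee's result
+; values unchanged and in their original positions; an undef slot, as in
+; test_pair_extract_undef, is compatible with anything. Swapping the fields
+; (test_pair_extract_notail) or returning a value from a different call
+; (test_pair_notail) forces a real callq, while pure re-packaging through
+; extractvalue/insertvalue, a value-preserving pointer conversion
+; (test_pair_extract_conv), or the struct upcast above keeps the tail call.
+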
; PR13006
define { i64, i64 } @crash(i8* %this) {
diff --git a/test/CodeGen/X86/this-return-64.ll b/test/CodeGen/X86/this-return-64.ll
new file mode 100644
index 000000000000..2b26a89e3c87
--- /dev/null
+++ b/test/CodeGen/X86/this-return-64.ll
@@ -0,0 +1,89 @@
+; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s
+
+%struct.A = type { i8 }
+%struct.B = type { i32 }
+%struct.C = type { %struct.B }
+%struct.D = type { %struct.B }
+%struct.E = type { %struct.B }
+
+declare %struct.A* @A_ctor(%struct.A* returned)
+declare %struct.B* @B_ctor(%struct.B* returned, i32)
+
+declare %struct.A* @A_ctor_nothisret(%struct.A*)
+declare %struct.B* @B_ctor_nothisret(%struct.B*, i32)
+
+define %struct.C* @C_ctor(%struct.C* %this, i32 %y) {
+entry:
+; CHECK: C_ctor:
+; CHECK: jmp B_ctor # TAILCALL
+ %0 = getelementptr inbounds %struct.C* %this, i64 0, i32 0
+ %call = tail call %struct.B* @B_ctor(%struct.B* %0, i32 %y)
+ ret %struct.C* %this
+}
+
+define %struct.C* @C_ctor_nothisret(%struct.C* %this, i32 %y) {
+entry:
+; CHECK: C_ctor_nothisret:
+; CHECK-NOT: jmp B_ctor_nothisret
+ %0 = getelementptr inbounds %struct.C* %this, i64 0, i32 0
+ %call = tail call %struct.B* @B_ctor_nothisret(%struct.B* %0, i32 %y)
+ ret %struct.C* %this
+}
+
+define %struct.D* @D_ctor(%struct.D* %this, i32 %y) {
+entry:
+; CHECK: D_ctor:
+; CHECK: movq %rcx, [[SAVETHIS:%r[0-9a-z]+]]
+; CHECK: callq A_ctor
+; CHECK: movq [[SAVETHIS]], %rcx
+; CHECK: jmp B_ctor # TAILCALL
+ %0 = bitcast %struct.D* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.D* %this, i64 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor(%struct.B* %1, i32 %y)
+; (this next line would never be generated by Clang, actually)
+ %2 = bitcast %struct.A* %call to %struct.D*
+ ret %struct.D* %2
+}
+
+define %struct.D* @D_ctor_nothisret(%struct.D* %this, i32 %y) {
+entry:
+; CHECK: D_ctor_nothisret:
+; CHECK: movq %rcx, [[SAVETHIS:%r[0-9a-z]+]]
+; CHECK: callq A_ctor_nothisret
+; CHECK: movq [[SAVETHIS]], %rcx
+; CHECK-NOT: jmp B_ctor_nothisret
+ %0 = bitcast %struct.D* %this to %struct.A*
+ %call = tail call %struct.A* @A_ctor_nothisret(%struct.A* %0)
+ %1 = getelementptr inbounds %struct.D* %this, i64 0, i32 0
+ %call2 = tail call %struct.B* @B_ctor_nothisret(%struct.B* %1, i32 %y)
+; (this next line would never be generated by Clang, actually)
+ %2 = bitcast %struct.A* %call to %struct.D*
+ ret %struct.D* %2
+}
+
+define %struct.E* @E_ctor(%struct.E* %this, i32 %x) {
+entry:
+; CHECK: E_ctor:
+; CHECK: movq %rcx, [[SAVETHIS:%r[0-9a-z]+]]
+; CHECK: callq B_ctor
+; CHECK: movq [[SAVETHIS]], %rcx
+; CHECK: jmp B_ctor # TAILCALL
+ %b = getelementptr inbounds %struct.E* %this, i64 0, i32 0
+ %call = tail call %struct.B* @B_ctor(%struct.B* %b, i32 %x)
+ %call4 = tail call %struct.B* @B_ctor(%struct.B* %b, i32 %x)
+ ret %struct.E* %this
+}
+
+define %struct.E* @E_ctor_nothisret(%struct.E* %this, i32 %x) {
+entry:
+; CHECK: E_ctor_nothisret:
+; CHECK: movq %rcx, [[SAVETHIS:%r[0-9a-z]+]]
+; CHECK: callq B_ctor_nothisret
+; CHECK: movq [[SAVETHIS]], %rcx
+; CHECK-NOT: jmp B_ctor_nothisret
+ %b = getelementptr inbounds %struct.E* %this, i64 0, i32 0
+ %call = tail call %struct.B* @B_ctor_nothisret(%struct.B* %b, i32 %x)
+ %call4 = tail call %struct.B* @B_ctor_nothisret(%struct.B* %b, i32 %x)
+ ret %struct.E* %this
+}
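
The `returned` attribute is what legalizes these jmps: it promises the callee hands back its first argument, so a caller that finishes by returning its own `this` pointer is, provably, returning the callee's result and may tail-call it. A minimal sketch with hypothetical names:

  declare i8* @identity(i8* returned)

  define i8* @wrapper(i8* %p) {
    %r = tail call i8* @identity(i8* %p)
    ret i8* %p   ; equal to %r by the attribute, so this can lower to a jmp
  }

The _nothisret variants drop the attribute, and the CHECK-NOTs confirm the optimization is then suppressed.
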
diff --git a/test/CodeGen/X86/unwindraise.ll b/test/CodeGen/X86/unwindraise.ll
index a438723d9bd4..9bbe98043fd8 100644
--- a/test/CodeGen/X86/unwindraise.ll
+++ b/test/CodeGen/X86/unwindraise.ll
@@ -50,12 +50,12 @@ while.body: ; preds = %uw_update_context.e
]
if.end3: ; preds = %while.body
- %4 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality, align 8, !tbaa !0
+ %4 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality, align 8
%tobool = icmp eq i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)* %4, null
br i1 %tobool, label %if.end13, label %if.then4
if.then4: ; preds = %if.end3
- %5 = load i64* %exception_class, align 8, !tbaa !3
+ %5 = load i64* %exception_class, align 8
%call6 = call i32 %4(i32 1, i32 1, i64 %5, %struct._Unwind_Exception* %exc, %struct._Unwind_Context* %cur_context)
switch i32 %call6, label %do.end21.loopexit46 [
i32 6, label %while.end
@@ -64,7 +64,7 @@ if.then4: ; preds = %if.end3
if.end13: ; preds = %if.then4, %if.end3
call fastcc void @uw_update_context_1(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs)
- %6 = load i64* %retaddr_column.i, align 8, !tbaa !3
+ %6 = load i64* %retaddr_column.i, align 8
%conv.i = trunc i64 %6 to i32
%cmp.i.i.i = icmp slt i32 %conv.i, 18
br i1 %cmp.i.i.i, label %cond.end.i.i.i, label %cond.true.i.i.i
@@ -77,17 +77,17 @@ cond.end.i.i.i: ; preds = %if.end13
%sext.i = shl i64 %6, 32
%idxprom.i.i.i = ashr exact i64 %sext.i, 32
%arrayidx.i.i.i = getelementptr inbounds [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i
- %7 = load i8* %arrayidx.i.i.i, align 1, !tbaa !1
+ %7 = load i8* %arrayidx.i.i.i, align 1
%arrayidx2.i.i.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i
- %8 = load i8** %arrayidx2.i.i.i, align 8, !tbaa !0
- %9 = load i64* %flags.i.i.i.i, align 8, !tbaa !3
+ %8 = load i8** %arrayidx2.i.i.i, align 8
+ %9 = load i64* %flags.i.i.i.i, align 8
%and.i.i.i.i = and i64 %9, 4611686018427387904
%tobool.i.i.i = icmp eq i64 %and.i.i.i.i, 0
br i1 %tobool.i.i.i, label %if.end.i.i.i, label %land.lhs.true.i.i.i
land.lhs.true.i.i.i: ; preds = %cond.end.i.i.i
%arrayidx4.i.i.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i
- %10 = load i8* %arrayidx4.i.i.i, align 1, !tbaa !1
+ %10 = load i8* %arrayidx4.i.i.i, align 1
%tobool6.i.i.i = icmp eq i8 %10, 0
br i1 %tobool6.i.i.i, label %if.end.i.i.i, label %if.then.i.i.i
@@ -101,7 +101,7 @@ if.end.i.i.i: ; preds = %land.lhs.true.i.i.i
if.then10.i.i.i: ; preds = %if.end.i.i.i
%12 = bitcast i8* %8 to i64*
- %13 = load i64* %12, align 8, !tbaa !3
+ %13 = load i64* %12, align 8
br label %uw_update_context.exit
cond.true14.i.i.i: ; preds = %if.end.i.i.i
@@ -111,16 +111,16 @@ cond.true14.i.i.i: ; preds = %if.end.i.i.i
uw_update_context.exit: ; preds = %if.then10.i.i.i, %if.then.i.i.i
%retval.0.i.i.i = phi i64 [ %11, %if.then.i.i.i ], [ %13, %if.then10.i.i.i ]
%14 = inttoptr i64 %retval.0.i.i.i to i8*
- store i8* %14, i8** %ra.i, align 8, !tbaa !0
+ store i8* %14, i8** %ra.i, align 8
br label %while.body
while.end: ; preds = %if.then4
%private_1 = getelementptr inbounds %struct._Unwind_Exception* %exc, i64 0, i32 2
- store i64 0, i64* %private_1, align 8, !tbaa !3
- %15 = load i8** %ra.i, align 8, !tbaa !0
+ store i64 0, i64* %private_1, align 8
+ %15 = load i8** %ra.i, align 8
%16 = ptrtoint i8* %15 to i64
%private_2 = getelementptr inbounds %struct._Unwind_Exception* %exc, i64 0, i32 3
- store i64 %16, i64* %private_2, align 8, !tbaa !3
+ store i64 %16, i64* %private_2, align 8
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
%17 = bitcast %struct._Unwind_FrameState* %fs.i to i8*
call void @llvm.lifetime.start(i64 -1, i8* %17)
@@ -130,21 +130,21 @@ while.end: ; preds = %if.then4
while.body.i: ; preds = %uw_update_context.exit44, %while.end
%call.i = call fastcc i32 @uw_frame_state_for(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs.i)
- %18 = load i8** %ra.i, align 8, !tbaa !0
+ %18 = load i8** %ra.i, align 8
%19 = ptrtoint i8* %18 to i64
- %20 = load i64* %private_2, align 8, !tbaa !3
+ %20 = load i64* %private_2, align 8
%cmp.i = icmp eq i64 %19, %20
%cmp2.i = icmp eq i32 %call.i, 0
br i1 %cmp2.i, label %if.end.i, label %do.end21
if.end.i: ; preds = %while.body.i
- %21 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality.i, align 8, !tbaa !0
+ %21 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality.i, align 8
%tobool.i = icmp eq i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)* %21, null
br i1 %tobool.i, label %if.end12.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
%or.i = select i1 %cmp.i, i32 6, i32 2
- %22 = load i64* %exception_class, align 8, !tbaa !3
+ %22 = load i64* %exception_class, align 8
%call5.i = call i32 %21(i32 1, i32 %or.i, i64 %22, %struct._Unwind_Exception* %exc, %struct._Unwind_Context* %cur_context)
switch i32 %call5.i, label %do.end21 [
i32 7, label %do.body19
@@ -160,7 +160,7 @@ cond.true.i: ; preds = %if.end12.i
cond.end.i: ; preds = %if.end12.i
call fastcc void @uw_update_context_1(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs.i)
- %23 = load i64* %retaddr_column.i22, align 8, !tbaa !3
+ %23 = load i64* %retaddr_column.i22, align 8
%conv.i23 = trunc i64 %23 to i32
%cmp.i.i.i24 = icmp slt i32 %conv.i23, 18
br i1 %cmp.i.i.i24, label %cond.end.i.i.i33, label %cond.true.i.i.i25
@@ -173,17 +173,17 @@ cond.end.i.i.i33: ; preds = %cond.end.i
%sext.i26 = shl i64 %23, 32
%idxprom.i.i.i27 = ashr exact i64 %sext.i26, 32
%arrayidx.i.i.i28 = getelementptr inbounds [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i27
- %24 = load i8* %arrayidx.i.i.i28, align 1, !tbaa !1
+ %24 = load i8* %arrayidx.i.i.i28, align 1
%arrayidx2.i.i.i29 = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i27
- %25 = load i8** %arrayidx2.i.i.i29, align 8, !tbaa !0
- %26 = load i64* %flags.i.i.i.i, align 8, !tbaa !3
+ %25 = load i8** %arrayidx2.i.i.i29, align 8
+ %26 = load i64* %flags.i.i.i.i, align 8
%and.i.i.i.i31 = and i64 %26, 4611686018427387904
%tobool.i.i.i32 = icmp eq i64 %and.i.i.i.i31, 0
br i1 %tobool.i.i.i32, label %if.end.i.i.i39, label %land.lhs.true.i.i.i36
land.lhs.true.i.i.i36: ; preds = %cond.end.i.i.i33
%arrayidx4.i.i.i34 = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i27
- %27 = load i8* %arrayidx4.i.i.i34, align 1, !tbaa !1
+ %27 = load i8* %arrayidx4.i.i.i34, align 1
%tobool6.i.i.i35 = icmp eq i8 %27, 0
br i1 %tobool6.i.i.i35, label %if.end.i.i.i39, label %if.then.i.i.i37
@@ -197,7 +197,7 @@ if.end.i.i.i39: ; preds = %land.lhs.true.i.i.i
if.then10.i.i.i40: ; preds = %if.end.i.i.i39
%29 = bitcast i8* %25 to i64*
- %30 = load i64* %29, align 8, !tbaa !3
+ %30 = load i64* %29, align 8
br label %uw_update_context.exit44
cond.true14.i.i.i41: ; preds = %if.end.i.i.i39
@@ -207,13 +207,13 @@ cond.true14.i.i.i41: ; preds = %if.end.i.i.i39
uw_update_context.exit44: ; preds = %if.then10.i.i.i40, %if.then.i.i.i37
%retval.0.i.i.i42 = phi i64 [ %28, %if.then.i.i.i37 ], [ %30, %if.then10.i.i.i40 ]
%31 = inttoptr i64 %retval.0.i.i.i42 to i8*
- store i8* %31, i8** %ra.i, align 8, !tbaa !0
+ store i8* %31, i8** %ra.i, align 8
br label %while.body.i
do.body19: ; preds = %if.then3.i
call void @llvm.lifetime.end(i64 -1, i8* %17)
%call20 = call fastcc i64 @uw_install_context_1(%struct._Unwind_Context* %this_context, %struct._Unwind_Context* %cur_context)
- %32 = load i8** %ra.i, align 8, !tbaa !0
+ %32 = load i8** %ra.i, align 8
call void @llvm.eh.return.i64(i64 %call20, i8* %32)
unreachable
@@ -245,8 +245,3 @@ declare fastcc void @uw_update_context_1(%struct._Unwind_Context*, %struct._Unwi
declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"long", metadata !1}
diff --git a/test/CodeGen/X86/v4f32-immediate.ll b/test/CodeGen/X86/v4f32-immediate.ll
index b5ebaa74bd07..68d20a04ecf0 100644
--- a/test/CodeGen/X86/v4f32-immediate.ll
+++ b/test/CodeGen/X86/v4f32-immediate.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse | grep movaps
+; RUN: llc < %s -march=x86 -mattr=+sse | FileCheck %s
+
+; CHECK: movaps
define <4 x float> @foo() {
ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
diff --git a/test/CodeGen/X86/vararg_tailcall.ll b/test/CodeGen/X86/vararg_tailcall.ll
index 73d80ebc1d5f..eeda5e15a168 100644
--- a/test/CodeGen/X86/vararg_tailcall.ll
+++ b/test/CodeGen/X86/vararg_tailcall.ll
@@ -39,7 +39,7 @@ declare void @bar2(i8*, i64) optsize noredzone
; WIN64: callq
define i8* @foo2(i8* %arg) nounwind optsize ssp noredzone {
entry:
- %tmp1 = load i8** @sel, align 8, !tbaa !0
+ %tmp1 = load i8** @sel, align 8
%call = tail call i8* (i8*, i8*, ...)* @x2(i8* %arg, i8* %tmp1) nounwind optsize noredzone
ret i8* %call
}
@@ -52,10 +52,10 @@ declare i8* @x2(i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo6(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8, !tbaa !0
- %tmp3 = load i8** @sel4, align 8, !tbaa !0
- %tmp4 = load i8** @sel5, align 8, !tbaa !0
- %tmp5 = load i8** @sel6, align 8, !tbaa !0
+ %tmp2 = load i8** @sel3, align 8
+ %tmp3 = load i8** @sel4, align 8
+ %tmp4 = load i8** @sel5, align 8
+ %tmp5 = load i8** @sel6, align 8
%call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5) nounwind optsize noredzone
ret i8* %call
}
@@ -68,11 +68,11 @@ declare i8* @x3(i8*, i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo7(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8, !tbaa !0
- %tmp3 = load i8** @sel4, align 8, !tbaa !0
- %tmp4 = load i8** @sel5, align 8, !tbaa !0
- %tmp5 = load i8** @sel6, align 8, !tbaa !0
- %tmp6 = load i8** @sel7, align 8, !tbaa !0
+ %tmp2 = load i8** @sel3, align 8
+ %tmp3 = load i8** @sel4, align 8
+ %tmp4 = load i8** @sel5, align 8
+ %tmp5 = load i8** @sel6, align 8
+ %tmp6 = load i8** @sel7, align 8
%call = tail call i8* (i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...)* @x7(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i8* %tmp6) nounwind optsize noredzone
ret i8* %call
}
@@ -85,14 +85,10 @@ declare i8* @x7(i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo8(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8, !tbaa !0
- %tmp3 = load i8** @sel4, align 8, !tbaa !0
- %tmp4 = load i8** @sel5, align 8, !tbaa !0
- %tmp5 = load i8** @sel6, align 8, !tbaa !0
+ %tmp2 = load i8** @sel3, align 8
+ %tmp3 = load i8** @sel4, align 8
+ %tmp4 = load i8** @sel5, align 8
+ %tmp5 = load i8** @sel6, align 8
%call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i32 48879, i32 48879) nounwind optsize noredzone
ret i8* %call
}
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/X86/vec_compare.ll b/test/CodeGen/X86/vec_compare.ll
index b6d91a3f770e..fd5c234bb160 100644
--- a/test/CodeGen/X86/vec_compare.ll
+++ b/test/CodeGen/X86/vec_compare.ll
@@ -65,3 +65,159 @@ define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) nounwind {
%D = sext <2 x i1> %C to <2 x i64>
ret <2 x i64> %D
}
+
+define <2 x i64> @test7(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: [[CONSTSEG:[A-Z0-9_]*]]:
+; CHECK: .long 2147483648
+; CHECK-NEXT: .long 0
+; CHECK-NEXT: .long 2147483648
+; CHECK-NEXT: .long 0
+; CHECK: test7:
+; CHECK: movdqa [[CONSTSEG]], [[CONSTREG:%xmm[0-9]*]]
+; CHECK: pxor [[CONSTREG]]
+; CHECK: pxor [[CONSTREG]]
+; CHECK: pcmpgtd %xmm1
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: ret
+ %C = icmp sgt <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test8(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test8:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm0
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: ret
+ %C = icmp slt <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test9(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test9:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm0
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: pcmpeqd
+; CHECK: pxor
+; CHECK: ret
+ %C = icmp sge <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test10(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test10:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm1
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: pcmpeqd
+; CHECK: pxor
+; CHECK: ret
+ %C = icmp sle <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test11(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: [[CONSTSEG:[A-Z0-9_]*]]:
+; CHECK: .long 2147483648
+; CHECK-NEXT: .long 2147483648
+; CHECK-NEXT: .long 2147483648
+; CHECK-NEXT: .long 2147483648
+; CHECK: test11:
+; CHECK: movdqa [[CONSTSEG]], [[CONSTREG:%xmm[0-9]*]]
+; CHECK: pxor [[CONSTREG]]
+; CHECK: pxor [[CONSTREG]]
+; CHECK: pcmpgtd %xmm1
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: ret
+ %C = icmp ugt <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test12(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test12:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm0
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: ret
+ %C = icmp ult <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test13(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test13:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm0
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: pcmpeqd
+; CHECK: pxor
+; CHECK: ret
+ %C = icmp uge <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
+
+define <2 x i64> @test14(<2 x i64> %A, <2 x i64> %B) nounwind {
+; CHECK: test14:
+; CHECK: pxor
+; CHECK: pxor
+; CHECK: pcmpgtd %xmm1
+; CHECK: pshufd $-96
+; CHECK: pcmpeqd
+; CHECK: pshufd $-11
+; CHECK: pand
+; CHECK: pshufd $-11
+; CHECK: por
+; CHECK: pcmpeqd
+; CHECK: pxor
+; CHECK: ret
+ %C = icmp ule <2 x i64> %A, %B
+ %D = sext <2 x i1> %C to <2 x i64>
+ ret <2 x i64> %D
+}
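
The long check sequences encode how a 64-bit element compare is synthesized before SSE4.2's pcmpgtq: per element,

  a >s b  ==  (a.hi >s b.hi) | ((a.hi == b.hi) & (a.lo >u b.lo))

pcmpgtd and pcmpeqd compare the 32-bit halves, the pshufd/pand/por sequence combines them per the identity, and pxor with 0x80000000 (the 2147483648 constants) flips sign bits so the signed pcmpgtd can stand in for unsigned compares; note which halves get flipped differs between test7 (signed, low halves only) and test11 (unsigned, both halves). The non-strict predicates (tests 9, 10, 13, 14) negate the strict result, hence the trailing pcmpeqd/pxor.
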
diff --git a/test/CodeGen/X86/vec_set-9.ll b/test/CodeGen/X86/vec_set-9.ll
index b8ec0cf08095..6979f6bb1c26 100644
--- a/test/CodeGen/X86/vec_set-9.ll
+++ b/test/CodeGen/X86/vec_set-9.ll
@@ -1,5 +1,10 @@
-; RUN: llc < %s -march=x86-64 | grep movd | count 1
-; RUN: llc < %s -march=x86-64 | grep "movlhps.*%xmm0, %xmm0"
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; CHECK: test3
+; CHECK: movd
+; CHECK-NOT: movd
+; CHECK: {{movlhps.*%xmm0, %xmm0}}
+; CHECK-NEXT: ret
define <2 x i64> @test3(i64 %A) nounwind {
entry:
diff --git a/test/CodeGen/X86/vec_set-B.ll b/test/CodeGen/X86/vec_set-B.ll
index f5b3e8baa33a..5578ecaf0007 100644
--- a/test/CodeGen/X86/vec_set-B.ll
+++ b/test/CodeGen/X86/vec_set-B.ll
@@ -1,6 +1,8 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep movaps
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
; RUN: llc < %s -march=x86 -mattr=+sse2 | grep esp | count 2
+; CHECK-NOT: movaps
+
; These should both generate something like this:
;_test3:
; movl $1234567, %eax
diff --git a/test/CodeGen/X86/vec_set-D.ll b/test/CodeGen/X86/vec_set-D.ll
index 3d6369e1c76a..9c1e1acf0bab 100644
--- a/test/CodeGen/X86/vec_set-D.ll
+++ b/test/CodeGen/X86/vec_set-D.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+
+; CHECK: movq
define <4 x i32> @t(i32 %x, i32 %y) nounwind {
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
diff --git a/test/CodeGen/X86/vec_set-I.ll b/test/CodeGen/X86/vec_set-I.ll
index 64f36f99e4d2..c5d6ab88a35d 100644
--- a/test/CodeGen/X86/vec_set-I.ll
+++ b/test/CodeGen/X86/vec_set-I.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep xorp
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+
+; CHECK-NOT: xorp
+; CHECK: movd
+; CHECK-NOT: xorp
define void @t1() nounwind {
%tmp298.i.i = load <4 x float>* null, align 16
diff --git a/test/CodeGen/X86/vec_shuffle-28.ll b/test/CodeGen/X86/vec_shuffle-28.ll
index 343685bf8ad2..ebf557762cb9 100644
--- a/test/CodeGen/X86/vec_shuffle-28.ll
+++ b/test/CodeGen/X86/vec_shuffle-28.ll
@@ -1,5 +1,7 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 -o %t
-; RUN: grep pshufb %t | count 1
+; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
+
+; CHECK: pshufb
+; CHECK-NOT: pshufb
; FIXME: this test has a superfluous punpcklqdq pre-pshufb currently.
; Don't XFAIL it because it's still better than the previous code.
diff --git a/test/CodeGen/X86/vec_zero_cse.ll b/test/CodeGen/X86/vec_zero_cse.ll
index 41ea0245ed86..bda3feff2b00 100644
--- a/test/CodeGen/X86/vec_zero_cse.ll
+++ b/test/CodeGen/X86/vec_zero_cse.ll
@@ -1,7 +1,13 @@
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep xorps | count 1
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 1
+; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | FileCheck %s
+; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | FileCheck -check-prefix=CHECK2 %s
; 64-bit stores here do not use MMX.
+; CHECK: xorps
+; CHECK-NOT: xorps
+
+; CHECK2: pcmpeqd
+; CHECK2-NOT: pcmpeqd
+
@M1 = external global <1 x i64>
@M2 = external global <2 x i32>
diff --git a/test/CodeGen/X86/vector.ll b/test/CodeGen/X86/vector.ll
index 46b0e1890f11..82d20a23f357 100644
--- a/test/CodeGen/X86/vector.ll
+++ b/test/CodeGen/X86/vector.ll
@@ -1,6 +1,6 @@
; Test that vectors are scalarized/lowered correctly.
-; RUN: llc < %s -march=x86 -mcpu=i386 > %t
-; RUN: llc < %s -march=x86 -mcpu=yonah >> %t
+; RUN: llc < %s -march=x86 -mcpu=i386
+; RUN: llc < %s -march=x86 -mcpu=yonah
%d8 = type <8 x double>
%f1 = type <1 x float>
diff --git a/test/CodeGen/X86/viabs.ll b/test/CodeGen/X86/viabs.ll
new file mode 100644
index 000000000000..f748a14836c8
--- /dev/null
+++ b/test/CodeGen/X86/viabs.ll
@@ -0,0 +1,183 @@
+; RUN: llc < %s -march=x86-64 -mcpu=x86-64 | FileCheck %s -check-prefix=SSE2
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=AVX2
+
+define <4 x i32> @test1(<4 x i32> %a) nounwind {
+; SSE2: test1:
+; SSE2: movdqa
+; SSE2: psrad $31
+; SSE2-NEXT: padd
+; SSE2-NEXT: pxor
+; SSE2-NEXT: ret
+
+; SSSE3: test1:
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test1:
+; AVX2: vpabsd
+; AVX2-NEXT: ret
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
+ ret <4 x i32> %abs
+}
+
+define <4 x i32> @test2(<4 x i32> %a) nounwind {
+; SSE2: test2:
+; SSE2: movdqa
+; SSE2: psrad $31
+; SSE2-NEXT: padd
+; SSE2-NEXT: pxor
+; SSE2-NEXT: ret
+
+; SSSE3: test2:
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test2:
+; AVX2: vpabsd
+; AVX2-NEXT: ret
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sge <4 x i32> %a, zeroinitializer
+ %abs = select <4 x i1> %b, <4 x i32> %a, <4 x i32> %tmp1neg
+ ret <4 x i32> %abs
+}
+
+define <8 x i16> @test3(<8 x i16> %a) nounwind {
+; SSE2: test3:
+; SSE2: movdqa
+; SSE2: psraw $15
+; SSE2-NEXT: padd
+; SSE2-NEXT: pxor
+; SSE2-NEXT: ret
+
+; SSSE3: test3:
+; SSSE3: pabsw
+; SSSE3-NEXT: ret
+
+; AVX2: test3:
+; AVX2: vpabsw
+; AVX2-NEXT: ret
+ %tmp1neg = sub <8 x i16> zeroinitializer, %a
+ %b = icmp sgt <8 x i16> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i16> %a, <8 x i16> %tmp1neg
+ ret <8 x i16> %abs
+}
+
+define <16 x i8> @test4(<16 x i8> %a) nounwind {
+; SSE2: test4:
+; SSE2: pxor
+; SSE2: pcmpgtb
+; SSE2-NEXT: padd
+; SSE2-NEXT: pxor
+; SSE2-NEXT: ret
+
+; SSSE3: test4:
+; SSSE3: pabsb
+; SSSE3-NEXT: ret
+
+; AVX2: test4:
+; AVX2: vpabsb
+; AVX2-NEXT: ret
+ %tmp1neg = sub <16 x i8> zeroinitializer, %a
+ %b = icmp slt <16 x i8> %a, zeroinitializer
+ %abs = select <16 x i1> %b, <16 x i8> %tmp1neg, <16 x i8> %a
+ ret <16 x i8> %abs
+}
+
+define <4 x i32> @test5(<4 x i32> %a) nounwind {
+; SSE2: test5:
+; SSE2: movdqa
+; SSE2: psrad $31
+; SSE2-NEXT: padd
+; SSE2-NEXT: pxor
+; SSE2-NEXT: ret
+
+; SSSE3: test5:
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test5:
+; AVX2: vpabsd
+; AVX2-NEXT: ret
+ %tmp1neg = sub <4 x i32> zeroinitializer, %a
+ %b = icmp sle <4 x i32> %a, zeroinitializer
+ %abs = select <4 x i1> %b, <4 x i32> %tmp1neg, <4 x i32> %a
+ ret <4 x i32> %abs
+}
+
+define <8 x i32> @test6(<8 x i32> %a) nounwind {
+; SSSE3: test6:
+; SSSE3: pabsd
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test6:
+; AVX2: vpabsd {{.*}}%ymm
+; AVX2-NEXT: ret
+ %tmp1neg = sub <8 x i32> zeroinitializer, %a
+ %b = icmp sgt <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+ %abs = select <8 x i1> %b, <8 x i32> %a, <8 x i32> %tmp1neg
+ ret <8 x i32> %abs
+}
+
+define <8 x i32> @test7(<8 x i32> %a) nounwind {
+; SSSE3: test7:
+; SSSE3: pabsd
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test7:
+; AVX2: vpabsd {{.*}}%ymm
+; AVX2-NEXT: ret
+ %tmp1neg = sub <8 x i32> zeroinitializer, %a
+ %b = icmp sge <8 x i32> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i32> %a, <8 x i32> %tmp1neg
+ ret <8 x i32> %abs
+}
+
+define <16 x i16> @test8(<16 x i16> %a) nounwind {
+; SSSE3: test8:
+; SSSE3: pabsw
+; SSSE3: pabsw
+; SSSE3-NEXT: ret
+
+; AVX2: test8:
+; AVX2: vpabsw {{.*}}%ymm
+; AVX2-NEXT: ret
+ %tmp1neg = sub <16 x i16> zeroinitializer, %a
+ %b = icmp sgt <16 x i16> %a, zeroinitializer
+ %abs = select <16 x i1> %b, <16 x i16> %a, <16 x i16> %tmp1neg
+ ret <16 x i16> %abs
+}
+
+define <32 x i8> @test9(<32 x i8> %a) nounwind {
+; SSSE3: test9:
+; SSSE3: pabsb
+; SSSE3: pabsb
+; SSSE3-NEXT: ret
+
+; AVX2: test9:
+; AVX2: vpabsb {{.*}}%ymm
+; AVX2-NEXT: ret
+ %tmp1neg = sub <32 x i8> zeroinitializer, %a
+ %b = icmp slt <32 x i8> %a, zeroinitializer
+ %abs = select <32 x i1> %b, <32 x i8> %tmp1neg, <32 x i8> %a
+ ret <32 x i8> %abs
+}
+
+define <8 x i32> @test10(<8 x i32> %a) nounwind {
+; SSSE3: test10:
+; SSSE3: pabsd
+; SSSE3: pabsd
+; SSSE3-NEXT: ret
+
+; AVX2: test10:
+; AVX2: vpabsd {{.*}}%ymm
+; AVX2-NEXT: ret
+ %tmp1neg = sub <8 x i32> zeroinitializer, %a
+ %b = icmp sle <8 x i32> %a, zeroinitializer
+ %abs = select <8 x i1> %b, <8 x i32> %tmp1neg, <8 x i32> %a
+ ret <8 x i32> %abs
+}
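
The SSE2 sequence being matched is the classic branch-free absolute value, while SSSE3 and AVX2 have dedicated pabs*/vpabs* instructions. A scalar sketch of the identity behind psrad/padd/pxor:

  define i32 @abs_sketch(i32 %x) {
    %m = ashr i32 %x, 31   ; -1 if %x < 0, else 0
    %a = add i32 %x, %m    ; x-1 when negative, x otherwise
    %r = xor i32 %a, %m    ; ~(x-1) == -x when negative, x otherwise
    ret i32 %r
  }
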
diff --git a/test/CodeGen/X86/win32_sret.ll b/test/CodeGen/X86/win32_sret.ll
index 52b987e2be65..2bfe5fb1007b 100644
--- a/test/CodeGen/X86/win32_sret.ll
+++ b/test/CodeGen/X86/win32_sret.ll
@@ -1,7 +1,9 @@
-; RUN: llc < %s -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
+; We specify -mcpu explicitly to prevent the instruction reordering that
+; happens on some setups (e.g., Atom) from affecting the output.
+; RUN: llc < %s -mcpu=core2 -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
; RUN: llc < %s -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
; RUN: llc < %s -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
-; RUN: llc < %s -O0 -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
+; RUN: llc < %s -mcpu=core2 -O0 -mtriple=i686-pc-win32 | FileCheck %s -check-prefix=WIN32
; RUN: llc < %s -O0 -mtriple=i686-pc-mingw32 | FileCheck %s -check-prefix=MINGW_X86
; RUN: llc < %s -O0 -mtriple=i386-pc-linux | FileCheck %s -check-prefix=LINUX
@@ -117,11 +119,8 @@ entry:
; WIN32: movl %eax, (%e{{[sc][px]}})
; The this pointer goes to ECX.
-; FIXME: for some reason, the below checks fail on the Ubuntu Atom D2700 bot.
-; FIXME-NEXT: leal {{[0-9]+}}(%esp), %ecx
-; FIXME-NEXT: calll "?foo@C5@@QAE?AUS5@@XZ"
-
-; WIN32: calll "?foo@C5@@QAE?AUS5@@XZ"
+; WIN32-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; WIN32-NEXT: calll "?foo@C5@@QAE?AUS5@@XZ"
; WIN32: ret
ret void
}
diff --git a/test/CodeGen/X86/x86-64-frameaddr.ll b/test/CodeGen/X86/x86-64-frameaddr.ll
index 57163d3c6839..7d36a7af6aaa 100644
--- a/test/CodeGen/X86/x86-64-frameaddr.ll
+++ b/test/CodeGen/X86/x86-64-frameaddr.ll
@@ -1,4 +1,9 @@
-; RUN: llc < %s -march=x86-64 | grep movq | grep rbp
+; RUN: llc < %s -march=x86-64 | FileCheck %s
+
+; CHECK: stack_end_address
+; CHECK: {{movq.+rbp.*$}}
+; CHECK: {{movq.+rbp.*$}}
+; CHECK: ret
define i64* @stack_end_address() nounwind {
entry:
diff --git a/test/CodeGen/X86/x86-64-pic-3.ll b/test/CodeGen/X86/x86-64-pic-3.ll
index ba933788a3a5..1b0ddc6fe5ad 100644
--- a/test/CodeGen/X86/x86-64-pic-3.ll
+++ b/test/CodeGen/X86/x86-64-pic-3.ll
@@ -1,6 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep "callq f" %t1
-; RUN: not grep "callq f@PLT" %t1
+; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic | FileCheck %s
+
+
+; CHECK-NOT: {{callq f@PLT}}
+; CHECK: {{callq f}}
+; CHECK-NOT: {{callq f@PLT}}
define void @g() {
entry:
diff --git a/test/CodeGen/X86/x86-64-shortint.ll b/test/CodeGen/X86/x86-64-shortint.ll
index cbf658888ced..75f89023509d 100644
--- a/test/CodeGen/X86/x86-64-shortint.ll
+++ b/test/CodeGen/X86/x86-64-shortint.ll
@@ -1,4 +1,6 @@
-; RUN: llc < %s | grep movswl
+; RUN: llc < %s | FileCheck %s
+
+; CHECK: movswl
target datalayout = "e-p:64:64"
target triple = "x86_64-apple-darwin8"
diff --git a/test/CodeGen/X86/zext-extract_subreg.ll b/test/CodeGen/X86/zext-extract_subreg.ll
index 4f1dde3c4f0e..168b898f12bd 100644
--- a/test/CodeGen/X86/zext-extract_subreg.ll
+++ b/test/CodeGen/X86/zext-extract_subreg.ll
@@ -6,7 +6,7 @@ entry:
br i1 undef, label %return, label %if.end.i
if.end.i: ; preds = %entry
- %tmp7.i = load i32* undef, align 4, !tbaa !0
+ %tmp7.i = load i32* undef, align 4
br i1 undef, label %return, label %if.end
if.end: ; preds = %if.end.i
@@ -55,7 +55,3 @@ cond.false280: ; preds = %cond.true225
return: ; preds = %if.end.i, %entry
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/CodeGen/X86/zext-inreg-0.ll b/test/CodeGen/X86/zext-inreg-0.ll
index ae6221af9d81..688b88db526a 100644
--- a/test/CodeGen/X86/zext-inreg-0.ll
+++ b/test/CodeGen/X86/zext-inreg-0.ll
@@ -1,9 +1,12 @@
-; RUN: llc < %s -march=x86 | not grep and
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep and %t
-; RUN: not grep movzbq %t
-; RUN: not grep movzwq %t
-; RUN: not grep movzlq %t
+; RUN: llc < %s -march=x86 | FileCheck -check-prefix=X86 %s
+; RUN: llc < %s -march=x86-64 | FileCheck -check-prefix=X64 %s
+
+; X86-NOT: and
+
+; X64-NOT: and
+; X64-NOT: movzbq
+; X64-NOT: movzwq
+; X64-NOT: movzlq
; These should use movzbl instead of 'and 255'.
; This is related to not having a ZERO_EXTEND_REG opcode.
diff --git a/test/CodeGen/XCore/global_negative_offset.ll b/test/CodeGen/XCore/offset_folding.ll
index 0328fb0460f3..30edfe695c3f 100644
--- a/test/CodeGen/XCore/global_negative_offset.ll
+++ b/test/CodeGen/XCore/offset_folding.ll
@@ -1,23 +1,40 @@
; RUN: llc < %s -march=xcore | FileCheck %s
-; Don't fold negative offsets into cp / dp accesses to avoid a relocation
-; error if the address + addend is less than the start of the cp / dp.
-
@a = external constant [0 x i32], section ".cp.rodata"
@b = external global [0 x i32]
-define i32 *@f() nounwind {
+define i32 *@f1() nounwind {
+entry:
+; CHECK: f1:
+; CHECK: ldaw r11, cp[a+4]
+; CHECK: mov r0, r11
+ %0 = getelementptr [0 x i32]* @a, i32 0, i32 1
+ ret i32* %0
+}
+
+define i32 *@f2() nounwind {
+entry:
+; CHECK: f2:
+; CHECK: ldaw r0, dp[b+4]
+ %0 = getelementptr [0 x i32]* @b, i32 0, i32 1
+ ret i32* %0
+}
+
+; Don't fold negative offsets into cp / dp accesses to avoid a relocation
+; error if the address + addend is less than the start of the cp / dp.
+
+define i32 *@f3() nounwind {
entry:
-; CHECK: f:
+; CHECK: f3:
; CHECK: ldaw r11, cp[a]
; CHECK: sub r0, r11, 4
%0 = getelementptr [0 x i32]* @a, i32 0, i32 -1
ret i32* %0
}
-define i32 *@g() nounwind {
+define i32 *@f4() nounwind {
entry:
-; CHECK: g:
+; CHECK: f4:
; CHECK: ldaw [[REG:r[0-9]+]], dp[b]
; CHECK: sub r0, [[REG]], 4
%0 = getelementptr [0 x i32]* @b, i32 0, i32 -1
diff --git a/test/CodeGen/XCore/unaligned_load.ll b/test/CodeGen/XCore/unaligned_load.ll
index 0ee8e1c32667..772a847bd220 100644
--- a/test/CodeGen/XCore/unaligned_load.ll
+++ b/test/CodeGen/XCore/unaligned_load.ll
@@ -1,20 +1,19 @@
-; RUN: llc < %s -march=xcore > %t1.s
-; RUN: grep "bl __misaligned_load" %t1.s | count 1
-; RUN: grep ld16s %t1.s | count 2
-; RUN: grep ldw %t1.s | count 2
-; RUN: grep shl %t1.s | count 2
-; RUN: grep shr %t1.s | count 1
-; RUN: grep zext %t1.s | count 1
-; RUN: grep "or " %t1.s | count 2
+; RUN: llc < %s -march=xcore | FileCheck %s
-; Byte aligned load. Expands to call to __misaligned_load.
+; Byte aligned load.
+; CHECK: align1:
+; CHECK: bl __misaligned_load
define i32 @align1(i32* %p) nounwind {
entry:
%0 = load i32* %p, align 1 ; <i32> [#uses=1]
ret i32 %0
}
-; Half word aligned load. Expands to two 16bit loads.
+; Half word aligned load.
+; CHECK: align2:
+; CHECK: ld16s
+; CHECK: ld16s
+; CHECK: or
define i32 @align2(i32* %p) nounwind {
entry:
%0 = load i32* %p, align 2 ; <i32> [#uses=1]
@@ -23,7 +22,11 @@ entry:
@a = global [5 x i8] zeroinitializer, align 4
-; Constant offset from word aligned base. Expands to two 32bit loads.
+; Constant offset from word aligned base.
+; CHECK: align3:
+; CHECK: ldw {{r[0-9]+}}, dp
+; CHECK: ldw {{r[0-9]+}}, dp
+; CHECK: or
define i32 @align3() nounwind {
entry:
%0 = load i32* bitcast (i8* getelementptr ([5 x i8]* @a, i32 0, i32 1) to i32*), align 1
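
The expansions these checks capture: an align-1 i32 load has no short XCore sequence, so it becomes the __misaligned_load libcall; an align-2 load is stitched from two half-word loads, roughly (hi << 16) | (lo & 0xffff), hence the pair of ld16s plus or; and a constant misaligned offset from a word-aligned global is handled by loading the two overlapping aligned words and merging the needed bytes with shifts and an or.
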
diff --git a/test/CodeGen/XCore/unaligned_store.ll b/test/CodeGen/XCore/unaligned_store.ll
index 62078e6f6077..94e1852faea7 100644
--- a/test/CodeGen/XCore/unaligned_store.ll
+++ b/test/CodeGen/XCore/unaligned_store.ll
@@ -1,16 +1,18 @@
-; RUN: llc < %s -march=xcore > %t1.s
-; RUN: grep "bl __misaligned_store" %t1.s | count 1
-; RUN: grep st16 %t1.s | count 2
-; RUN: grep shr %t1.s | count 1
+; RUN: llc < %s -march=xcore | FileCheck %s
-; Byte aligned store. Expands to call to __misaligned_store.
+; Byte aligned store.
+; CHECK: align1:
+; CHECK: bl __misaligned_store
define void @align1(i32* %p, i32 %val) nounwind {
entry:
store i32 %val, i32* %p, align 1
ret void
}
-; Half word aligned store. Expands to two 16bit stores.
+; Half word aligned store.
+; CHECK: align2:
+; CHECK: st16
+; CHECK: st16
define void @align2(i32* %p, i32 %val) nounwind {
entry:
store i32 %val, i32* %p, align 2